repo_name | path | copies | size | content | license
---|---|---|---|---|---
gem/oq-engine | openquake/sep/calculators.py | 1 | 2395 |
from typing import Optional, Union
import osgeo
import numpy as np
import pandas as pd
try:
    import xarray as xr
except ImportError:
    # xarray is an optional dependency; fall back to None so the
    # isinstance checks below can guard on its availability.
    xr = None
from .landslide.common import (
static_factor_of_safety,
rock_slope_static_factor_of_safety,
)
from .landslide.newmark import (
newmark_critical_accel,
newmark_displ_from_pga_M,
prob_failure_given_displacement,
)
from .landslide.rotational import rotational_critical_accel
def calc_newmark_soil_slide_single_event(
pga: Union[float, np.ndarray],
M: float,
slope: Union[float, np.ndarray],
cohesion: Union[float, np.ndarray],
friction_angle: Union[float, np.ndarray],
saturation_coeff: Union[float, np.ndarray] = 0.1,
slab_thickness: Union[float, np.ndarray] = 2.5,
soil_dry_density: Union[float, np.ndarray] = 1500.0,
water_density: float = 1000.0,
out_name=None,
) -> Union[float, np.ndarray]:
"""
"""
fs = static_factor_of_safety(
slope,
cohesion,
friction_angle,
saturation_coeff,
slab_thickness,
soil_dry_density,
water_density,
)
ca = newmark_critical_accel(fs, slope)
Dn = newmark_displ_from_pga_M(pga, ca, M)
    if xr is not None and isinstance(Dn, xr.DataArray):
Dn.name = out_name
return Dn
def calc_newmark_soil_slide_event_set(
pga: Union[float, np.ndarray],
M: Union[float, np.ndarray],
slope: Union[float, np.ndarray],
cohesion: Union[float, np.ndarray],
friction_angle: Union[float, np.ndarray],
saturation_coeff: Union[float, np.ndarray] = 0.1,
slab_thickness: Union[float, np.ndarray] = 2.5,
soil_dry_density: Union[float, np.ndarray] = 1500.0,
water_density=1000.0,
) -> Union[float, np.ndarray]:
"""
"""
fs = static_factor_of_safety(
slope,
cohesion,
friction_angle,
saturation_coeff,
slab_thickness,
soil_dry_density,
water_density,
)
ca = newmark_critical_accel(fs, slope)
    if xr is not None and isinstance(pga, xr.Dataset):
Dn = xr.Dataset(
{
k: newmark_displ_from_pga_M(da, ca, da.attrs["mag"])
for k, da in pga.data_vars.items()
}
)
    else:
        # Fallback for plain float/array PGA inputs: use the magnitude
        # argument ``M`` directly instead of per-event magnitudes.
        Dn = newmark_displ_from_pga_M(pga, ca, M)
    return Dn
def calc_rock_slope_failures():
pass
def calc_rotational_failures():
pass
def calculate_lateral_spreading():
pass
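

# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: how the single-event
# calculator above might be called with plain scalar inputs.  The numeric
# values (and their implied units) are illustrative placeholders only.
if __name__ == "__main__":
    displacement = calc_newmark_soil_slide_single_event(
        pga=0.4,              # peak ground acceleration, placeholder
        M=6.5,                # earthquake magnitude, placeholder
        slope=25.0,           # slope angle, placeholder
        cohesion=20.0e3,      # soil cohesion, placeholder
        friction_angle=30.0,  # soil friction angle, placeholder
    )
    # Convert the displacement into a probability of slope failure.
    print(prob_failure_given_displacement(displacement))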
| agpl-3.0 |
kagayakidan/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 65 | 50308 |
# Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
    # if average is not true, average_coef and average_intercept will be
    # unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
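

# Hedged illustration, not part of the upstream module: the one-vs-all target
# encoding that ``_prepare_fit_binary`` relies on, where the i-th class is
# mapped to +1 and every other class to -1.
def _ova_encoding_sketch():
    y = np.array([0, 1, 2, 1, 0])
    classes = np.unique(y)
    i = 1  # treat class ``1`` as the positive class
    y_i = np.ones(y.shape, dtype=np.float64, order="C")
    y_i[y != classes[i]] = -1.0
    return y_i  # array([-1.,  1., -1.,  1., -1.])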
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified.
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
    decreasing strength schedule (aka learning rate). SGD allows minibatch
    (online/out-of-core) learning; see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
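

# Hedged sketch, not part of the upstream module: the binary probability
# mapping documented in ``predict_proba`` for loss='modified_huber', i.e.
# (clip(decision_function(X), -1, 1) + 1) / 2, applied to raw decision scores.
def _modified_huber_proba_sketch(scores):
    scores = np.asarray(scores, dtype=np.float64)
    return (np.clip(scores, -1.0, 1.0) + 1.0) / 2.0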
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
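

# Hedged sketch, not part of the upstream module: the 'constant' and
# 'invscaling' learning-rate formulas documented above.  The 'optimal'
# schedule also depends on a heuristic t0 chosen internally by the solver,
# so it is not reproduced here.
def _learning_rate_schedule_sketch(eta0=0.01, power_t=0.25, t=100.0):
    return {
        "constant": eta0,
        "invscaling": eta0 / (t ** power_t),
    }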
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
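

# ---------------------------------------------------------------------------
# Hedged sketch, not part of the upstream module: out-of-core regression with
# ``partial_fit``, feeding the estimator one mini-batch at a time as described
# in the docstrings above.  The data below is synthetic and purely
# illustrative.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    reg = SGDRegressor(eta0=0.01, power_t=0.25)
    for _ in range(20):                              # 20 mini-batches
        X_batch = rng.randn(32, 5)
        y_batch = X_batch.dot(np.arange(1.0, 6.0)) + 0.1 * rng.randn(32)
        reg.partial_fit(X_batch, y_batch)
    print(reg.coef_, reg.intercept_)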
| bsd-3-clause |
rahul-c1/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 |
"""
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
henrykironde/scikit-learn | sklearn/decomposition/nmf.py | 100 | 19059 |
""" Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted, check_non_negative
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
    n_components : int
        The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
random_state = check_random_state(random_state)
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
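

# Hedged sketch, not part of the upstream module: NNDSVD produces a
# non-negative (W, H) pair whose product roughly approximates X, which is why
# it makes a useful warm start for the projected gradient iterations below.
def _nndsvd_init_sketch():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 4))
    W, H = _initialize_nmf(X, n_components=2, random_state=0)
    assert (W >= 0).all() and (H >= 0).all()
    return norm(X - np.dot(W, H))  # reconstruction error of the initial guess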
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
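

# Hedged sketch, not part of the upstream module: on a consistent,
# non-negative problem V = W.dot(H_true), the projected gradient solver above
# should recover H_true up to the requested tolerance.
def _nls_subproblem_sketch():
    rng = np.random.RandomState(0)
    W = np.abs(rng.randn(5, 3))
    H_true = np.abs(rng.randn(3, 2))
    V = np.dot(W, H_true)
    H_init = np.abs(rng.randn(3, 2))
    H, grad, n_iter = _nls_subproblem(V, W, H_init, tol=1e-8, max_iter=1000)
    return norm(H - H_true)  # should be close to zero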
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
        Degree of correctness to maintain, if sparseness is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
rng = check_random_state(self.random_state)
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_, random_state=rng)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a',
random_state=rng)
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar',
random_state=rng)
elif init == "random":
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
| bsd-3-clause |
rahul-c1/scikit-learn | sklearn/svm/tests/test_sparse.py | 27 | 10643 |
from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits
from sklearn.svm.tests import test_svm
from sklearn.utils import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
clf.fit(X, Y)
sp_clf = svm.SVC(kernel='linear', probability=True, random_state=0)
sp_clf.fit(X_sp, Y)
assert_array_equal(sp_clf.predict(T), true_result)
assert_true(sparse.issparse(sp_clf.support_vectors_))
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_true(sparse.issparse(sp_clf.dual_coef_))
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_true(sparse.issparse(sp_clf.coef_))
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
assert_array_almost_equal(clf.support_, sp_clf.support_)
assert_array_almost_equal(clf.predict(T), sp_clf.predict(T))
# refit with a different dataset
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
assert_array_almost_equal(clf.support_, sp_clf.support_)
assert_array_almost_equal(clf.predict(T2), sp_clf.predict(T2))
assert_array_almost_equal(clf.predict_proba(T2),
sp_clf.predict_proba(T2), 4)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
"""Test the sparse SVC with the iris dataset"""
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_error():
"""
Test that it gives proper exception on deficient input
"""
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
"""
Similar to test_SVC
"""
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
"""Test the sparse LinearSVC with the iris dataset"""
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
"""
Test class weights
"""
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
"""
Test weights on individual samples
"""
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
def test_sparse_liblinear_intercept_handling():
"""
Test that sparse liblinear honours intercept_scaling param
"""
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_realdata():
"""
Test on a subset from the 20newsgroups dataset.
    This catches some bugs if input is not correctly converted into
sparse format or weights are not correctly initialized.
"""
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
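def _example_dense_sparse_equivalence():
    # Illustrative sketch of the pattern used by the tests above (not one of
    # the original tests): fit the same estimator on dense and sparse views of
    # the data and compare fitted attributes and predictions.
    clf_dense = svm.SVC(kernel='linear').fit(X, Y)
    clf_sparse = svm.SVC(kernel='linear').fit(X_sp, Y)
    assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_.toarray())
    assert_array_equal(clf_dense.predict(T), clf_sparse.predict(T))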
|
bsd-3-clause
|
astrobayes/BMAD
|
chapter_10/code_10.11.py
|
1
|
2333
|
# From: Bayesian Models for Astrophysical Data, Cambridge Univ. Press
# (c) 2017, Joseph M. Hilbe, Rafael S. de Souza and Emille E. O. Ishida
#
# you are kindly asked to include the complete citation if you used this
# material in a publication
#
# Code 10.11 Beta model in Python using Stan, for accessing the
# relationship between the fraction of atomic gas and
# the galaxy stellar mass
#
# Statistical Model: Beta model in Python using Stan
#
# Astronomy case: Relation between atomic gas fraction
# and stellar mass
# taken from Bradford et al., 2015, ApJ 809, id. 146
#
# 1 response (Y - atomic gas fraction)
# 1 explanatory variable (x - log stellar mass)
#
# Original data from: http://www.astro.yale.edu/jdbradford/research.html
import numpy as np
import pandas as pd
import pystan
import statsmodels.api as sm
# Data
path_to_data = 'https://raw.githubusercontent.com/astrobayes/BMAD/master/data/Section_10p5/f_gas.csv'
# read data
data_frame = dict(pd.read_csv(path_to_data))
# build atomic gas fraction
y = np.array([data_frame['M_HI'][i]/
(data_frame['M_HI'][i] + data_frame['M_STAR'][i])
for i in range(data_frame['M_STAR'].shape[0])])
x = np.array([np.log(item) for item in data_frame['M_STAR']])
# prepare data for Stan
data = {}
data['Y'] = y
data['X'] = sm.add_constant((x.transpose()))
data['nobs'] = data['X'].shape[0]
data['K'] = data['X'].shape[1]
# Fit
stan_code="""
data{
int<lower=0> nobs; # number of data points
int<lower=0> K; # number of coefficients
matrix[nobs, K] X; # stellar mass
real<lower=0, upper=1> Y[nobs]; # atomic gas fraction
}
parameters{
vector[K] beta; # linear predictor coefficients
real<lower=0> theta;
}
model{
vector[nobs] pi;
real a[nobs];
real b[nobs];
for (i in 1:nobs){
pi[i] = inv_logit(X[i] * beta);
a[i] = theta * pi[i];
b[i] = theta * (1 - pi[i]);
}
# priors and likelihood
for (i in 1:K) beta[i] ~ normal(0, 100);
theta ~ gamma(0.01, 0.01);
Y ~ beta(a, b);
}
"""
# Run mcmc
fit = pystan.stan(model_code=stan_code, data=data, iter=7500, chains=3,
warmup=5000, thin=1, n_jobs=3)
# Output
print(fit)
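# Optional posterior summary beyond print(fit); an illustrative sketch assuming
# the PyStan 2.x extract() API, with keys matching the parameters declared in
# the Stan model above.
posterior = fit.extract(permuted=True)
print('posterior mean of beta:', posterior['beta'].mean(axis=0))
print('posterior mean of theta:', posterior['theta'].mean())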
|
gpl-3.0
|
LohithBlaze/scikit-learn
|
sklearn/metrics/pairwise.py
|
104
|
42995
|
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
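# Illustrative sketch of the validation helpers above (added for exposition):
# passing Y=None returns the same validated array twice, and mismatched
# feature counts raise a ValueError.
def _example_check_pairwise_arrays():
    X = np.array([[0., 1.], [1., 1.]])
    Xv, Yv = check_pairwise_arrays(X, None)
    assert Xv is Yv                       # Y defaults to a pointer to X
    try:
        check_pairwise_arrays(X, np.ones((2, 3)))
    except ValueError:
        pass                              # incompatible n_features is rejected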
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
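# Illustrative sketch (added for exposition): the chunked computation above
# agrees with the naive argmin/min over the full distance matrix.
def _example_pairwise_distances_argmin_min():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(5, 3), rng.rand(7, 3)
    idx, dist = pairwise_distances_argmin_min(X, Y)
    D = pairwise_distances(X, Y, metric="euclidean")
    assert np.array_equal(idx, D.argmin(axis=1))
    assert np.allclose(dist, D.min(axis=1))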
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
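# Illustrative sketch (added for exposition): cosine_distances is computed as
# 1.0 minus cosine_similarity, so identical rows are at distance zero.
def _example_cosine_distances():
    X = np.array([[1., 0.], [1., 1.]])
    D = cosine_distances(X)
    assert np.allclose(D, 1. - cosine_similarity(X))
    assert np.allclose(np.diag(D), 0.)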
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
    -----
    The cosine distance is equivalent to half the squared
    euclidean distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
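# Illustrative sketch (added for exposition): with the default gamma=None the
# kernel uses gamma = 1 / n_features, matching the explicit formula below.
def _example_polynomial_kernel():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    K = polynomial_kernel(X, degree=3, coef0=1)
    expected = (np.dot(X, X.T) / X.shape[1] + 1) ** 3
    assert np.allclose(K, expected)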
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
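# Illustrative sketch (added for exposition): rbf_kernel is the element-wise
# exponential of -gamma times the squared euclidean distances.
def _example_rbf_kernel():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(4, 3), rng.rand(5, 3)
    K = rbf_kernel(X, Y, gamma=0.5)
    assert np.allclose(K, np.exp(-0.5 * euclidean_distances(X, Y, squared=True)))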
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
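# Illustrative sketch (added for exposition): on L2-normalized rows the cosine
# similarity coincides with the plain dot product computed by linear_kernel.
def _example_cosine_similarity():
    rng = np.random.RandomState(0)
    X = normalize(rng.rand(4, 3))
    assert np.allclose(cosine_similarity(X), linear_kernel(X, X))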
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
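# Illustrative sketch (added for exposition): chi2_kernel is the exponentiated
# additive kernel, i.e. exp(gamma * additive_chi2_kernel(X, Y)).
def _example_chi2_kernel():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(4, 3), rng.rand(5, 3)   # chi2 kernels need non-negative input
    K = chi2_kernel(X, Y, gamma=0.5)
    assert np.allclose(K, np.exp(0.5 * additive_chi2_kernel(X, Y)))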
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
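# Illustrative sketch (added for exposition): a metric name handled by
# PAIRWISE_DISTANCE_FUNCTIONS and an equivalent user callable give the same
# distance matrix.
def _example_pairwise_distances():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    D_str = pairwise_distances(X, metric="manhattan")
    D_call = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).sum())
    assert np.allclose(D_str, D_call)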
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise distance metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
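# Illustrative sketch (added for exposition): with filter_params=True keyword
# arguments not accepted by the selected kernel are dropped ('degree' is
# ignored for 'rbf' here).
def _example_pairwise_kernels():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    K = pairwise_kernels(X, metric="rbf", filter_params=True,
                         gamma=0.5, degree=2)
    assert np.allclose(K, rbf_kernel(X, X, gamma=0.5))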
|
bsd-3-clause
|
matija94/show-me-the-code
|
data-science/CollectiveIntelligence/com/AI/singi/kolo1/skmeans.py
|
1
|
1172
|
from sklearn.cluster import KMeans
import numpy as np
import matplotlib.pyplot as plt
X = np.array([ [1,2], [1,4], [1,0], [4,2], [4,4], [4,0] ], dtype=float)
x = KMeans(n_clusters=2).fit(X)
print x.cluster_centers_
print x.labels_==1
print X[x.labels_==1,0]
print X[x.labels_==1,1]
plt.scatter(X[x.labels_==1,0], X[x.labels_==1,1])
plt.scatter(X[x.labels_==0,0], X[x.labels_==0,1], c=u'r')
plt.show()
#2ndExample
X = np.zeros([200,2])
X[:100,0] = np.random.randint(1,2,100) + np.random.random(100)*2
X[100:,0] = np.random.randint(1,2,100) - np.random.random(100)*6
X[:,1] = np.random.randint(1,2,200) + np.random.random(200)
plt.scatter(X[:,0], X[:,1])
plt.show()
x = KMeans(n_clusters=2).fit(X)
plt.scatter(X[x.labels_==1,0], X[x.labels_==1,1])
plt.scatter(X[x.labels_==0,0], X[x.labels_==0,1], c=u'r')
plt.show()
#3rd example
#let's make 4 clusters out of randomly generated X data set
x = KMeans(n_clusters=4).fit(X)
plt.scatter(X[x.labels_==1,0], X[x.labels_==1,1])
plt.scatter(X[x.labels_==0,0], X[x.labels_==0,1], c=u'r')
plt.scatter(X[x.labels_==2,0], X[x.labels_==2,1], c=u'y')
plt.scatter(X[x.labels_==3,0], X[x.labels_==3,1], c=u'g')
plt.show()
|
mit
|
tlshaw/AnoThER-Seq
|
tools.py
|
1
|
2759
|
import pandas as pd
def CSVWriter(iterable, outLoc, header=""):
"""
Writes an iterable to a CSV file.
:param iterable: List of list
:param outLoc: file location. Where to place it.
:param header: header of the CSV file
:return: 1
"""
if not iterable:
print ("nothing to write")
return 0
out = open(outLoc, 'w')
if header:
out.write(header+'\n')
#Only works if iterable is a nested list
for member in iterable:
for item in member:
out.write(str(item)+',')
out.write('\n')
print("write to "+outLoc+" successful.")
return 1
def seqParser(seqLoc):
"""
Takes a FASTA formatted list of sequences and returns a properly formatted nested list
:param seqLoc: fasta formatted file
:return: nested list in the form [[name, seq],...]
"""
f=open(seqLoc,'r')
RNAseqs = f.readlines()
f.close()
RNAseqlist = []
for i in range(0, len(RNAseqs)):
if RNAseqs[i][0] == ">":
RNAseqlist.append([RNAseqs[i].rstrip()[1:],RNAseqs[i+1].rstrip()])
return RNAseqlist
def countsin(inLoc):
"""
Takes saved count file and reads it into a counts nested list.
:param inLoc: counts file
:return: nested list. counts nested list. [[read, total number, unique number],...]
"""
countFile = open(inLoc, "r").readlines()
counts=[]
for i in range(1, len(countFile)):
temp = countFile[i].rstrip().split(",")
counts.append([temp[0][8:], temp[1], temp[2]])
return counts
def tailParser(inLoc):
"""
parses .tail file into a nested list usable by other modules
:param inLoc: CSV input .tail file as produced by aligner.tailcalc
:return: nested list. [[sequence, #reads, gene, 3'end, tail len, tail seq],...]
"""
f = open(inLoc, 'r')
tails = f.readlines()
f.close()
tailList = []
for i in range(len(tails)):
if i==0: continue #skips the header
line = tails[i].rstrip().split(',')
tailList.append(line)
return tailList
def repeater(item, list, reps):
'''Takes an item and a list and then adds a copy of the item to the list reps number of times.'''
for i in range(reps):
list.append(item)
return
def pdTailMaker(inLoc):
"""
Takes standard tail file and returns a pandas dataframe
"""
tails = tailParser(inLoc)
pdTails = []
for tail in tails:
type = tail[2][tail[2].find("|")+1:]
repeater([tail[2],tail[3],tail[4],tail[5],type],pdTails,int(tail[1]))
df = pd.DataFrame(pdTails,columns=['Gene','3Loc','TailLength','TailSeq', 'Type'])
df[['3Loc','TailLength']] = df[['3Loc','TailLength']].apply(pd.to_numeric,errors='coerce')
return df
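def _example_csv_roundtrip():
    """
    Illustrative sketch (added for exposition, not part of the original
    module); the output file name is a placeholder. Shows CSVWriter followed
    by countsin, which skips the header and strips the first 8 characters of
    each read name.
    """
    rows = [["sample01_gene1", 10, 3], ["sample01_gene2", 7, 2]]
    CSVWriter(rows, "example_counts.csv", header="read,total,unique")
    return countsin("example_counts.csv")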
|
mit
|
MiroBear/Information-Mining
|
StreetSigns/trainModels.py
|
1
|
5660
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm
# from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
# from sklearn.linear_model import LogisticRegression
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# from sklearn.preprocessing import normalize
from StreetSigns.readTrafficSigns import DataReader
def classify(images, labels):
# create stratified labels with equal number of samples per class
classes, classCnts = np.unique(labels, return_counts=True)
evenCnt = len(labels) / len(classes)
evenCnt = int(evenCnt)
stratifiedLabels = np.repeat(classes, evenCnt)
stratifiedLabels = np.resize(stratifiedLabels, len(labels))
remainder = len(labels) - evenCnt * len(classes)
stratifiedLabels[len(labels)-remainder-1:-1] = classes[0:remainder]
X_train, X_test, y_train, y_test = train_test_split(
images, labels, test_size=0.25, random_state=0, stratify=stratifiedLabels) # labels)
# dimRed = LDA()
# X_train = dimRed.fit_transform(X_train, y_train)
# X_test = dimRed.transform(X_test)
print('Training with LinearSVC')
clf = svm.LinearSVC(penalty='l2', C=0.5)
clf.fit(X_train, y_train)
scores = clf.score(X_test, y_test)
print(scores)
# TODO: reduce dimensionality of features by PCA and do classification on this data. This should get rid of
# background noise.
# TODO: use spatial pyramid matching
# https://github.com/wihoho/Image-Recognition
# TODO: get best individuals from each class, extract the shape and create a mask to cancel out the background
# for all images of this class.
# y_pred = clf.predict(X_test)
# confSVC = confusion_matrix(y_test, y_pred)
# plt.figure()
# plot_confusion_matrix(confSVC, classes=classes)
# plt.show()
# print('Training with LDA')
# clf = LDA(solver='lsqr', shrinkage='auto')
# clf.fit(X_train, y_train)
# scores = clf.score(X_test, y_test)
# print(scores)
# print('Training with LR')
# clf = LogisticRegression()
# clf.fit(X_train, y_train)
# scores = clf.score(X_test, y_test)
# print(scores)
#
# print('Training with Random Forest')
# clf = RandomForestClassifier(n_estimators=100)
# clf.fit(X_train, y_train)
# scores = clf.score(X_test, y_test)
# print(scores)
def main():
r = DataReader()
# r = reader.DataReader()
# r.clear()
images, labels = r.readImages()
print('#images: ' + str(len(images)))
classes = np.unique(labels)
print(np.sort(classes.astype(int)))
hogFeatures, labels = r.readHOG()
print('#hogFeatures: ' + str(len(hogFeatures[0])))
hueFeatures, labels = r.readHue()
print('#hueFeatures: ' + str(len(hueFeatures[0])))
features = combine(hogFeatures, hueFeatures)
# features = hogFeatures
# imagesVectorized = np.empty([len(labels), images[0].size])
# for i in range(len(labels)):
# imagesVectorized[i] = images[i].ravel()
# features = imagesVectorized
# features = normalize(features)
# HOG1
# no normalize: 0.982432432432
# normalize: 0.980630630631
# HOG2
# 0.991441441441
# same with hue features combined
# np.histogram(labels.astype(int))
# plt.hist(labels.astype(int), bins=42)
# plt.title('Classes')
# plt.show()
# Classes are not evenly distributed
# TODO:
# - modify images a bit to create extra variants, e.g. by scaling, rotating, blurring, distorting
# - or apply stratified sampling during cross-validation or training-test split
'''
TODO: extract main colors from all images as additional feature.
Problems:
- common colors for all images, i.e. define a small set of colors to which all are mapped. Could be problematic for
corner colors.
- main colors to be extracted e.g. by ColorThief: https://github.com/fengsp/color-thief-py/blob/master/colorthief.py
-> per image and image dependent. i.e. no mapping to predefined colors. This must be done on our own.
'''
classify(features, labels)
def combine(X1, X2):
n = len(X1)
n1 = len(X1[0])
n2 = len(X2[0])
X = np.empty([n, n1 + n2])
for i in range(len(X)):
X[i, 0:n1] = X1[i]
X[i, n1:n1+n2] = X2[i]
return X
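# Illustrative check (added for exposition): for 2-D array inputs the
# element-wise copy in combine() matches a single np.hstack call.
def _example_combine_equals_hstack():
    X1 = np.arange(6, dtype=float).reshape(3, 2)
    X2 = np.arange(9, dtype=float).reshape(3, 3)
    assert np.array_equal(combine(X1, X2), np.hstack([X1, X2]))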
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
# print(cm)
np.fill_diagonal(cm, 1)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
# fmt = '.2f' if normalize else 'd'
# thresh = cm.max() / 2.
# for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
# plt.text(j, i, format(cm[i, j], fmt),
# horizontalalignment="center",
# color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
if __name__ == '__main__':
main()
|
gpl-3.0
|
omrihar/1_npfi
|
fig3.py
|
1
|
8078
|
#!/usr/bin/env python2
# encoding: utf-8
'''
fig3.py
Written by:
Omri Har-Shemesh, Computational Science Lab, University of Amsterdam
[email protected]
Last updated on 23 September 2015
Description:
Figure 3 in Ref.[1]
References:
[1] O. Har-Shemesh, R. Quax, B. Miñano, A.G. Hoekstra, P.M.A. Sloot, Non-parametric
estimation of Fisher information from real data, arxiv:1507.00964[stat.CO]
Functions:
simulate_data
    plot_data
Dependencies:
numpy
matplotlib
timeit
cPickle
os
gzip
npfi.py
'''
from __future__ import division
import numpy as np
from numpy.random import normal
import matplotlib.pyplot as plt
import os
import gzip
import cPickle as pickle
import timeit
from npfi import npfi, get_pdfs_from_data
def simulate_data(ss, es, N, rep, zero, G, alpha, fname):
""" Simulates the data for the plot
Args:
ss: An array of sigma values to estimate the FI at.
es: An array of epsilon values to estimate the FI at.
N: Number of data points for each PDF.
rep: Number of repetitions of the whole simulation.
zero: What should npfi consider as zero
G: G for DEFT
alpha: alpha for DEFT
fname: Name of the file where the simulation data will be stored.
Returns:
results: A dictionary with all simulated data, which was also stored to
the file.
"""
# Results of the simulation will be stored here
data = {}
# Go over all sigma values in ss
for i, s in enumerate(ss):
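        # For samples from N(0, s^2) the Fisher information with respect to s
        # (the g_sigma-sigma component plotted later) is 2 / s^2; it is used as
        # the ground truth against which the estimates below are compared.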
true_fi = 2 / s ** 2
ess = [] # Store the epsilon values actually used
dss = [] # Store the ds values we used
FI_values_all = []
err_values_all = []
err_median, err_5, err_95 = [], [], []
for j, e in enumerate(es):
ds = s / (e * np.sqrt(N)) # Choose ds according to desired epsilon
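            # Equivalently e = s / (ds * np.sqrt(N)): at a fixed epsilon, a larger
            # sample size N allows a proportionally smaller step ds.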
# If ds >= s we have a problem of sampling with negative std
if ds >= s:
continue
dss.append(ds)
ess.append(e)
# Estimate the FI for rep repetitions
FI_values = []
            for k in range(rep):  # use k so the epsilon-loop index j is not shadowed
sim_data = [normal(size=N, scale=s),
normal(size=N, scale=s-ds),
normal(size=N, scale=s+ds)]
pdfs, bbox = get_pdfs_from_data(sim_data, method="deft", G=G,
alpha=alpha, bbox="adjust")
FI, a, b = npfi(pdfs, ds, bounds=bbox, logarithmic=False,
zero=zero, N=N)
FI_values.append(FI)
# More convenient to use as numpy arrays
FI_values = np.array(FI_values)
# Compute statistics from the results
err_values = (FI_values - true_fi) / true_fi
FI_values_all.append(FI_values)
err_values_all.append(err_values)
err_median.append(np.median(err_values))
err_5.append(np.percentile(err_values, 5))
err_95.append(np.percentile(err_values, 95))
data[s] = dict(FI_values_all=FI_values_all,
err_values_all=err_values_all,
err_median=np.array(err_median),
err_5=np.array(err_5),
err_95=np.array(err_95),
dss=dss,
ess=ess)
results = dict(data=data, N=N, rep=rep, ss=ss)
f = gzip.open(fname, "wb")
pickle.dump(results, f)
f.close()
return results
def plot_data(sim_data, fname=None):
""" Plots the data, either using plt.show or saves to a file.
Args:
        sim_data: The data produced by simulate_data
fname: If None, plot to screen, else save figure as fname.
Returns: Nothing
"""
# Setup the plotting parameters
params = {
'text.usetex' : True,
'font.size' : 10,
'font.family' : 'cmr',
'text.latex.unicode' : True
}
plt.rcParams.update(params)
plt.style.use("publication")
colors = {
0 : ["#08519c", "#6baed6", "#3182bd"],
1 : ["#006d2c", "#66c2a4", "#2ca25f"],
2 : ["#b30000", "#fdbb84", "#e34a33"],
3 : ["#54278f", "#9e9ac8", "#756bb1"],
4 : ["#252525", "#969696", "#cccccc"]
}
dot_styles = "o*vsph"
fig = plt.figure()
fig.set_size_inches(5, 5)
ax = fig.add_subplot(111)
i = 0
for s, data in sim_data['data'].iteritems():
true_fi = 2.0 / s**2
x = data['ess']
y = data['err_median']
y_5, y_95 = data['err_5'], data['err_95']
line, = ax.plot(x, y, dot_styles[i] + "-", lw=1.2, markersize=4, color=colors[i][0], label=r"$\sigma=%.1f$" % s)
ax.fill_between(x, y_5, y_95, alpha=.5, facecolor=colors[i][1], edgecolor=colors[i][2])
i += 1
ax.set_xlabel(r"$\varepsilon$")
ax.set_ylabel(r"$\frac{\mathrm{FI} - g_{\sigma\sigma}}{g_{\sigma\sigma}}$")
ax.set_xticks([0.01, 0.2, 0.4, 0.6, 0.8, 1.0])
ax.tick_params(right=False, top=False)
ax.grid("off")
ax.set_ylim(-0.75,4)
ax.set_xlim(0, 0.8)
ax.legend(loc="upper right", prop={"size": 8})
# Add inset to the original figure
add_inset = True
if add_inset:
from mpl_toolkits.axes_grid1.inset_locator import mark_inset, zoomed_inset_axes, inset_axes
xticks_down = True
if xticks_down:
axins = inset_axes(ax, width=1.2, height=1.2, bbox_to_anchor=(0.5, 0.98), bbox_transform=ax.transAxes)
else:
axins = inset_axes(ax, width=1.5, height=1.5, bbox_to_anchor=(0.6, 0.95), bbox_transform=ax.transAxes)
i = 0
for s, data in sim_data['data'].iteritems():
true_fi = 2.0 / (s**2)
x = data['ess']
y = data['err_median']
y_5, y_95 = data['err_5'], data['err_95']
line, = axins.plot(x, y, dot_styles[i] + "-", lw=1.2, markersize=4, color=colors[i][0], label=r"$\sigma=%.1f$" % s)
line_err = axins.errorbar(x, y, yerr=[y-y_5, y_95-y], ls=dot_styles[i], lw=1.2, markersize=4, color=colors[i][0], label=r"$\sigma=%.1f$" % s)
axins.set_xlim(0.015, 0.11)
axins.set_ylim(-0.2, 0.35)
axins.set_axis_bgcolor("w")
if xticks_down:
axins.set_xticks([0.02, 0.04, 0.06, 0.08, 0.1])
axins.set_xticklabels(["$0.02$", "$0.04$", "$0.06$", "$0.08$", ""])
axins.get_xaxis().set_tick_params(direction='in', labelcolor="k", labeltop=False, labelbottom=True, labelsize=8)
axins.get_yaxis().set_tick_params(labelsize=8)
else:
axins.get_xaxis().set_tick_params(direction='in', labelcolor="k", labeltop=True, labelbottom=False)
axins.get_yaxis().set_tick_params(direction='in', labelcolor="k", labelleft=False, labelright=True)
axins.set_frame_on(True)
axins.grid("off")
plt.setp(axins.spines.values(), color="k", lw=1)
mark_inset(ax, axins, loc1=2, loc2=4, fc="none", ec="k", lw=1)
if fname is None:
plt.show()
else:
plt.savefig(fname, dpi=700, bbox_inches="tight")
if __name__ == '__main__':
start_time = timeit.default_timer()
# Parameters of the plot
rep = 100
ss = [0.5, 1.0, 2.0, 5.0, 10.0]
es = np.array([0.01, 0.013, 0.015, 0.017, 0.019, 0.02, 0.03, 0.05,
0.07, 0.09, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,
0.9, 1.0])
N = 20000
G = 200
alpha = 3
zero = 1e-10
seed = 200
np.random.seed(seed)
fname = "fig3_data_rep_%d_N_%d_seed_%d.pklz" % (rep, N, seed)
if os.path.isfile(fname):
print("Found data file, plotting...")
f = gzip.open(fname, "rb")
data = pickle.load(f)
f.close()
else:
print("Simulating data...")
data = simulate_data(ss, es, N, rep, zero, G, alpha, fname)
if __debug__:
print("Obtaining the data took %.2f seconds" % (timeit.default_timer()-start_time))
plot_data(data)
|
mit
|
fabioticconi/scikit-learn
|
sklearn/pipeline.py
|
14
|
21389
|
"""
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
from warnings import warn
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s"
% (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
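            # e.g. a fit_params key 'svc__sample_weight' is routed to the step
            # named 'svc' as fit parameter 'sample_weight'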
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step
of the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies
``inverse_transform`` in inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
warn("From version 0.19, a 1d X will not be reshaped in"
" pipeline.inverse_transform any more.", FutureWarning)
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all
steps of the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, their names will be set
to the lowercase of their types automatically.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB(priors=None)) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB(priors=None))])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca',
PCA(copy=True, iterated_power=4,
n_components=None, random_state=None,
svd_solver='auto', tol=0.0, whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
|
bsd-3-clause
|
JRMeyer/Autotrace
|
under-development/autoencoder.py
|
2
|
3117
|
import numpy as np
import deepnet
import backprop
import cPickle as pickle
import scipy.io
import matplotlib.pyplot as plt
#import matplotlib.cm as cm
def demo_autoencoder(stream=None):
    # NOTE: `stream` is made optional so the bare call in __main__ below works; the
    # original signature required a compute-stream object (assumption: deepnet and
    # backprop accept None as "use the default stream").
#load and norm the data
data = np.load('scaled_images.npy')
data = np.asarray(data, dtype='float32')
data /= 255.0
#set up and train the initial deepnet
dnn = deepnet.DeepNet([data.shape[1], data.shape[1], data.shape[1],
42], ['sigmoid','sigmoid','sigmoid','sigmoid'], stream=stream)
dnn.train(data, [225, 75, 75], 0.0025)
#save the trained deepnet
pickle.dump(dnn, file('pretrained.pkl','wb'))
#unroll the deepnet into an autoencoder
autoenc = unroll_network(dnn.network)
##fine-tune with backprop
mlp = backprop.NeuralNet(network=autoenc, stream=stream)
trained = mlp.train(mlp.network, data, data, max_iter=30,
validErrFunc='reconstruction', targetCost='linSquaredErr')
##save
pickle.dump(trained, file('network.pkl','wb'))
def unroll_network(network):
'''
Takes a pre-trained network and treats it as an encoder network. The decoder
network is constructed by inverting the encoder. The decoder is then appended
to the input network to produce an autoencoder.
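    For example, an encoder with layer sizes [d, h1, h2] unrolls into an
    autoencoder [d, h1, h2, h1, d]; the decoder layers re-use the encoder
    weight matrices (transposed, i.e. tied weights) together with the visible biases.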
'''
decoder = []
encoder = []
for i in range(len(network)):
elayer = backprop.Layer(network[i].W.T, network[i].hbias, network[i].n_hidden, network[i].hidtype)
dlayer = backprop.Layer(network[i].W, network[i].vbias, network[i].n_visible, network[i].vistype)
encoder.append(elayer)
decoder.append(dlayer)
decoder.reverse()
encoder.extend(decoder)
return encoder
def save_net_as_mat(pickled_net):
'''
Takes the network pickle file saved in demo_autoencoder and saves it as a .mat
file for use with matlab
'''
network = pickle.load(file(pickled_net,'rb'))
mdic = {}
for i in range(len(network)/2):
mdic['W%d'%(i+1)] = network[i].W.as_numpy_array()
mdic['b%d'%(i+1)] = network[i].hbias.as_numpy_array()
mdic['hidtype%d'%(i+1)] = network[i].hidtype
scipy.io.savemat('network.mat', mdic)
def visualize_results(netfile, datafile):
network = pickle.load(file(netfile, 'rb'))
#network = unroll_network(dnn.network)
data = np.load(datafile)
data = np.asarray(data, dtype='float32')
data /= 255.0
mlp = backprop.NeuralNet(network=network)
recon = mlp.run_through_network(data, network)
inds = np.arange(recon.shape[0])
np.random.shuffle(inds)
for i in range(10):
dim = int(np.sqrt(data.shape[1]))
orig = data[inds[i]].reshape((dim,dim))
rec = recon[inds[i]].reshape((dim,dim))
plt.figure(i)
ax = plt.subplot(211)
plt.imshow(orig, cmap=plt.gray())
ax.set_yticks([])
ax.set_xticks([])
ax = plt.subplot(212)
plt.imshow(rec, cmap=plt.gray())
ax.set_yticks([])
ax.set_xticks([])
plt.savefig('img_%d.jpg'%(inds[i]))
if __name__ == "__main__":
demo_autoencoder()
visualize_results('network.pkl','scaled_images.npy')
|
mit
|
TomAugspurger/pandas
|
pandas/io/formats/html.py
|
1
|
22772
|
"""
Module for formatting output data in HTML.
"""
from textwrap import dedent
from typing import IO, Any, Dict, Iterable, List, Mapping, Optional, Tuple, Union, cast
from pandas._config import get_option
from pandas._libs import lib
from pandas import MultiIndex, option_context
from pandas.io.common import is_url
from pandas.io.formats.format import (
DataFrameFormatter,
TableFormatter,
buffer_put_lines,
get_level_lengths,
)
from pandas.io.formats.printing import pprint_thing
class HTMLFormatter(TableFormatter):
"""
Internal class for formatting output data in html.
This class is intended for shared functionality between
DataFrame.to_html() and DataFrame._repr_html_().
Any logic in common with other output formatting methods
should ideally be inherited from classes in format.py
and this class responsible for only producing html markup.
"""
indent_delta = 2
def __init__(
self,
formatter: DataFrameFormatter,
classes: Optional[Union[str, List[str], Tuple[str, ...]]] = None,
border: Optional[int] = None,
) -> None:
self.fmt = formatter
self.classes = classes
self.frame = self.fmt.frame
self.columns = self.fmt.tr_frame.columns
self.elements: List[str] = []
self.bold_rows = self.fmt.bold_rows
self.escape = self.fmt.escape
self.show_dimensions = self.fmt.show_dimensions
if border is None:
border = cast(int, get_option("display.html.border"))
self.border = border
self.table_id = self.fmt.table_id
self.render_links = self.fmt.render_links
if isinstance(self.fmt.col_space, int):
self.fmt.col_space = f"{self.fmt.col_space}px"
@property
def show_row_idx_names(self) -> bool:
return self.fmt.show_row_idx_names
@property
def show_col_idx_names(self) -> bool:
return self.fmt.show_col_idx_names
@property
def row_levels(self) -> int:
if self.fmt.index:
# showing (row) index
return self.frame.index.nlevels
elif self.show_col_idx_names:
# see gh-22579
# Column misalignment also occurs for
# a standard index when the columns index is named.
# If the row index is not displayed a column of
# blank cells need to be included before the DataFrame values.
return 1
# not showing (row) index
return 0
def _get_columns_formatted_values(self) -> Iterable:
return self.columns
# https://github.com/python/mypy/issues/1237
@property
def is_truncated(self) -> bool: # type: ignore
return self.fmt.is_truncated
@property
def ncols(self) -> int:
return len(self.fmt.tr_frame.columns)
def write(self, s: Any, indent: int = 0) -> None:
rs = pprint_thing(s)
self.elements.append(" " * indent + rs)
def write_th(
self, s: Any, header: bool = False, indent: int = 0, tags: Optional[str] = None
) -> None:
"""
Method for writing a formatted <th> cell.
If col_space is set on the formatter then that is used for
the value of min-width.
Parameters
----------
s : object
The data to be written inside the cell.
header : bool, default False
Set to True if the <th> is for use inside <thead>. This will
cause min-width to be set if there is one.
indent : int, default 0
The indentation level of the cell.
tags : str, default None
Tags to include in the cell.
Returns
-------
        None. The ``<th>`` cell markup is appended to ``self.elements``.
"""
if header and self.fmt.col_space is not None:
tags = tags or ""
tags += f'style="min-width: {self.fmt.col_space};"'
self._write_cell(s, kind="th", indent=indent, tags=tags)
def write_td(self, s: Any, indent: int = 0, tags: Optional[str] = None) -> None:
self._write_cell(s, kind="td", indent=indent, tags=tags)
def _write_cell(
self, s: Any, kind: str = "td", indent: int = 0, tags: Optional[str] = None
) -> None:
if tags is not None:
start_tag = f"<{kind} {tags}>"
else:
start_tag = f"<{kind}>"
if self.escape:
# escape & first to prevent double escaping of &
esc = {"&": r"&", "<": r"<", ">": r">"}
else:
esc = {}
rs = pprint_thing(s, escape_chars=esc).strip()
if self.render_links and is_url(rs):
rs_unescaped = pprint_thing(s, escape_chars={}).strip()
start_tag += f'<a href="{rs_unescaped}" target="_blank">'
end_a = "</a>"
else:
end_a = ""
self.write(f"{start_tag}{rs}{end_a}</{kind}>", indent)
def write_tr(
self,
line: Iterable,
indent: int = 0,
indent_delta: int = 0,
header: bool = False,
align: Optional[str] = None,
tags: Optional[Dict[int, str]] = None,
nindex_levels: int = 0,
) -> None:
if tags is None:
tags = {}
if align is None:
self.write("<tr>", indent)
else:
self.write(f'<tr style="text-align: {align};">', indent)
indent += indent_delta
for i, s in enumerate(line):
val_tag = tags.get(i, None)
if header or (self.bold_rows and i < nindex_levels):
self.write_th(s, indent=indent, header=header, tags=val_tag)
else:
self.write_td(s, indent, tags=val_tag)
indent -= indent_delta
self.write("</tr>", indent)
def render(self) -> List[str]:
self._write_table()
if self.should_show_dimensions:
by = chr(215) # ×
self.write(
f"<p>{len(self.frame)} rows {by} {len(self.frame.columns)} columns</p>"
)
return self.elements
def write_result(self, buf: IO[str]) -> None:
buffer_put_lines(buf, self.render())
def _write_table(self, indent: int = 0) -> None:
_classes = ["dataframe"] # Default class.
use_mathjax = get_option("display.html.use_mathjax")
if not use_mathjax:
_classes.append("tex2jax_ignore")
if self.classes is not None:
if isinstance(self.classes, str):
self.classes = self.classes.split()
if not isinstance(self.classes, (list, tuple)):
raise TypeError(
"classes must be a string, list, "
f"or tuple, not {type(self.classes)}"
)
_classes.extend(self.classes)
if self.table_id is None:
id_section = ""
else:
id_section = f' id="{self.table_id}"'
self.write(
f'<table border="{self.border}" class="{" ".join(_classes)}"{id_section}>',
indent,
)
if self.fmt.header or self.show_row_idx_names:
self._write_header(indent + self.indent_delta)
self._write_body(indent + self.indent_delta)
self.write("</table>", indent)
def _write_col_header(self, indent: int) -> None:
truncate_h = self.fmt.truncate_h
if isinstance(self.columns, MultiIndex):
template = 'colspan="{span:d}" halign="left"'
if self.fmt.sparsify:
# GH3547
sentinel = lib.no_default
else:
sentinel = False
levels = self.columns.format(sparsify=sentinel, adjoin=False, names=False)
level_lengths = get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
for lnum, (records, values) in enumerate(zip(level_lengths, levels)):
if truncate_h:
# modify the header lines
ins_col = self.fmt.tr_col_num
if self.fmt.sparsify:
recs_new = {}
# Increment tags after ... col.
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
elif tag + span > ins_col:
recs_new[tag] = span + 1
if lnum == inner_lvl:
values = (
values[:ins_col] + ("...",) + values[ins_col:]
)
else:
# sparse col headers do not receive a ...
values = (
values[:ins_col]
+ (values[ins_col - 1],)
+ values[ins_col:]
)
else:
recs_new[tag] = span
# if ins_col lies between tags, all col headers
# get ...
if tag + span == ins_col:
recs_new[ins_col] = 1
values = values[:ins_col] + ("...",) + values[ins_col:]
records = recs_new
inner_lvl = len(level_lengths) - 1
if lnum == inner_lvl:
records[ins_col] = 1
else:
recs_new = {}
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
else:
recs_new[tag] = span
recs_new[ins_col] = 1
records = recs_new
values = values[:ins_col] + ["..."] + values[ins_col:]
# see gh-22579
# Column Offset Bug with to_html(index=False) with
# MultiIndex Columns and Index.
# Initially fill row with blank cells before column names.
# TODO: Refactor to remove code duplication with code
# block below for standard columns index.
row = [""] * (self.row_levels - 1)
if self.fmt.index or self.show_col_idx_names:
# see gh-22747
# If to_html(index_names=False) do not show columns
# index names.
# TODO: Refactor to use _get_column_name_list from
# DataFrameFormatter class and create a
# _get_formatted_column_labels function for code
# parity with DataFrameFormatter class.
if self.fmt.show_index_names:
name = self.columns.names[lnum]
row.append(pprint_thing(name or ""))
else:
row.append("")
tags = {}
j = len(row)
for i, v in enumerate(values):
if i in records:
if records[i] > 1:
tags[j] = template.format(span=records[i])
else:
continue
j += 1
row.append(v)
self.write_tr(row, indent, self.indent_delta, tags=tags, header=True)
else:
# see gh-22579
# Column misalignment also occurs for
# a standard index when the columns index is named.
# Initially fill row with blank cells before column names.
# TODO: Refactor to remove code duplication with code block
# above for columns MultiIndex.
row = [""] * (self.row_levels - 1)
if self.fmt.index or self.show_col_idx_names:
# see gh-22747
# If to_html(index_names=False) do not show columns
# index names.
# TODO: Refactor to use _get_column_name_list from
# DataFrameFormatter class.
if self.fmt.show_index_names:
row.append(self.columns.name or "")
else:
row.append("")
row.extend(self._get_columns_formatted_values())
align = self.fmt.justify
if truncate_h:
ins_col = self.row_levels + self.fmt.tr_col_num
row.insert(ins_col, "...")
self.write_tr(row, indent, self.indent_delta, header=True, align=align)
def _write_row_header(self, indent: int) -> None:
truncate_h = self.fmt.truncate_h
row = [x if x is not None else "" for x in self.frame.index.names] + [""] * (
self.ncols + (1 if truncate_h else 0)
)
self.write_tr(row, indent, self.indent_delta, header=True)
def _write_header(self, indent: int) -> None:
self.write("<thead>", indent)
if self.fmt.header:
self._write_col_header(indent + self.indent_delta)
if self.show_row_idx_names:
self._write_row_header(indent + self.indent_delta)
self.write("</thead>", indent)
def _get_formatted_values(self) -> Dict[int, List[str]]:
with option_context("display.max_colwidth", None):
fmt_values = {i: self.fmt._format_col(i) for i in range(self.ncols)}
return fmt_values
def _write_body(self, indent: int) -> None:
self.write("<tbody>", indent)
fmt_values = self._get_formatted_values()
# write values
if self.fmt.index and isinstance(self.frame.index, MultiIndex):
self._write_hierarchical_rows(fmt_values, indent + self.indent_delta)
else:
self._write_regular_rows(fmt_values, indent + self.indent_delta)
self.write("</tbody>", indent)
def _write_regular_rows(
self, fmt_values: Mapping[int, List[str]], indent: int
) -> None:
truncate_h = self.fmt.truncate_h
truncate_v = self.fmt.truncate_v
nrows = len(self.fmt.tr_frame)
if self.fmt.index:
fmt = self.fmt._get_formatter("__index__")
if fmt is not None:
index_values = self.fmt.tr_frame.index.map(fmt)
else:
index_values = self.fmt.tr_frame.index.format()
row: List[str] = []
for i in range(nrows):
if truncate_v and i == (self.fmt.tr_row_num):
str_sep_row = ["..."] * len(row)
self.write_tr(
str_sep_row,
indent,
self.indent_delta,
tags=None,
nindex_levels=self.row_levels,
)
row = []
if self.fmt.index:
row.append(index_values[i])
# see gh-22579
# Column misalignment also occurs for
# a standard index when the columns index is named.
# Add blank cell before data cells.
elif self.show_col_idx_names:
row.append("")
row.extend(fmt_values[j][i] for j in range(self.ncols))
if truncate_h:
dot_col_ix = self.fmt.tr_col_num + self.row_levels
row.insert(dot_col_ix, "...")
self.write_tr(
row, indent, self.indent_delta, tags=None, nindex_levels=self.row_levels
)
def _write_hierarchical_rows(
self, fmt_values: Mapping[int, List[str]], indent: int
) -> None:
template = 'rowspan="{span}" valign="top"'
truncate_h = self.fmt.truncate_h
truncate_v = self.fmt.truncate_v
frame = self.fmt.tr_frame
nrows = len(frame)
idx_values = frame.index.format(sparsify=False, adjoin=False, names=False)
idx_values = list(zip(*idx_values))
if self.fmt.sparsify:
# GH3547
sentinel = lib.no_default
levels = frame.index.format(sparsify=sentinel, adjoin=False, names=False)
level_lengths = get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
if truncate_v:
# Insert ... row and adjust idx_values and
# level_lengths to take this into account.
ins_row = self.fmt.tr_row_num
# cast here since if truncate_v is True, self.fmt.tr_row_num is not None
ins_row = cast(int, ins_row)
inserted = False
for lnum, records in enumerate(level_lengths):
rec_new = {}
for tag, span in list(records.items()):
if tag >= ins_row:
rec_new[tag + 1] = span
elif tag + span > ins_row:
rec_new[tag] = span + 1
# GH 14882 - Make sure insertion done once
if not inserted:
dot_row = list(idx_values[ins_row - 1])
dot_row[-1] = "..."
idx_values.insert(ins_row, tuple(dot_row))
inserted = True
else:
dot_row = list(idx_values[ins_row])
dot_row[inner_lvl - lnum] = "..."
idx_values[ins_row] = tuple(dot_row)
else:
rec_new[tag] = span
# If ins_row lies between tags, all cols idx cols
# receive ...
if tag + span == ins_row:
rec_new[ins_row] = 1
if lnum == 0:
idx_values.insert(
ins_row, tuple(["..."] * len(level_lengths))
)
# GH 14882 - Place ... in correct level
elif inserted:
dot_row = list(idx_values[ins_row])
dot_row[inner_lvl - lnum] = "..."
idx_values[ins_row] = tuple(dot_row)
level_lengths[lnum] = rec_new
level_lengths[inner_lvl][ins_row] = 1
for ix_col in range(len(fmt_values)):
fmt_values[ix_col].insert(ins_row, "...")
nrows += 1
for i in range(nrows):
row = []
tags = {}
sparse_offset = 0
j = 0
for records, v in zip(level_lengths, idx_values[i]):
if i in records:
if records[i] > 1:
tags[j] = template.format(span=records[i])
else:
sparse_offset += 1
continue
j += 1
row.append(v)
row.extend(fmt_values[j][i] for j in range(self.ncols))
if truncate_h:
row.insert(
self.row_levels - sparse_offset + self.fmt.tr_col_num, "..."
)
self.write_tr(
row,
indent,
self.indent_delta,
tags=tags,
nindex_levels=len(levels) - sparse_offset,
)
else:
row = []
for i in range(len(frame)):
if truncate_v and i == (self.fmt.tr_row_num):
str_sep_row = ["..."] * len(row)
self.write_tr(
str_sep_row,
indent,
self.indent_delta,
tags=None,
nindex_levels=self.row_levels,
)
idx_values = list(
zip(*frame.index.format(sparsify=False, adjoin=False, names=False))
)
row = []
row.extend(idx_values[i])
row.extend(fmt_values[j][i] for j in range(self.ncols))
if truncate_h:
row.insert(self.row_levels + self.fmt.tr_col_num, "...")
self.write_tr(
row,
indent,
self.indent_delta,
tags=None,
nindex_levels=frame.index.nlevels,
)
class NotebookFormatter(HTMLFormatter):
"""
Internal class for formatting output data in html for display in Jupyter
Notebooks. This class is intended for functionality specific to
DataFrame._repr_html_() and DataFrame.to_html(notebook=True)
"""
def _get_formatted_values(self) -> Dict[int, List[str]]:
return {i: self.fmt._format_col(i) for i in range(self.ncols)}
def _get_columns_formatted_values(self) -> List[str]:
return self.columns.format()
def write_style(self) -> None:
# We use the "scoped" attribute here so that the desired
# style properties for the data frame are not then applied
# throughout the entire notebook.
template_first = """\
<style scoped>"""
template_last = """\
</style>"""
template_select = """\
.dataframe %s {
%s: %s;
}"""
element_props = [
("tbody tr th:only-of-type", "vertical-align", "middle"),
("tbody tr th", "vertical-align", "top"),
]
if isinstance(self.columns, MultiIndex):
element_props.append(("thead tr th", "text-align", "left"))
if self.show_row_idx_names:
element_props.append(
("thead tr:last-of-type th", "text-align", "right")
)
else:
element_props.append(("thead th", "text-align", "right"))
template_mid = "\n\n".join(map(lambda t: template_select % t, element_props))
template = dedent("\n".join((template_first, template_mid, template_last)))
self.write(template)
def render(self) -> List[str]:
self.write("<div>")
self.write_style()
super().render()
self.write("</div>")
return self.elements
|
bsd-3-clause
|
nclsHart/glances
|
setup.py
|
1
|
3348
|
#!/usr/bin/env python
import glob
import os
import sys
from setuptools import setup
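# Heuristic: on typical Linux filesystems the real root directory has inode 2,
# so a different inode for '/' suggests the script is running inside a chroot.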
is_chroot = os.stat('/').st_ino != 2
def get_data_files():
data_files = [
('share/doc/glances', ['AUTHORS', 'COPYING', 'NEWS', 'README.rst',
'conf/glances.conf', 'docs/glances-doc.html']),
('share/doc/glances/images', glob.glob('docs/images/*.png')),
('share/man/man1', ['man/glances.1'])
]
if hasattr(sys, 'real_prefix'): # virtualenv
conf_path = os.path.join(sys.prefix, 'etc', 'glances')
elif os.name == 'posix' and (os.getuid() == 0 or is_chroot):
# Unix-like + root privileges/chroot environment
if 'bsd' in sys.platform:
conf_path = os.path.join(sys.prefix, 'etc', 'glances')
elif 'linux' in sys.platform:
conf_path = os.path.join('/etc', 'glances')
elif 'darwin' in sys.platform:
conf_path = os.path.join('/usr/local', 'etc', 'glances')
elif 'win32' in sys.platform: # windows
conf_path = os.path.join(os.environ.get('APPDATA'), 'glances')
else: # Unix-like + per-user install
conf_path = os.path.join('etc', 'glances')
data_files.append((conf_path, ['conf/glances.conf']))
for mo in glob.glob('i18n/*/LC_MESSAGES/*.mo'):
data_files.append(
(os.path.dirname(mo).replace('i18n/', 'share/locale/'), [mo]))
return data_files
def get_requires():
requires = ['psutil>=2.0.0']
if sys.platform.startswith('win'):
requires += ['colorconsole']
if sys.version_info < (2, 7):
requires += ['argparse']
return requires
setup(
name='Glances',
version='2.3',
description="A cross-platform curses-based monitoring tool",
long_description=open('README.rst').read(),
author='Nicolas Hennion',
author_email='[email protected]',
url='https://github.com/nicolargo/glances',
# download_url='https://s3.amazonaws.com/glances/glances-2.3.tar.gz',
license="LGPL",
keywords="cli curses monitoring system",
install_requires=get_requires(),
extras_require={
'WEB': ['bottle'],
'SENSORS': ['py3sensors'],
'BATINFO': ['batinfo'],
'SNMP': ['pysnmp'],
'CHART': ['matplotlib'],
'BROWSER': ['zeroconf>=0.16', 'netifaces'],
'RAID': ['pymdstat'],
'DOCKER': ['docker-py'],
'EXPORT': ['influxdb', 'statsd'],
'ACTION': ['pystache']
},
packages=['glances'],
include_package_data=True,
data_files=get_data_files(),
test_suite="unitest.py",
entry_points={"console_scripts": ["glances=glances:main"]},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console :: Curses',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'
]
)
|
lgpl-3.0
|
hlin117/statsmodels
|
statsmodels/sandbox/examples/ex_mixed_lls_0.py
|
34
|
5233
|
# -*- coding: utf-8 -*-
"""Example using OneWayMixed
Created on Sat Dec 03 10:15:55 2011
Author: Josef Perktold
This example constructs a linear model with individual specific random
effects and random coefficients, and uses OneWayMixed to estimate it.
"""
from __future__ import print_function
import numpy as np
from statsmodels.sandbox.panel.mixed import OneWayMixed, Unit
examples = ['ex1']
if 'ex1' in examples:
#np.random.seed(54321)
np.random.seed(978326)
nsubj = 2000
units = []
nobs_i = 4 #number of observations per unit, changed below
nx = 4 #number fixed effects
nz = 2 ##number random effects
beta = np.ones(nx)
gamma = 0.5 * np.ones(nz) #mean of random effect
gamma[0] = 0
gamma_re_true = []
for i in range(nsubj):
#create data for one unit
#random effect/coefficient
gamma_re = gamma + 0.2 * np.random.standard_normal(nz)
#store true parameter for checking
gamma_re_true.append(gamma_re)
#for testing unbalanced case, let's change nobs per unit
if i > nsubj//4:
nobs_i = 6
#generate exogenous variables
X = np.random.standard_normal((nobs_i, nx))
Z = np.random.standard_normal((nobs_i, nz-1))
Z = np.column_stack((np.ones(nobs_i), Z))
noise = 0.1 * np.random.randn(nobs_i) #sig_e = 0.1
#generate endogenous variable
Y = np.dot(X, beta) + np.dot(Z, gamma_re) + noise
#add random effect design matrix also to fixed effects to
#capture the mean
#this seems to be necessary to force mean of RE to zero !?
#(It's not required for estimation but interpretation of random
        #effects covariance matrix changes - still need to check details.)
X = np.hstack((X,Z))
#create units and append to list
unit = Unit(Y, X, Z)
units.append(unit)
m = OneWayMixed(units)
import time
t0 = time.time()
m.initialize()
res = m.fit(maxiter=100, rtol=1.0e-5, params_rtol=1e-6, params_atol=1e-6)
t1 = time.time()
print('time for initialize and fit', t1-t0)
print('number of iterations', m.iterations)
    #print(dir(m))
    #print(vars(m))
print('\nestimates for fixed effects')
print(m.a)
print(m.params)
bfixed_cov = m.cov_fixed()
print('beta fixed standard errors')
print(np.sqrt(np.diag(bfixed_cov)))
print(m.bse)
b_re = m.params_random_units
print('RE mean:', b_re.mean(0))
print('RE columns std', b_re.std(0))
print('np.cov(b_re, rowvar=0), sample statistic')
print(np.cov(b_re, rowvar=0))
print('std of above')
print(np.sqrt(np.diag(np.cov(b_re, rowvar=0))))
print('m.cov_random()')
print(m.cov_random())
print('std of above')
print(res.std_random())
print(np.sqrt(np.diag(m.cov_random())))
print('\n(non)convergence of llf')
print(m.history['llf'][-4:])
print('convergence of parameters')
    #print(np.diff(np.vstack(m.history[-4:])[:,1:],axis=0))
print(np.diff(np.vstack(m.history['params'][-4:]),axis=0))
print('convergence of D')
print(np.diff(np.array(m.history['D'][-4:]), axis=0))
#zdotb = np.array([np.dot(unit.Z, unit.b) for unit in m.units])
zb = np.array([(unit.Z * unit.b[None,:]).sum(0) for unit in m.units])
'''if Z is not included in X:
>>> np.dot(b_re.T, b_re)/100
array([[ 0.03270611, -0.00916051],
[-0.00916051, 0.26432783]])
>>> m.cov_random()
array([[ 0.0348722 , -0.00909159],
[-0.00909159, 0.26846254]])
>>> #note cov_random doesn't subtract mean!
'''
print('\nchecking the random effects distribution and prediction')
gamma_re_true = np.array(gamma_re_true)
print('mean of random effect true', gamma_re_true.mean(0))
print('mean from fixed effects ', m.params[-2:])
print('mean of estimated RE ', b_re.mean(0))
print('')
absmean_true = np.abs(gamma_re_true).mean(0)
mape = ((m.params[-2:] + b_re) / gamma_re_true - 1).mean(0)*100
mean_abs_perc = np.abs((m.params[-2:] + b_re) - gamma_re_true).mean(0) \
/ absmean_true*100
median_abs_perc = np.median(np.abs((m.params[-2:] + b_re) - gamma_re_true), 0) \
/ absmean_true*100
rmse_perc = ((m.params[-2:] + b_re) - gamma_re_true).std(0) \
/ absmean_true*100
print('mape ', mape)
print('mean_abs_perc ', mean_abs_perc)
print('median_abs_perc', median_abs_perc)
print('rmse_perc (std)', rmse_perc)
from numpy.testing import assert_almost_equal
#assert is for n_units=100 in original example
#I changed random number generation, so this won't work anymore
#assert_almost_equal(rmse_perc, [ 34.14783884, 11.6031684 ], decimal=8)
#now returns res
print(res.llf) #based on MLE, does not include constant
print(res.tvalues)
print(res.pvalues)
print(res.t_test([1,-1,0,0,0,0]))
print('test mean of both random effects variables is zero')
print(res.f_test([[0,0,0,0,1,0], [0,0,0,0,0,1]]))
plots = res.plot_random_univariate(bins=50)
fig = res.plot_scatter_pairs(0, 1)
import matplotlib.pyplot as plt
plt.show()
|
bsd-3-clause
|
sigopt/sigopt_sklearn
|
test/test_search.py
|
1
|
7251
|
from mock import MagicMock, patch
import pytest
import warnings
import sklearn.datasets
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import SVC
import sigopt
import sigopt.objects
from sigopt_sklearn.search import SigOptSearchCV
from test_util import random_assignments
warnings.simplefilter("error", append=True)
GradientBoostingClassifier_PARAM_DOMAIN = {
'n_estimators': (20, 500),
'min_samples_split': (2, 5),
'min_samples_leaf': (1, 3),
'learning_rate': (0.01, 1.0)
}
GradientBoostingClassifier_EXPERIMENT_DEF = {
'name': 'GradientBoostingClassifier (sklearn)',
'parameters': [
{
'type': 'int',
'name': 'n_estimators',
'bounds': {
'min': 20,
'max': 500,
},
},
{
'type': 'int',
'name': 'min_samples_split',
'bounds': {
'min': 2,
'max': 5,
},
},
{
'type': 'int',
'name': 'min_samples_leaf',
'bounds': {
'min': 1,
'max': 3,
},
},
{
'type': 'double',
'name': 'learning_rate',
'bounds': {
'min': 0.01,
'max': 1.0,
},
},
],
}
SVC_PARAM_DOMAIN = {
'C': {'little': 1e-3, 'some': 1, 'lots': 1e3}
}
SVC_EXPERIMENT_DEF = {
'name': 'SVC with fancy categoricals',
'parameters': [
{
'type': 'categorical',
'name': 'C',
'categorical_values': [
{'name': 'little'},
{'name': 'some'},
{'name': 'lots'}
]
}
]
}
def zero_corner(experiment_definition):
"""Take the parameters corresponding to the zero corner. All of the minimums and the first categories."""
return {p['name']: (p['bounds']['min'] if p['type'] in ['int', 'double'] else p['categorical_values'][0]['name'])
for p in experiment_definition['parameters']}
def mock_connection(experiment_definition):
return MagicMock(return_value=MagicMock(
experiments=MagicMock(return_value=MagicMock(
create=MagicMock(return_value=MagicMock(
id="exp_id",
)),
fetch=MagicMock(return_value=MagicMock(
progress=MagicMock(
best_observation=MagicMock(
assignments=MagicMock(
to_json=MagicMock(return_value=zero_corner(experiment_definition)),
))),
)),
suggestions=MagicMock(return_value=MagicMock(
create=MagicMock(return_value=MagicMock(
assignments=MagicMock(
to_json=MagicMock(side_effect=lambda: random_assignments(experiment_definition))),
id="sugg_id",
)),
)),
observations=MagicMock(return_value=MagicMock(
create=MagicMock(return_value=MagicMock(
id="obs_id",
value=52,
)),
)),
best_assignments=MagicMock(return_value=MagicMock(
fetch=MagicMock(return_value=MagicMock(
data=[MagicMock(
assignments=MagicMock(
to_json=MagicMock(return_value=zero_corner(experiment_definition)),
)
)],
)),
)),
))
))
class TestSearch(object):
def test_create(self):
SigOptSearchCV(
estimator=GradientBoostingClassifier,
param_domains=GradientBoostingClassifier_PARAM_DOMAIN,
client_token='client_token'
)
def test_no_token(self):
with pytest.raises(ValueError):
SigOptSearchCV(estimator=GradientBoostingClassifier, param_domains=GradientBoostingClassifier_PARAM_DOMAIN)
@patch('sigopt.Connection', new=mock_connection(GradientBoostingClassifier_EXPERIMENT_DEF))
def test_search(self):
conn = sigopt.Connection()
n_iter = 5
folds = 3
cv = SigOptSearchCV(
estimator=GradientBoostingClassifier(),
param_domains=GradientBoostingClassifier_PARAM_DOMAIN,
client_token='client_token',
n_iter=n_iter,
cv=folds
)
assert len(conn.experiments().create.mock_calls) == 0
assert len(conn.experiments().fetch.mock_calls) == 0
assert len(conn.experiments().suggestions.create.mock_calls) == 0
assert len(conn.experiments().observations.create.mock_calls) == 0
data = sklearn.datasets.load_iris()
cv.fit(data['data'], data['target'])
assert len(conn.experiments().create.mock_calls) == 1
create_definition = conn.experiments().create.call_args[1]
assert create_definition['name'] == GradientBoostingClassifier_EXPERIMENT_DEF['name']
assert len(create_definition['parameters']) == len(GradientBoostingClassifier_EXPERIMENT_DEF['parameters'])
for p in GradientBoostingClassifier_EXPERIMENT_DEF['parameters']:
assert p in create_definition['parameters']
assert len(conn.experiments().best_assignments().fetch.mock_calls) == 1
assert len(conn.experiments().suggestions().create.mock_calls) == n_iter
assert len(conn.experiments().observations().create.mock_calls) == n_iter
assert cv.best_params_ == zero_corner(GradientBoostingClassifier_EXPERIMENT_DEF)
@patch('sigopt.Connection', new=mock_connection(GradientBoostingClassifier_EXPERIMENT_DEF))
def test_provide_experiment(self):
conn = sigopt.Connection()
n_iter = 5
folds = 3
experiment = sigopt.objects.Experiment({'id': '1'})
cv = SigOptSearchCV(
estimator=GradientBoostingClassifier(),
param_domains=GradientBoostingClassifier_PARAM_DOMAIN,
client_token='client_token',
n_iter=n_iter,
cv=folds,
experiment=experiment,
)
data = sklearn.datasets.load_iris()
cv.fit(data['data'], data['target'])
assert len(conn.experiments().create.mock_calls) == 0
assert len(conn.experiments().suggestions().create.mock_calls) == n_iter
assert len(conn.experiments().observations().create.mock_calls) == n_iter
@patch('sigopt.Connection', new=mock_connection(SVC_EXPERIMENT_DEF))
def test_non_string_categorical(self):
data = sklearn.datasets.load_iris()
clf = SigOptSearchCV(SVC(gamma='auto'), SVC_PARAM_DOMAIN, client_token='client_token', n_iter=5, cv=3)
clf.fit(data['data'], data['target'])
def test_bad_param_range1(self):
with pytest.raises(Exception):
clf = SigOptSearchCV(
SVC(),
{
'bad_param_range': (1,),
'hidden_layer_sizes': {'5': (5,), '5,4,3': (5, 4, 3)}
},
client_token='client_token',
n_iter=5
)
clf._transform_param_domains(clf.param_domains)
def test_bad_param_range2(self):
with pytest.raises(Exception):
clf = SigOptSearchCV(
SVC(),
{
'bad_param_range': (1, 2, 3),
'hidden_layer_sizes': {'5': (5,), '5,4,3': (5, 4, 3)}
},
client_token='client_token',
n_iter=5
)
clf._transform_param_domains(clf.param_domains)
def test_warn_param_range_list(self):
with pytest.warns(UserWarning):
clf = SigOptSearchCV(
SVC(),
{'max_iter': [5, 10]},
client_token='client_token',
n_iter=5
)
clf._transform_param_domains(clf.param_domains)
def test_bad_param_range_not_iterable(self):
with pytest.raises(Exception):
clf = SigOptSearchCV(
SVC(),
{'max_iter': 15},
client_token='client_token',
n_iter=5
)
clf._transform_param_domains(clf.param_domains)
|
mit
|
nicolas998/Op_Radar
|
06_Codigos/Actualiza_MeanRain_Hist.py
|
1
|
4188
|
#!/usr/bin/env python
from wmf import wmf
import numpy as np
import pickle
import pandas as pnd
import pylab as pl
import argparse
import textwrap
import netCDF4
from multiprocessing import Pool
import os
#-------------------------------------------------------------------
#LOCAL FUNCTIONS
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#ARGUMENT PARSER
#-------------------------------------------------------------------
#Input parameters for the tracer
parser=argparse.ArgumentParser(
prog='Actualiza_Caudales_Hist',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
Actualiza la lluvia media sobre la cuenca estimada por alguna
metodologia.
'''))
#Required parameters
parser.add_argument("rutaRain",help="(Required) Path where the rainfall files are located ")
parser.add_argument("-n", "--newhist", help="(Optional) With this option the script creates a new starting point for the history record",
	action = 'store_true', default = False)
parser.add_argument("-i", "--fechai", help="(Optional) Start date of the new history record (YYYY-MM-DD HH:MM)")
parser.add_argument("-f", "--fechaf", help="(Optional) End date of the new history record (YYYY-MM-DD HH:MM)")
parser.add_argument("-v","--verbose",help="(Optional) Makes the script report the percentage of execution completed",
	action = 'store_true', default = False)
args=parser.parse_args()
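#Example invocation (illustrative path and dates, not part of the original script):
#  python Actualiza_MeanRain_Hist.py /ruta/lluvia/ -n -i "2016-01-01 00:00" -f "2016-02-01 00:00"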
nombre = 'Mean_Rain_History.rainh'
#-------------------------------------------------------------------
#CHECK WHETHER A HISTORY FILE ALREADY EXISTS OR WHETHER IT SHOULD BE OVERWRITTEN
#-------------------------------------------------------------------
if args.newhist:
	#Start and end dates
FechaI = args.fechai
FechaF = args.fechaf
	#Build the empty DataFrame from the start date up to the execution point
DifIndex = pnd.date_range(FechaI, FechaF, freq='5min')
Rh = pnd.DataFrame(np.zeros((DifIndex.size, 3))*np.nan,
index=pnd.date_range(FechaI, FechaF, freq='5min'),
columns = ['media','baja','alta'])
	#Check the rainfall folder for an existing history file
Lhist = os.listdir(args.rutaRain)
try:
pos = Lhist.index(nombre)
		flag = raw_input('Warning: the history file '+nombre+' already exists. Overwrite it and lose its history? (S = yes / N = no): ')
if flag == 'S':
flag = True
else:
flag = False
except:
flag = True
	#Save it
if flag:
Rh.to_msgpack(args.rutaRain + nombre)
#-------------------------------------------------------------------
#UPDATE THE RAINFALL HISTORY
#-------------------------------------------------------------------
#List the simulated rainfall files (no repeats)
Lsim = os.listdir('/home/nicolas/Operacional/Op_Radar/03_Simulaciones/01_Rain/')
Lsim = [i for i in Lsim if i.endswith('hdr')]
names = [i.split('_')[1][:-4] for i in Lsim]
#check that the base file is in the folder
try:
	#read the rainfall from the header files
R = []
for n,l in zip(names, Lsim):
r = pnd.read_csv(args.rutaRain + l,
header = 5,
index_col = 1,
parse_dates = True,
usecols=(2,3))
R.append(r.values[0].tolist())
R = np.array(R)
	#Build a DataFrame from it
Rt = pnd.DataFrame(R.T,
index=[r.index[0],],
columns=names)
	#Read the rainfall history
Rhist = pnd.read_msgpack(args.rutaRain + nombre)
#Rhist = pnd.read_msgpack('Mean_Rain_History.rainh')
	#find the missing stretch between the two
Gap = pnd.date_range(Rhist.index[-1], Rt.index[0], freq='5min')
	#Build the gap segment filled with missing values
GapData = pnd.DataFrame(np.zeros((Gap.size - 2, 3))*np.nan,
index= Gap[1:-1],
columns = Rhist.columns)
	#append the information
Rhist = Rhist.append(GapData)
Rhist = Rhist.append(Rt)
	#Save the history file
Rhist.to_msgpack(args.rutaRain + nombre)
	#Notice
	print 'Notice: the historical mean precipitation file has been updated: '+nombre
except:
	print 'Notice: the precipitation history file '+nombre+' was not found, so nothing was updated'
|
gpl-3.0
|
ishanic/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
272
|
7798
|
"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
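    # (a callable fun is expected to return g(x) together with the mean of its
    # derivative g'(x) over the last axis, which is what g_test does for g(x) = x**3)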
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
|
bsd-3-clause
|
myt00seven/svrg
|
bk/svrg_bn_bk/run_on_sparse_features.py
|
1
|
2589
|
import sys, os
import numpy as np
import matplotlib.pyplot as plt
import theano
import theano.tensor as T
import lasagne
from load_dataset import *
from neuralnet import train
from deep import DeepAutoEncoder
from sparse_autoencoder import SparseAutoEncoder
from neuralclassifier import NeuralClassifier
def main():
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
input_var = T.matrix('inputs')
target_var = T.ivector('targets')
n_hidden = 500
sparse_model = 'adam_sparse_3.0_not_denoising'
classify_model = 'adam_classif_on_sparse'
network = SparseAutoEncoder(n_input=X_train.shape[1], n_hidden=n_hidden, input_var=input_var)
with np.load('models/model_%s.npz' % sparse_model) as f:
param_values = [f['arr_%d' % j] for j in range(len(f.files))]
lasagne.layers.set_all_param_values(network.output_layer, param_values)
output_layer = lasagne.layers.DenseLayer(network.hidden_layer, num_units=10, nonlinearity=lasagne.nonlinearities.softmax)
prediction = lasagne.layers.get_output(output_layer)
lambd = 0.0
l2_reg = lasagne.regularization.regularize_layer_params(output_layer, lasagne.regularization.l2)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var) + lambd * l2_reg
loss = loss.mean()
params = lasagne.layers.get_all_params(output_layer, trainable=True)
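    # drop the first two entries (the pretrained encoder's W and b) so that only the new
    # softmax layer is updated and the sparse features stay frozen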
params = params[2:]
updates = lasagne.updates.adam(loss, params, learning_rate=0.01)
train_fn = theano.function([input_var, target_var], loss, updates=updates)
if X_val is not None:
test_prediction = lasagne.layers.get_output(output_layer, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction, target_var)
test_loss = test_loss.mean()
val_fn = theano.function([input_var, target_var], test_loss)
else:
val_fn = None
n_epochs = 100
batch_size = 500
train_error, validation_error = train(
X_train, y_train, X_val, y_val,
train_fn, val_fn,
n_epochs, batch_size=batch_size#, toprint=it
)
plt.plot(validation_error, label=classify_model)
np.savez('models/model_%s.npz' % classify_model, *lasagne.layers.get_all_param_values(output_layer))
np.savez('models/model_%s_val_error.npz' % classify_model, validation_error)
accuracy = (lasagne.layers.get_output(output_layer, X_test, deterministic=True).eval().argmax(axis=1) == y_test).mean()
print "Model: {}; accuracy: {}".format(classify_model, accuracy)
if __name__ == '__main__':
    main()
|
mit
|
Myasuka/scikit-learn
|
benchmarks/bench_tree.py
|
297
|
3617
|
"""
To run this, you'll need to have scikit-learn installed.
It runs two benchmarks.
First, we fix a training set, increase the number of
samples to classify and plot the time taken as a function
of the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
gfyoung/pandas
|
pandas/tests/series/indexing/test_get.py
|
4
|
4324
|
import numpy as np
import pytest
import pandas as pd
from pandas import Series
import pandas._testing as tm
def test_get():
# GH 6383
s = Series(
np.array(
[
43,
48,
60,
48,
50,
51,
50,
45,
57,
48,
56,
45,
51,
39,
55,
43,
54,
52,
51,
54,
]
)
)
result = s.get(25, 0)
expected = 0
assert result == expected
s = Series(
np.array(
[
43,
48,
60,
48,
50,
51,
50,
45,
57,
48,
56,
45,
51,
39,
55,
43,
54,
52,
51,
54,
]
),
index=pd.Float64Index(
[
25.0,
36.0,
49.0,
64.0,
81.0,
100.0,
121.0,
144.0,
169.0,
196.0,
1225.0,
1296.0,
1369.0,
1444.0,
1521.0,
1600.0,
1681.0,
1764.0,
1849.0,
1936.0,
]
),
)
result = s.get(25, 0)
expected = 43
assert result == expected
# GH 7407
# with a boolean accessor
df = pd.DataFrame({"i": [0] * 3, "b": [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default="Missing")
assert result == "Missing"
vc = df.b.value_counts()
result = vc.get(False, default="Missing")
assert result == 3
result = vc.get(True, default="Missing")
assert result == "Missing"
def test_get_nan():
# GH 8569
s = pd.Float64Index(range(10)).to_series()
assert s.get(np.nan) is None
assert s.get(np.nan, default="Missing") == "Missing"
def test_get_nan_multiple():
# GH 8569
# ensure that fixing "test_get_nan" above hasn't broken get
# with multiple elements
s = pd.Float64Index(range(10)).to_series()
idx = [2, 30]
assert s.get(idx) is None
idx = [2, np.nan]
assert s.get(idx) is None
# GH 17295 - all missing keys
idx = [20, 30]
assert s.get(idx) is None
idx = [np.nan, np.nan]
assert s.get(idx) is None
def test_get_with_default():
# GH#7725
d0 = ["a", "b", "c", "d"]
d1 = np.arange(4, dtype="int64")
others = ["e", 10]
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
assert s.get(i) == d
assert s.get(i, d) == d
assert s.get(i, "z") == d
for other in others:
assert s.get(other, "z") == "z"
assert s.get(other, other) == other
@pytest.mark.parametrize(
"arr",
[np.random.randn(10), tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")],
)
def test_get2(arr):
# TODO: better name, possibly split
# GH#21260
ser = Series(arr, index=[2 * i for i in range(len(arr))])
assert ser.get(4) == ser.iloc[2]
result = ser.get([4, 6])
expected = ser.iloc[[2, 3]]
tm.assert_series_equal(result, expected)
result = ser.get(slice(2))
expected = ser.iloc[[0, 1]]
tm.assert_series_equal(result, expected)
assert ser.get(-1) is None
assert ser.get(ser.index.max() + 1) is None
ser = Series(arr[:6], index=list("abcdef"))
assert ser.get("c") == ser.iloc[2]
result = ser.get(slice("b", "d"))
expected = ser.iloc[[1, 2, 3]]
tm.assert_series_equal(result, expected)
result = ser.get("Z")
assert result is None
assert ser.get(4) == ser.iloc[4]
assert ser.get(-1) == ser.iloc[-1]
assert ser.get(len(ser)) is None
# GH#21257
ser = Series(arr)
ser2 = ser[::2]
assert ser2.get(1) is None
|
bsd-3-clause
|
gandalf15/CS2521-ALGORITHMIC-PROBLEM-SOLVING
|
convex-hull.py
|
1
|
9049
|
import random
import time
import matplotlib.pyplot as plt
from operator import itemgetter
def gen_rand_uniq_points(x_range, y_range, n):
try:
x_points = random.sample(range(0, x_range), n)
y_points = random.sample(range(0, y_range), n)
points = []
for i in range(0,n):
points.append([x_points[i],y_points[i]])
return points
except ValueError:
print('Sample size exceeded population size.')
def get_y_intersection(point_a, point_b, x_of_line):
try:
        #define where the vertical line is
x_coord_of_line = x_of_line
# calculate the slope between two points
slope = float((point_b[1]-point_a[1]))/float((point_b[0]-point_a[0]))
#calculate the point where line between points a and b intersects with vertical line
y_coord_of_intersection = point_a[1]+((x_coord_of_line - point_a[0])*slope)
#return float y_coord_of_intersection
return y_coord_of_intersection
except ZeroDivisionError:
print ("After 7.5 million years....",
"The Answer to the Ultimate Question of Life, The Universe, and Everything is 42.")
def get_tangents(left_hell, right_hell):
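    # Each hull arrives as a list of [x, y] points followed by two bookkeeping entries
    # appended by make_convex_hell / merge_convex_hells: a processed flag (0 = base case
    # not yet ordered clockwise, 1 = already ordered) and the pair
    # [leftmost_index, rightmost_index]; both are popped off before the points are used.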
#here starts get upper tangent
left_indices = left_hell.pop() #get indices from the list
right_indices = right_hell.pop()
left_proces_mark = left_hell.pop()
right_proces_mark = right_hell.pop()
left_hell_most_right_index = left_indices[1]
right_hell_most_left_index = right_indices[0]
num_of_left_points = len(left_hell)
num_of_right_points = len(right_hell)
x_of_line = float((left_hell[left_hell_most_right_index][0] +\
right_hell[right_hell_most_left_index][0])/2.0)
# if base case then order the points clockwise
if num_of_left_points < 4 and left_proces_mark == 0:
        if num_of_left_points > 2: #exactly 3 points: put them in clockwise order
if (get_y_intersection(left_hell[0],left_hell[2],left_hell[1][0]) < left_hell[1][1]):
left_hell = [left_hell[2],left_hell[0],left_hell[1]]
left_indices = [1,0]
left_hell_most_right_index = 0
                left_proces_mark = 1
#this created clockwise order from right, bottom and up
else:
left_hell = [left_hell[2],left_hell[1],left_hell[0]]
left_indices = [2,0]
left_hell_most_right_index = 0
                left_proces_mark = 1
else: #exactly 2 points
left_hell = [left_hell[1],left_hell[0]]
left_indices = [1,0]
left_hell_most_right_index = 0
            left_proces_mark = 1
if num_of_right_points < 4 and right_proces_mark == 0:
        if num_of_right_points > 2: # same test as above, but the points are reordered differently so the right hull also ends up clockwise
if (get_y_intersection(right_hell[0],right_hell[2],right_hell[1][0]) < right_hell[1][1]):
right_hell = [right_hell[0],right_hell[1],right_hell[2]]
right_indices = [0,2]
right_hell_most_left_index = 0
right_proces_mark = 1
else:
right_hell = [right_hell[0],right_hell[2],right_hell[1]]
right_indices = [0,1]
right_hell_most_left_index = 0
right_proces_mark = 1
else:
right_indices = [0,1]
right_hell_most_left_index = 0
right_proces_mark = 1
    #now the arrays are ordered clockwise from the middle line
    i = left_hell_most_right_index # rightmost point (max X) of the left hull
    j = right_hell_most_left_index # leftmost point (min X) of the right hull
while (get_y_intersection(left_hell[i],right_hell[(j+1)%num_of_right_points], x_of_line) >
get_y_intersection(left_hell[i],right_hell[j], x_of_line)
or get_y_intersection(left_hell[(i-1)%num_of_left_points],right_hell[j], x_of_line) >
get_y_intersection(left_hell[i],right_hell[j], x_of_line)):
if (get_y_intersection(left_hell[i],right_hell[(j+1)%num_of_right_points], x_of_line) >
get_y_intersection(left_hell[i],right_hell[j], x_of_line)):
j = (j+1)%num_of_right_points #move right
# mod is there because after iterate to the last point in array
# we want to return to the 0 index, so the last point is connected with the first
else:
i = (i-1)%num_of_left_points #move left
# mod is there because after iterate to the last point in array
# we want to return to the 0 index, so the last point is connected with the first
pass
upper_tangent = [i,j] #array of indices which represent points coords.
#here starts get lower tangent
i = left_hell_most_right_index # reset the most right point on X
j = right_hell_most_left_index # reset the most left point on X
while (get_y_intersection(left_hell[i],right_hell[(j-1)%num_of_right_points], x_of_line) <
get_y_intersection(left_hell[i],right_hell[j], x_of_line)
or get_y_intersection(left_hell[(i+1)%num_of_left_points],right_hell[j], x_of_line) <
get_y_intersection(left_hell[i],right_hell[j], x_of_line)):
if (get_y_intersection(left_hell[i],right_hell[(j-1)%num_of_right_points], x_of_line) <
get_y_intersection(left_hell[i],right_hell[j], x_of_line)):
j = (j-1)%num_of_right_points #move right
# mod is there because after iterate to the last point in array
# we want to return to the 0 index, so the last point is connected with the first
else:
i = (i+1)%num_of_left_points #move left
lower_tangent = [i,j] #array of two indices which represent points coords.
tangents = [upper_tangent,lower_tangent]
indices = [left_indices[0], right_indices[1]] # send further left, right points
right_hell.append(right_proces_mark)
left_hell.append(left_proces_mark)
result = [left_hell,right_hell,tangents,indices]
return result
def merge_convex_hells(left_convex, right_convex, tangents, indices):
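    # Build the merged hull: start at the left hull's upper-tangent point, walk clockwise
    # along the right hull from its upper-tangent point down to its lower-tangent point,
    # then walk back along the left hull from its lower-tangent point up to the start;
    # all points lying between the two hulls are dropped.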
upper_tangent = tangents[0] #extract upper tangent
lower_tangent = tangents[1] #extract lower tangent
left_proces_mark = left_convex.pop()
right_proces_mark = right_convex.pop()
num_of_left_points = len(left_convex)
num_of_right_points = len(right_convex)
most_left = indices[0]
most_right = indices[1]
if most_left == upper_tangent[0]: # if the upper tangent is most left point
new_convex = [left_convex[upper_tangent[0]]]
most_left = 0
else:
new_convex = [left_convex[upper_tangent[0]]]
next_point = upper_tangent[1]
if indices[1] == next_point: # if the upper tangent is most right point
most_right = 1
if next_point != lower_tangent[1]:
new_convex.append(right_convex[upper_tangent[1]])
next_point = (upper_tangent[1]+1)%num_of_right_points
if indices[1] == next_point:
most_right = 2
while next_point != lower_tangent[1]:
new_convex.append(right_convex[next_point])
if indices[1] == next_point:
most_right = len(new_convex)-1
next_point = (next_point + 1)%num_of_right_points
    new_convex.append(right_convex[next_point]) # appends the lower-tangent point; also covers the case where there is only one point for both tangents
if indices[1] == next_point:
most_right = len(new_convex)-1
    #then walk back along the left hull from the lower tangent to the upper tangent
next_point = lower_tangent[0]
if next_point != upper_tangent[0]:
new_convex.append(left_convex[lower_tangent[0]])
if indices[0] == next_point:
most_left = len(new_convex)-1
next_point = (lower_tangent[0] + 1)%num_of_left_points
while next_point != upper_tangent[0]:
new_convex.append(left_convex[next_point])
if indices[0] == next_point:
most_left = len(new_convex)-1
next_point = (next_point + 1)%num_of_left_points
new_convex.append(1)
new_convex.append([most_left, most_right])
return new_convex #this is new convex hell without inner points
def make_convex_hell(array):
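    # Divide and conquer: split the point list in half, build the two half hulls
    # recursively, then stitch them together along their upper and lower tangents.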
#array comes already sorted based on X coordinates
try:
if len(array) < 4:
            most_left = 0 #leftmost point is index 0 since the points are sorted by X
            most_right = len(array)-1 #rightmost point is the last index in the sorted array
indices = [most_left, most_right]
array.append(0) #mark it that it will be processed
array.append(indices) #at the end append indices of the left and right points
            return array #base case: up to 3 points already form a convex hull
else:
length = len(array)
left_convex = make_convex_hell(array[0:length // 2])
right_convex = make_convex_hell(array[length // 2:])
result = get_tangents(left_convex, right_convex)
indices = result.pop()
tangents = result.pop()
right_hell = result.pop()
left_hell = result.pop()
new_convex_hell = merge_convex_hells(left_hell, right_hell, tangents, indices)
return new_convex_hell
except ValueError:
print("The convex hull function takes array of arrays with two integer inside.")
try:
    #generate random but unique points: no two points share an x or y coordinate
uniq_points = gen_rand_uniq_points(9000,9000,500)
#sort based on x coordinate
uniq_points = sorted(uniq_points, key=itemgetter(0))
start = time.clock()
convex_hell = make_convex_hell(uniq_points)
elapsed = time.clock()
print('Running time was: ', elapsed - start)
indices = convex_hell.pop()
proces_mark = convex_hell.pop()
#print("indices at the end ", indices)
#print ("convex_hell ", convex_hell)
x_points = []
y_points = []
plt.axis([-100, 10000, -100, 10000])
for point in uniq_points:
plt.plot(point[0],point[1], 'ro')
for point in convex_hell:
x_points.append(point[0])
y_points.append(point[1])
x_points.append(convex_hell[0][0])
y_points.append(convex_hell[0][1])
plt.plot(x_points,y_points)
plt.show()
except KeyboardInterrupt:
print (" Quit")
|
mit
|
kushalbhola/MyStuff
|
Practice/PythonApplication/env/Lib/site-packages/numpy/lib/tests/test_function_base.py
|
1
|
122243
|
from __future__ import division, absolute_import, print_function
import operator
import warnings
import sys
import decimal
import types
from fractions import Fraction
import pytest
import numpy as np
from numpy import ma
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, assert_raises, assert_allclose, IS_PYPY,
assert_warns, assert_raises_regex, suppress_warnings, HAS_REFCOUNT,
)
import numpy.lib.function_base as nfb
from numpy.random import rand
from numpy.lib import (
add_newdoc_ufunc, angle, average, bartlett, blackman, corrcoef, cov,
delete, diff, digitize, extract, flipud, gradient, hamming, hanning,
i0, insert, interp, kaiser, meshgrid, msort, piecewise, place, rot90,
select, setxor1d, sinc, trapz, trim_zeros, unwrap, unique, vectorize
)
from numpy.compat import long
PY2 = sys.version_info[0] == 2
def get_mat(n):
data = np.arange(n)
data = np.add.outer(data, data)
return data
def _make_complex(real, imag):
"""
Like real + 1j * imag, but behaves as expected when imag contains non-finite
values
"""
ret = np.zeros(np.broadcast(real, imag).shape, np.complex_)
ret.real = real
ret.imag = imag
return ret
class TestRot90(object):
def test_basic(self):
assert_raises(ValueError, rot90, np.ones(4))
assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(0,1,2))
assert_raises(ValueError, rot90, np.ones((2,2)), axes=(0,2))
assert_raises(ValueError, rot90, np.ones((2,2)), axes=(1,1))
assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(-2,1))
a = [[0, 1, 2],
[3, 4, 5]]
b1 = [[2, 5],
[1, 4],
[0, 3]]
b2 = [[5, 4, 3],
[2, 1, 0]]
b3 = [[3, 0],
[4, 1],
[5, 2]]
b4 = [[0, 1, 2],
[3, 4, 5]]
for k in range(-3, 13, 4):
assert_equal(rot90(a, k=k), b1)
for k in range(-2, 13, 4):
assert_equal(rot90(a, k=k), b2)
for k in range(-1, 13, 4):
assert_equal(rot90(a, k=k), b3)
for k in range(0, 13, 4):
assert_equal(rot90(a, k=k), b4)
assert_equal(rot90(rot90(a, axes=(0,1)), axes=(1,0)), a)
assert_equal(rot90(a, k=1, axes=(1,0)), rot90(a, k=-1, axes=(0,1)))
def test_axes(self):
a = np.ones((50, 40, 3))
assert_equal(rot90(a).shape, (40, 50, 3))
assert_equal(rot90(a, axes=(0,2)), rot90(a, axes=(0,-1)))
assert_equal(rot90(a, axes=(1,2)), rot90(a, axes=(-2,-1)))
def test_rotation_axes(self):
a = np.arange(8).reshape((2,2,2))
a_rot90_01 = [[[2, 3],
[6, 7]],
[[0, 1],
[4, 5]]]
a_rot90_12 = [[[1, 3],
[0, 2]],
[[5, 7],
[4, 6]]]
a_rot90_20 = [[[4, 0],
[6, 2]],
[[5, 1],
[7, 3]]]
a_rot90_10 = [[[4, 5],
[0, 1]],
[[6, 7],
[2, 3]]]
assert_equal(rot90(a, axes=(0, 1)), a_rot90_01)
assert_equal(rot90(a, axes=(1, 0)), a_rot90_10)
assert_equal(rot90(a, axes=(1, 2)), a_rot90_12)
for k in range(1,5):
assert_equal(rot90(a, k=k, axes=(2, 0)),
rot90(a_rot90_20, k=k-1, axes=(2, 0)))
class TestFlip(object):
def test_axes(self):
assert_raises(np.AxisError, np.flip, np.ones(4), axis=1)
assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=2)
assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=-3)
assert_raises(np.AxisError, np.flip, np.ones((4, 4)), axis=(0, 3))
def test_basic_lr(self):
a = get_mat(4)
b = a[:, ::-1]
assert_equal(np.flip(a, 1), b)
a = [[0, 1, 2],
[3, 4, 5]]
b = [[2, 1, 0],
[5, 4, 3]]
assert_equal(np.flip(a, 1), b)
def test_basic_ud(self):
a = get_mat(4)
b = a[::-1, :]
assert_equal(np.flip(a, 0), b)
a = [[0, 1, 2],
[3, 4, 5]]
b = [[3, 4, 5],
[0, 1, 2]]
assert_equal(np.flip(a, 0), b)
def test_3d_swap_axis0(self):
a = np.array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
b = np.array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
assert_equal(np.flip(a, 0), b)
def test_3d_swap_axis1(self):
a = np.array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
b = np.array([[[2, 3],
[0, 1]],
[[6, 7],
[4, 5]]])
assert_equal(np.flip(a, 1), b)
def test_3d_swap_axis2(self):
a = np.array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
b = np.array([[[1, 0],
[3, 2]],
[[5, 4],
[7, 6]]])
assert_equal(np.flip(a, 2), b)
def test_4d(self):
a = np.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)
for i in range(a.ndim):
assert_equal(np.flip(a, i),
np.flipud(a.swapaxes(0, i)).swapaxes(i, 0))
def test_default_axis(self):
a = np.array([[1, 2, 3],
[4, 5, 6]])
b = np.array([[6, 5, 4],
[3, 2, 1]])
assert_equal(np.flip(a), b)
def test_multiple_axes(self):
a = np.array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
assert_equal(np.flip(a, axis=()), a)
b = np.array([[[5, 4],
[7, 6]],
[[1, 0],
[3, 2]]])
assert_equal(np.flip(a, axis=(0, 2)), b)
c = np.array([[[3, 2],
[1, 0]],
[[7, 6],
[5, 4]]])
assert_equal(np.flip(a, axis=(1, 2)), c)
class TestAny(object):
def test_basic(self):
y1 = [0, 0, 1, 0]
y2 = [0, 0, 0, 0]
y3 = [1, 0, 1, 0]
assert_(np.any(y1))
assert_(np.any(y3))
assert_(not np.any(y2))
def test_nd(self):
y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]]
assert_(np.any(y1))
assert_array_equal(np.sometrue(y1, axis=0), [1, 1, 0])
assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1])
class TestAll(object):
def test_basic(self):
y1 = [0, 1, 1, 0]
y2 = [0, 0, 0, 0]
y3 = [1, 1, 1, 1]
assert_(not np.all(y1))
assert_(np.all(y3))
assert_(not np.all(y2))
assert_(np.all(~np.array(y2)))
def test_nd(self):
y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]]
assert_(not np.all(y1))
assert_array_equal(np.alltrue(y1, axis=0), [0, 0, 1])
assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1])
class TestCopy(object):
def test_basic(self):
a = np.array([[1, 2], [3, 4]])
a_copy = np.copy(a)
assert_array_equal(a, a_copy)
a_copy[0, 0] = 10
assert_equal(a[0, 0], 1)
assert_equal(a_copy[0, 0], 10)
def test_order(self):
# It turns out that people rely on np.copy() preserving order by
# default; changing this broke scikit-learn:
# github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 # noqa
a = np.array([[1, 2], [3, 4]])
assert_(a.flags.c_contiguous)
assert_(not a.flags.f_contiguous)
a_fort = np.array([[1, 2], [3, 4]], order="F")
assert_(not a_fort.flags.c_contiguous)
assert_(a_fort.flags.f_contiguous)
a_copy = np.copy(a)
assert_(a_copy.flags.c_contiguous)
assert_(not a_copy.flags.f_contiguous)
a_fort_copy = np.copy(a_fort)
assert_(not a_fort_copy.flags.c_contiguous)
assert_(a_fort_copy.flags.f_contiguous)
class TestAverage(object):
def test_basic(self):
y1 = np.array([1, 2, 3])
assert_(average(y1, axis=0) == 2.)
y2 = np.array([1., 2., 3.])
assert_(average(y2, axis=0) == 2.)
y3 = [0., 0., 0.]
assert_(average(y3, axis=0) == 0.)
y4 = np.ones((4, 4))
y4[0, 1] = 0
y4[1, 0] = 2
assert_almost_equal(y4.mean(0), average(y4, 0))
assert_almost_equal(y4.mean(1), average(y4, 1))
y5 = rand(5, 5)
assert_almost_equal(y5.mean(0), average(y5, 0))
assert_almost_equal(y5.mean(1), average(y5, 1))
def test_weights(self):
y = np.arange(10)
w = np.arange(10)
actual = average(y, weights=w)
desired = (np.arange(10) ** 2).sum() * 1. / np.arange(10).sum()
assert_almost_equal(actual, desired)
y1 = np.array([[1, 2, 3], [4, 5, 6]])
w0 = [1, 2]
actual = average(y1, weights=w0, axis=0)
desired = np.array([3., 4., 5.])
assert_almost_equal(actual, desired)
w1 = [0, 0, 1]
actual = average(y1, weights=w1, axis=1)
desired = np.array([3., 6.])
assert_almost_equal(actual, desired)
# This should raise an error. Can we test for that ?
# assert_equal(average(y1, weights=w1), 9./2.)
# 2D Case
w2 = [[0, 0, 1], [0, 0, 2]]
desired = np.array([3., 6.])
assert_array_equal(average(y1, weights=w2, axis=1), desired)
assert_equal(average(y1, weights=w2), 5.)
y3 = rand(5).astype(np.float32)
w3 = rand(5).astype(np.float64)
assert_(np.average(y3, weights=w3).dtype == np.result_type(y3, w3))
def test_returned(self):
y = np.array([[1, 2, 3], [4, 5, 6]])
# No weights
avg, scl = average(y, returned=True)
assert_equal(scl, 6.)
avg, scl = average(y, 0, returned=True)
assert_array_equal(scl, np.array([2., 2., 2.]))
avg, scl = average(y, 1, returned=True)
assert_array_equal(scl, np.array([3., 3.]))
# With weights
w0 = [1, 2]
avg, scl = average(y, weights=w0, axis=0, returned=True)
assert_array_equal(scl, np.array([3., 3., 3.]))
w1 = [1, 2, 3]
avg, scl = average(y, weights=w1, axis=1, returned=True)
assert_array_equal(scl, np.array([6., 6.]))
w2 = [[0, 0, 1], [1, 2, 3]]
avg, scl = average(y, weights=w2, axis=1, returned=True)
assert_array_equal(scl, np.array([1., 6.]))
def test_subclasses(self):
class subclass(np.ndarray):
pass
a = np.array([[1,2],[3,4]]).view(subclass)
w = np.array([[1,2],[3,4]]).view(subclass)
assert_equal(type(np.average(a)), subclass)
assert_equal(type(np.average(a, weights=w)), subclass)
def test_upcasting(self):
typs = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'),
('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')]
for at, wt, rt in typs:
a = np.array([[1,2],[3,4]], dtype=at)
w = np.array([[1,2],[3,4]], dtype=wt)
assert_equal(np.average(a, weights=w).dtype, np.dtype(rt))
def test_object_dtype(self):
a = np.array([decimal.Decimal(x) for x in range(10)])
w = np.array([decimal.Decimal(1) for _ in range(10)])
w /= w.sum()
assert_almost_equal(a.mean(0), average(a, weights=w))
class TestSelect(object):
choices = [np.array([1, 2, 3]),
np.array([4, 5, 6]),
np.array([7, 8, 9])]
conditions = [np.array([False, False, False]),
np.array([False, True, False]),
np.array([False, False, True])]
def _select(self, cond, values, default=0):
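        # Reference implementation: per position, collect the value whose condition is
        # true, falling back to the default (the test conditions are built so that at
        # most one condition is true per element).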
output = []
for m in range(len(cond)):
output += [V[m] for V, C in zip(values, cond) if C[m]] or [default]
return output
def test_basic(self):
choices = self.choices
conditions = self.conditions
assert_array_equal(select(conditions, choices, default=15),
self._select(conditions, choices, default=15))
assert_equal(len(choices), 3)
assert_equal(len(conditions), 3)
def test_broadcasting(self):
conditions = [np.array(True), np.array([False, True, False])]
choices = [1, np.arange(12).reshape(4, 3)]
assert_array_equal(select(conditions, choices), np.ones((4, 3)))
# default can broadcast too:
assert_equal(select([True], [0], default=[0]).shape, (1,))
def test_return_dtype(self):
assert_equal(select(self.conditions, self.choices, 1j).dtype,
np.complex_)
        # But the conditions need to be stronger than the scalar default
# if it is scalar.
choices = [choice.astype(np.int8) for choice in self.choices]
assert_equal(select(self.conditions, choices).dtype, np.int8)
d = np.array([1, 2, 3, np.nan, 5, 7])
m = np.isnan(d)
assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0])
def test_deprecated_empty(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
assert_equal(select([], [], 3j), 3j)
with warnings.catch_warnings():
warnings.simplefilter("always")
assert_warns(DeprecationWarning, select, [], [])
warnings.simplefilter("error")
assert_raises(DeprecationWarning, select, [], [])
def test_non_bool_deprecation(self):
choices = self.choices
conditions = self.conditions[:]
with warnings.catch_warnings():
warnings.filterwarnings("always")
conditions[0] = conditions[0].astype(np.int_)
assert_warns(DeprecationWarning, select, conditions, choices)
conditions[0] = conditions[0].astype(np.uint8)
assert_warns(DeprecationWarning, select, conditions, choices)
warnings.filterwarnings("error")
assert_raises(DeprecationWarning, select, conditions, choices)
def test_many_arguments(self):
# This used to be limited by NPY_MAXARGS == 32
conditions = [np.array([False])] * 100
choices = [np.array([1])] * 100
select(conditions, choices)
class TestInsert(object):
def test_basic(self):
a = [1, 2, 3]
assert_equal(insert(a, 0, 1), [1, 1, 2, 3])
assert_equal(insert(a, 3, 1), [1, 2, 3, 1])
assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3])
assert_equal(insert(a, 1, [1, 2, 3]), [1, 1, 2, 3, 2, 3])
assert_equal(insert(a, [1, -1, 3], 9), [1, 9, 2, 9, 3, 9])
assert_equal(insert(a, slice(-1, None, -1), 9), [9, 1, 9, 2, 9, 3])
assert_equal(insert(a, [-1, 1, 3], [7, 8, 9]), [1, 8, 2, 7, 3, 9])
b = np.array([0, 1], dtype=np.float64)
assert_equal(insert(b, 0, b[0]), [0., 0., 1.])
assert_equal(insert(b, [], []), b)
# Bools will be treated differently in the future:
# assert_equal(insert(a, np.array([True]*4), 9), [9, 1, 9, 2, 9, 3, 9])
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', FutureWarning)
assert_equal(
insert(a, np.array([True] * 4), 9), [1, 9, 9, 9, 9, 2, 3])
assert_(w[0].category is FutureWarning)
def test_multidim(self):
a = [[1, 1, 1]]
r = [[2, 2, 2],
[1, 1, 1]]
assert_equal(insert(a, 0, [1]), [1, 1, 1, 1])
assert_equal(insert(a, 0, [2, 2, 2], axis=0), r)
assert_equal(insert(a, 0, 2, axis=0), r)
assert_equal(insert(a, 2, 2, axis=1), [[1, 1, 2, 1]])
a = np.array([[1, 1], [2, 2], [3, 3]])
b = np.arange(1, 4).repeat(3).reshape(3, 3)
c = np.concatenate(
(a[:, 0:1], np.arange(1, 4).repeat(3).reshape(3, 3).T,
a[:, 1:2]), axis=1)
assert_equal(insert(a, [1], [[1], [2], [3]], axis=1), b)
assert_equal(insert(a, [1], [1, 2, 3], axis=1), c)
# scalars behave differently, in this case exactly opposite:
assert_equal(insert(a, 1, [1, 2, 3], axis=1), b)
assert_equal(insert(a, 1, [[1], [2], [3]], axis=1), c)
a = np.arange(4).reshape(2, 2)
assert_equal(insert(a[:, :1], 1, a[:, 1], axis=1), a)
assert_equal(insert(a[:1,:], 1, a[1,:], axis=0), a)
# negative axis value
a = np.arange(24).reshape((2, 3, 4))
assert_equal(insert(a, 1, a[:,:, 3], axis=-1),
insert(a, 1, a[:,:, 3], axis=2))
assert_equal(insert(a, 1, a[:, 2,:], axis=-2),
insert(a, 1, a[:, 2,:], axis=1))
# invalid axis value
assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=3)
assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=-4)
# negative axis value
a = np.arange(24).reshape((2, 3, 4))
assert_equal(insert(a, 1, a[:, :, 3], axis=-1),
insert(a, 1, a[:, :, 3], axis=2))
assert_equal(insert(a, 1, a[:, 2, :], axis=-2),
insert(a, 1, a[:, 2, :], axis=1))
def test_0d(self):
# This is an error in the future
a = np.array(1)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', DeprecationWarning)
assert_equal(insert(a, [], 2, axis=0), np.array(2))
assert_(w[0].category is DeprecationWarning)
def test_subclass(self):
class SubClass(np.ndarray):
pass
a = np.arange(10).view(SubClass)
assert_(isinstance(np.insert(a, 0, [0]), SubClass))
assert_(isinstance(np.insert(a, [], []), SubClass))
assert_(isinstance(np.insert(a, [0, 1], [1, 2]), SubClass))
assert_(isinstance(np.insert(a, slice(1, 2), [1, 2]), SubClass))
assert_(isinstance(np.insert(a, slice(1, -2, -1), []), SubClass))
# This is an error in the future:
a = np.array(1).view(SubClass)
assert_(isinstance(np.insert(a, 0, [0]), SubClass))
def test_index_array_copied(self):
x = np.array([1, 1, 1])
np.insert([0, 1, 2], x, [3, 4, 5])
assert_equal(x, np.array([1, 1, 1]))
def test_structured_array(self):
a = np.array([(1, 'a'), (2, 'b'), (3, 'c')],
dtype=[('foo', 'i'), ('bar', 'a1')])
val = (4, 'd')
b = np.insert(a, 0, val)
assert_array_equal(b[0], np.array(val, dtype=b.dtype))
val = [(4, 'd')] * 2
b = np.insert(a, [0, 2], val)
assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype))
class TestAmax(object):
def test_basic(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
assert_equal(np.amax(a), 10.0)
b = [[3, 6.0, 9.0],
[4, 10.0, 5.0],
[8, 3.0, 2.0]]
assert_equal(np.amax(b, axis=0), [8.0, 10.0, 9.0])
assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0])
class TestAmin(object):
def test_basic(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
assert_equal(np.amin(a), -5.0)
b = [[3, 6.0, 9.0],
[4, 10.0, 5.0],
[8, 3.0, 2.0]]
assert_equal(np.amin(b, axis=0), [3.0, 3.0, 2.0])
assert_equal(np.amin(b, axis=1), [3.0, 4.0, 2.0])
class TestPtp(object):
def test_basic(self):
a = np.array([3, 4, 5, 10, -3, -5, 6.0])
assert_equal(a.ptp(axis=0), 15.0)
b = np.array([[3, 6.0, 9.0],
[4, 10.0, 5.0],
[8, 3.0, 2.0]])
assert_equal(b.ptp(axis=0), [5.0, 7.0, 7.0])
assert_equal(b.ptp(axis=-1), [6.0, 6.0, 6.0])
assert_equal(b.ptp(axis=0, keepdims=True), [[5.0, 7.0, 7.0]])
assert_equal(b.ptp(axis=(0,1), keepdims=True), [[8.0]])
class TestCumsum(object):
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32,
np.uint32, np.float32, np.float64, np.complex64,
np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype)
assert_array_equal(np.cumsum(a, axis=0), tgt)
tgt = np.array(
[[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype)
assert_array_equal(np.cumsum(a2, axis=0), tgt)
tgt = np.array(
[[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype)
assert_array_equal(np.cumsum(a2, axis=1), tgt)
class TestProd(object):
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
assert_raises(ArithmeticError, np.prod, a)
assert_raises(ArithmeticError, np.prod, a2, 1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
class TestCumprod(object):
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
assert_raises(ArithmeticError, np.cumprod, a)
assert_raises(ArithmeticError, np.cumprod, a2, 1)
assert_raises(ArithmeticError, np.cumprod, a)
else:
assert_array_equal(np.cumprod(a, axis=-1),
np.array([1, 2, 20, 220,
1320, 6600, 26400], ctype))
assert_array_equal(np.cumprod(a2, axis=0),
np.array([[1, 2, 3, 4],
[5, 12, 21, 36],
[50, 36, 84, 180]], ctype))
assert_array_equal(np.cumprod(a2, axis=-1),
np.array([[1, 2, 6, 24],
[5, 30, 210, 1890],
[10, 30, 120, 600]], ctype))
class TestDiff(object):
def test_basic(self):
x = [1, 4, 6, 7, 12]
out = np.array([3, 2, 1, 5])
out2 = np.array([-1, -1, 4])
out3 = np.array([0, 5])
assert_array_equal(diff(x), out)
assert_array_equal(diff(x, n=2), out2)
assert_array_equal(diff(x, n=3), out3)
x = [1.1, 2.2, 3.0, -0.2, -0.1]
out = np.array([1.1, 0.8, -3.2, 0.1])
assert_almost_equal(diff(x), out)
x = [True, True, False, False]
out = np.array([False, True, False])
out2 = np.array([True, True])
assert_array_equal(diff(x), out)
assert_array_equal(diff(x, n=2), out2)
def test_axis(self):
x = np.zeros((10, 20, 30))
x[:, 1::2, :] = 1
exp = np.ones((10, 19, 30))
exp[:, 1::2, :] = -1
assert_array_equal(diff(x), np.zeros((10, 20, 29)))
assert_array_equal(diff(x, axis=-1), np.zeros((10, 20, 29)))
assert_array_equal(diff(x, axis=0), np.zeros((9, 20, 30)))
assert_array_equal(diff(x, axis=1), exp)
assert_array_equal(diff(x, axis=-2), exp)
assert_raises(np.AxisError, diff, x, axis=3)
assert_raises(np.AxisError, diff, x, axis=-4)
x = np.array(1.11111111111, np.float64)
assert_raises(ValueError, diff, x)
def test_nd(self):
x = 20 * rand(10, 20, 30)
out1 = x[:, :, 1:] - x[:, :, :-1]
out2 = out1[:, :, 1:] - out1[:, :, :-1]
out3 = x[1:, :, :] - x[:-1, :, :]
out4 = out3[1:, :, :] - out3[:-1, :, :]
assert_array_equal(diff(x), out1)
assert_array_equal(diff(x, n=2), out2)
assert_array_equal(diff(x, axis=0), out3)
assert_array_equal(diff(x, n=2, axis=0), out4)
def test_n(self):
x = list(range(3))
assert_raises(ValueError, diff, x, n=-1)
output = [diff(x, n=n) for n in range(1, 5)]
expected = [[1, 1], [0], [], []]
assert_(diff(x, n=0) is x)
for n, (expected, out) in enumerate(zip(expected, output), start=1):
assert_(type(out) is np.ndarray)
assert_array_equal(out, expected)
assert_equal(out.dtype, np.int_)
assert_equal(len(out), max(0, len(x) - n))
def test_times(self):
x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64)
expected = [
np.array([1, 1], dtype='timedelta64[D]'),
np.array([0], dtype='timedelta64[D]'),
]
expected.extend([np.array([], dtype='timedelta64[D]')] * 3)
for n, exp in enumerate(expected, start=1):
out = diff(x, n=n)
assert_array_equal(out, exp)
assert_equal(out.dtype, exp.dtype)
def test_subclass(self):
x = ma.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]],
mask=[[False, False], [True, False],
[False, True], [True, True], [False, False]])
out = diff(x)
assert_array_equal(out.data, [[1], [1], [1], [1], [1]])
assert_array_equal(out.mask, [[False], [True],
[True], [True], [False]])
assert_(type(out) is type(x))
out3 = diff(x, n=3)
assert_array_equal(out3.data, [[], [], [], [], []])
assert_array_equal(out3.mask, [[], [], [], [], []])
assert_(type(out3) is type(x))
def test_prepend(self):
x = np.arange(5) + 1
assert_array_equal(diff(x, prepend=0), np.ones(5))
assert_array_equal(diff(x, prepend=[0]), np.ones(5))
assert_array_equal(np.cumsum(np.diff(x, prepend=0)), x)
assert_array_equal(diff(x, prepend=[-1, 0]), np.ones(6))
x = np.arange(4).reshape(2, 2)
result = np.diff(x, axis=1, prepend=0)
expected = [[0, 1], [2, 1]]
assert_array_equal(result, expected)
result = np.diff(x, axis=1, prepend=[[0], [0]])
assert_array_equal(result, expected)
result = np.diff(x, axis=0, prepend=0)
expected = [[0, 1], [2, 2]]
assert_array_equal(result, expected)
result = np.diff(x, axis=0, prepend=[[0, 0]])
assert_array_equal(result, expected)
assert_raises(ValueError, np.diff, x, prepend=np.zeros((3,3)))
assert_raises(np.AxisError, diff, x, prepend=0, axis=3)
def test_append(self):
x = np.arange(5)
result = diff(x, append=0)
expected = [1, 1, 1, 1, -4]
assert_array_equal(result, expected)
result = diff(x, append=[0])
assert_array_equal(result, expected)
result = diff(x, append=[0, 2])
expected = expected + [2]
assert_array_equal(result, expected)
x = np.arange(4).reshape(2, 2)
result = np.diff(x, axis=1, append=0)
expected = [[1, -1], [1, -3]]
assert_array_equal(result, expected)
result = np.diff(x, axis=1, append=[[0], [0]])
assert_array_equal(result, expected)
result = np.diff(x, axis=0, append=0)
expected = [[2, 2], [-2, -3]]
assert_array_equal(result, expected)
result = np.diff(x, axis=0, append=[[0, 0]])
assert_array_equal(result, expected)
assert_raises(ValueError, np.diff, x, append=np.zeros((3,3)))
assert_raises(np.AxisError, diff, x, append=0, axis=3)
class TestDelete(object):
def setup(self):
self.a = np.arange(5)
self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2)
def _check_inverse_of_slicing(self, indices):
a_del = delete(self.a, indices)
nd_a_del = delete(self.nd_a, indices, axis=1)
msg = 'Delete failed for obj: %r' % indices
# NOTE: The cast should be removed after warning phase for bools
if not isinstance(indices, (slice, int, long, np.integer)):
indices = np.asarray(indices, dtype=np.intp)
indices = indices[(indices >= 0) & (indices < 5)]
assert_array_equal(setxor1d(a_del, self.a[indices, ]), self.a,
err_msg=msg)
xor = setxor1d(nd_a_del[0,:, 0], self.nd_a[0, indices, 0])
assert_array_equal(xor, self.nd_a[0,:, 0], err_msg=msg)
def test_slices(self):
lims = [-6, -2, 0, 1, 2, 4, 5]
steps = [-3, -1, 1, 3]
for start in lims:
for stop in lims:
for step in steps:
s = slice(start, stop, step)
self._check_inverse_of_slicing(s)
def test_fancy(self):
# Deprecation/FutureWarning tests should be kept after change.
self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]]))
with warnings.catch_warnings():
warnings.filterwarnings('error', category=DeprecationWarning)
assert_raises(DeprecationWarning, delete, self.a, [100])
assert_raises(DeprecationWarning, delete, self.a, [-100])
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', category=FutureWarning)
self._check_inverse_of_slicing([0, -1, 2, 2])
obj = np.array([True, False, False], dtype=bool)
self._check_inverse_of_slicing(obj)
assert_(w[0].category is FutureWarning)
assert_(w[1].category is FutureWarning)
def test_single(self):
self._check_inverse_of_slicing(0)
self._check_inverse_of_slicing(-4)
def test_0d(self):
a = np.array(1)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', DeprecationWarning)
assert_equal(delete(a, [], axis=0), a)
assert_(w[0].category is DeprecationWarning)
def test_subclass(self):
class SubClass(np.ndarray):
pass
a = self.a.view(SubClass)
assert_(isinstance(delete(a, 0), SubClass))
assert_(isinstance(delete(a, []), SubClass))
assert_(isinstance(delete(a, [0, 1]), SubClass))
assert_(isinstance(delete(a, slice(1, 2)), SubClass))
assert_(isinstance(delete(a, slice(1, -2)), SubClass))
def test_array_order_preserve(self):
# See gh-7113
k = np.arange(10).reshape(2, 5, order='F')
m = delete(k, slice(60, None), axis=1)
# 'k' is Fortran ordered, and 'm' should have the
# same ordering as 'k' and NOT become C ordered
assert_equal(m.flags.c_contiguous, k.flags.c_contiguous)
assert_equal(m.flags.f_contiguous, k.flags.f_contiguous)
class TestGradient(object):
def test_basic(self):
v = [[1, 1], [3, 4]]
x = np.array(v)
dx = [np.array([[2., 3.], [2., 3.]]),
np.array([[0., 0.], [1., 1.]])]
assert_array_equal(gradient(x), dx)
assert_array_equal(gradient(v), dx)
def test_args(self):
dx = np.cumsum(np.ones(5))
dx_uneven = [1., 2., 5., 9., 11.]
f_2d = np.arange(25).reshape(5, 5)
# distances must be scalars or have size equal to gradient[axis]
gradient(np.arange(5), 3.)
gradient(np.arange(5), np.array(3.))
gradient(np.arange(5), dx)
        # a scalar spacing is broadcast to both axes (dy is taken equal to dx)
gradient(f_2d, 1.5)
gradient(f_2d, np.array(1.5))
gradient(f_2d, dx_uneven, dx_uneven)
# mix between even and uneven spaces and
# mix between scalar and vector
gradient(f_2d, dx, 2)
# 2D but axis specified
gradient(f_2d, dx, axis=1)
# 2d coordinate arguments are not yet allowed
assert_raises_regex(ValueError, '.*scalars or 1d',
gradient, f_2d, np.stack([dx]*2, axis=-1), 1)
def test_badargs(self):
f_2d = np.arange(25).reshape(5, 5)
x = np.cumsum(np.ones(5))
# wrong sizes
assert_raises(ValueError, gradient, f_2d, x, np.ones(2))
assert_raises(ValueError, gradient, f_2d, 1, np.ones(2))
assert_raises(ValueError, gradient, f_2d, np.ones(2), np.ones(2))
# wrong number of arguments
assert_raises(TypeError, gradient, f_2d, x)
assert_raises(TypeError, gradient, f_2d, x, axis=(0,1))
assert_raises(TypeError, gradient, f_2d, x, x, x)
assert_raises(TypeError, gradient, f_2d, 1, 1, 1)
assert_raises(TypeError, gradient, f_2d, x, x, axis=1)
assert_raises(TypeError, gradient, f_2d, 1, 1, axis=1)
def test_datetime64(self):
# Make sure gradient() can handle special types like datetime64
x = np.array(
['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12',
'1910-10-12', '1910-12-12', '1912-12-12'],
dtype='datetime64[D]')
dx = np.array(
[-5, -3, 0, 31, 61, 396, 731],
dtype='timedelta64[D]')
assert_array_equal(gradient(x), dx)
assert_(dx.dtype == np.dtype('timedelta64[D]'))
def test_masked(self):
# Make sure that gradient supports subclasses like masked arrays
x = np.ma.array([[1, 1], [3, 4]],
mask=[[False, False], [False, False]])
out = gradient(x)[0]
assert_equal(type(out), type(x))
# And make sure that the output and input don't have aliased mask
# arrays
assert_(x._mask is not out._mask)
# Also check that edge_order=2 doesn't alter the original mask
x2 = np.ma.arange(5)
x2[2] = np.ma.masked
np.gradient(x2, edge_order=2)
assert_array_equal(x2.mask, [False, False, True, False, False])
def test_second_order_accurate(self):
        # Testing that the relative numerical error is less than 3% for
# this example problem. This corresponds to second order
# accurate finite differences for all interior and boundary
# points.
x = np.linspace(0, 1, 10)
dx = x[1] - x[0]
y = 2 * x ** 3 + 4 * x ** 2 + 2 * x
analytical = 6 * x ** 2 + 8 * x + 2
num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1)
assert_(np.all(num_error < 0.03) == True)
# test with unevenly spaced
np.random.seed(0)
x = np.sort(np.random.random(10))
y = 2 * x ** 3 + 4 * x ** 2 + 2 * x
analytical = 6 * x ** 2 + 8 * x + 2
num_error = np.abs((np.gradient(y, x, edge_order=2) / analytical) - 1)
assert_(np.all(num_error < 0.03) == True)
def test_spacing(self):
f = np.array([0, 2., 3., 4., 5., 5.])
f = np.tile(f, (6,1)) + f.reshape(-1, 1)
x_uneven = np.array([0., 0.5, 1., 3., 5., 7.])
x_even = np.arange(6.)
fdx_even_ord1 = np.tile([2., 1.5, 1., 1., 0.5, 0.], (6,1))
fdx_even_ord2 = np.tile([2.5, 1.5, 1., 1., 0.5, -0.5], (6,1))
fdx_uneven_ord1 = np.tile([4., 3., 1.7, 0.5, 0.25, 0.], (6,1))
fdx_uneven_ord2 = np.tile([5., 3., 1.7, 0.5, 0.25, -0.25], (6,1))
# evenly spaced
for edge_order, exp_res in [(1, fdx_even_ord1), (2, fdx_even_ord2)]:
res1 = gradient(f, 1., axis=(0,1), edge_order=edge_order)
res2 = gradient(f, x_even, x_even,
axis=(0,1), edge_order=edge_order)
res3 = gradient(f, x_even, x_even,
axis=None, edge_order=edge_order)
assert_array_equal(res1, res2)
assert_array_equal(res2, res3)
assert_almost_equal(res1[0], exp_res.T)
assert_almost_equal(res1[1], exp_res)
res1 = gradient(f, 1., axis=0, edge_order=edge_order)
res2 = gradient(f, x_even, axis=0, edge_order=edge_order)
assert_(res1.shape == res2.shape)
assert_almost_equal(res2, exp_res.T)
res1 = gradient(f, 1., axis=1, edge_order=edge_order)
res2 = gradient(f, x_even, axis=1, edge_order=edge_order)
assert_(res1.shape == res2.shape)
assert_array_equal(res2, exp_res)
# unevenly spaced
for edge_order, exp_res in [(1, fdx_uneven_ord1), (2, fdx_uneven_ord2)]:
res1 = gradient(f, x_uneven, x_uneven,
axis=(0,1), edge_order=edge_order)
res2 = gradient(f, x_uneven, x_uneven,
axis=None, edge_order=edge_order)
assert_array_equal(res1, res2)
assert_almost_equal(res1[0], exp_res.T)
assert_almost_equal(res1[1], exp_res)
res1 = gradient(f, x_uneven, axis=0, edge_order=edge_order)
assert_almost_equal(res1, exp_res.T)
res1 = gradient(f, x_uneven, axis=1, edge_order=edge_order)
assert_almost_equal(res1, exp_res)
# mixed
res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=1)
res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=1)
assert_array_equal(res1[0], res2[1])
assert_array_equal(res1[1], res2[0])
assert_almost_equal(res1[0], fdx_even_ord1.T)
assert_almost_equal(res1[1], fdx_uneven_ord1)
res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=2)
res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=2)
assert_array_equal(res1[0], res2[1])
assert_array_equal(res1[1], res2[0])
assert_almost_equal(res1[0], fdx_even_ord2.T)
assert_almost_equal(res1[1], fdx_uneven_ord2)
def test_specific_axes(self):
# Testing that gradient can work on a given axis only
v = [[1, 1], [3, 4]]
x = np.array(v)
dx = [np.array([[2., 3.], [2., 3.]]),
np.array([[0., 0.], [1., 1.]])]
assert_array_equal(gradient(x, axis=0), dx[0])
assert_array_equal(gradient(x, axis=1), dx[1])
assert_array_equal(gradient(x, axis=-1), dx[1])
assert_array_equal(gradient(x, axis=(1, 0)), [dx[1], dx[0]])
# test axis=None which means all axes
assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]])
# and is the same as no axis keyword given
assert_almost_equal(gradient(x, axis=None), gradient(x))
# test vararg order
assert_array_equal(gradient(x, 2, 3, axis=(1, 0)),
[dx[1]/2.0, dx[0]/3.0])
# test maximal number of varargs
assert_raises(TypeError, gradient, x, 1, 2, axis=1)
assert_raises(np.AxisError, gradient, x, axis=3)
assert_raises(np.AxisError, gradient, x, axis=-3)
# assert_raises(TypeError, gradient, x, axis=[1,])
def test_timedelta64(self):
# Make sure gradient() can handle special types like timedelta64
x = np.array(
[-5, -3, 10, 12, 61, 321, 300],
dtype='timedelta64[D]')
dx = np.array(
[2, 7, 7, 25, 154, 119, -21],
dtype='timedelta64[D]')
assert_array_equal(gradient(x), dx)
assert_(dx.dtype == np.dtype('timedelta64[D]'))
def test_inexact_dtypes(self):
for dt in [np.float16, np.float32, np.float64]:
            # dtypes should not be promoted differently from what np.diff does
x = np.array([1, 2, 3], dtype=dt)
assert_equal(gradient(x).dtype, np.diff(x).dtype)
def test_values(self):
        # needs at least 2 points for edge_order == 1
        gradient(np.arange(2), edge_order=1)
        # needs at least 3 points for edge_order == 2
        gradient(np.arange(3), edge_order=2)
assert_raises(ValueError, gradient, np.arange(0), edge_order=1)
assert_raises(ValueError, gradient, np.arange(0), edge_order=2)
assert_raises(ValueError, gradient, np.arange(1), edge_order=1)
assert_raises(ValueError, gradient, np.arange(1), edge_order=2)
assert_raises(ValueError, gradient, np.arange(2), edge_order=2)
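# Illustrative sketch (not part of the original suite): np.gradient accepts a
# coordinate array for unevenly spaced samples, and with edge_order=2 the
# estimate is exact for quadratics, matching the accuracy checks above.
# Relies on the module-level `import numpy as np` used throughout this file;
# the helper name is hypothetical.
def _gradient_uneven_spacing_sketch():
    x = np.array([0.0, 0.5, 1.5, 3.5])
    f = x ** 2
    # Should agree with the analytical derivative 2*x up to rounding error.
    return np.gradient(f, x, edge_order=2)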
class TestAngle(object):
def test_basic(self):
x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2,
1, 1j, -1, -1j, 1 - 3j, -1 + 3j]
y = angle(x)
yo = [
np.arctan(3.0 / 1.0),
np.arctan(1.0), 0, np.pi / 2, np.pi, -np.pi / 2.0,
-np.arctan(3.0 / 1.0), np.pi - np.arctan(3.0 / 1.0)]
z = angle(x, deg=1)
zo = np.array(yo) * 180 / np.pi
assert_array_almost_equal(y, yo, 11)
assert_array_almost_equal(z, zo, 11)
def test_subclass(self):
x = np.ma.array([1 + 3j, 1, np.sqrt(2)/2 * (1 + 1j)])
x[1] = np.ma.masked
expected = np.ma.array([np.arctan(3.0 / 1.0), 0, np.arctan(1.0)])
expected[1] = np.ma.masked
actual = angle(x)
assert_equal(type(actual), type(expected))
assert_equal(actual.mask, expected.mask)
assert_equal(actual, expected)
class TestTrimZeros(object):
"""
    Basic checks of trim_zeros on 1-D integer arrays.
"""
def test_basic(self):
a = np.array([0, 0, 1, 2, 3, 4, 0])
res = trim_zeros(a)
assert_array_equal(res, np.array([1, 2, 3, 4]))
def test_leading_skip(self):
a = np.array([0, 0, 1, 0, 2, 3, 4, 0])
res = trim_zeros(a)
assert_array_equal(res, np.array([1, 0, 2, 3, 4]))
def test_trailing_skip(self):
a = np.array([0, 0, 1, 0, 2, 3, 0, 4, 0])
res = trim_zeros(a)
assert_array_equal(res, np.array([1, 0, 2, 3, 0, 4]))
class TestExtins(object):
def test_basic(self):
a = np.array([1, 3, 2, 1, 2, 3, 3])
b = extract(a > 1, a)
assert_array_equal(b, [3, 2, 2, 3, 3])
def test_place(self):
# Make sure that non-np.ndarray objects
# raise an error instead of doing nothing
assert_raises(TypeError, place, [1, 2, 3], [True, False], [0, 1])
a = np.array([1, 4, 3, 2, 5, 8, 7])
place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6])
assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7])
place(a, np.zeros(7), [])
assert_array_equal(a, np.arange(1, 8))
place(a, [1, 0, 1, 0, 1, 0, 1], [8, 9])
assert_array_equal(a, [8, 2, 9, 4, 8, 6, 9])
assert_raises_regex(ValueError, "Cannot insert from an empty array",
lambda: place(a, [0, 0, 0, 0, 0, 1, 0], []))
# See Issue #6974
a = np.array(['12', '34'])
place(a, [0, 1], '9')
assert_array_equal(a, ['12', '9'])
def test_both(self):
a = rand(10)
mask = a > 0.5
ac = a.copy()
c = extract(mask, a)
place(a, mask, 0)
place(a, mask, c)
assert_array_equal(a, ac)
class TestVectorize(object):
def test_simple(self):
def addsubtract(a, b):
if a > b:
return a - b
else:
return a + b
f = vectorize(addsubtract)
r = f([0, 3, 6, 9], [1, 3, 5, 7])
assert_array_equal(r, [1, 6, 1, 2])
def test_scalar(self):
def addsubtract(a, b):
if a > b:
return a - b
else:
return a + b
f = vectorize(addsubtract)
r = f([0, 3, 6, 9], 5)
assert_array_equal(r, [5, 8, 1, 4])
def test_large(self):
x = np.linspace(-3, 2, 10000)
f = vectorize(lambda x: x)
y = f(x)
assert_array_equal(y, x)
def test_ufunc(self):
import math
f = vectorize(math.cos)
args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi])
r1 = f(args)
r2 = np.cos(args)
assert_array_almost_equal(r1, r2)
def test_keywords(self):
def foo(a, b=1):
return a + b
f = vectorize(foo)
args = np.array([1, 2, 3])
r1 = f(args)
r2 = np.array([2, 3, 4])
assert_array_equal(r1, r2)
r1 = f(args, 2)
r2 = np.array([3, 4, 5])
assert_array_equal(r1, r2)
def test_keywords_no_func_code(self):
# This needs to test a function that has keywords but
# no func_code attribute, since otherwise vectorize will
# inspect the func_code.
import random
try:
vectorize(random.randrange) # Should succeed
except Exception:
raise AssertionError()
def test_keywords2_ticket_2100(self):
# Test kwarg support: enhancement ticket 2100
def foo(a, b=1):
return a + b
f = vectorize(foo)
args = np.array([1, 2, 3])
r1 = f(a=args)
r2 = np.array([2, 3, 4])
assert_array_equal(r1, r2)
r1 = f(b=1, a=args)
assert_array_equal(r1, r2)
r1 = f(args, b=2)
r2 = np.array([3, 4, 5])
assert_array_equal(r1, r2)
def test_keywords3_ticket_2100(self):
# Test excluded with mixed positional and kwargs: ticket 2100
def mypolyval(x, p):
_p = list(p)
res = _p.pop(0)
while _p:
res = res * x + _p.pop(0)
return res
vpolyval = np.vectorize(mypolyval, excluded=['p', 1])
ans = [3, 6]
assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3]))
assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3]))
assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3]))
def test_keywords4_ticket_2100(self):
# Test vectorizing function with no positional args.
@vectorize
def f(**kw):
res = 1.0
for _k in kw:
res *= kw[_k]
return res
assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8])
def test_keywords5_ticket_2100(self):
# Test vectorizing function with no kwargs args.
@vectorize
def f(*v):
return np.prod(v)
assert_array_equal(f([1, 2], [3, 4]), [3, 8])
def test_coverage1_ticket_2100(self):
def foo():
return 1
f = vectorize(foo)
assert_array_equal(f(), 1)
def test_assigning_docstring(self):
def foo(x):
"""Original documentation"""
return x
f = vectorize(foo)
assert_equal(f.__doc__, foo.__doc__)
doc = "Provided documentation"
f = vectorize(foo, doc=doc)
assert_equal(f.__doc__, doc)
def test_UnboundMethod_ticket_1156(self):
# Regression test for issue 1156
class Foo:
b = 2
def bar(self, a):
return a ** self.b
assert_array_equal(vectorize(Foo().bar)(np.arange(9)),
np.arange(9) ** 2)
assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)),
np.arange(9) ** 2)
def test_execution_order_ticket_1487(self):
# Regression test for dependence on execution order: issue 1487
f1 = vectorize(lambda x: x)
res1a = f1(np.arange(3))
res1b = f1(np.arange(0.1, 3))
f2 = vectorize(lambda x: x)
res2b = f2(np.arange(0.1, 3))
res2a = f2(np.arange(3))
assert_equal(res1a, res2a)
assert_equal(res1b, res2b)
def test_string_ticket_1892(self):
# Test vectorization over strings: issue 1892.
f = np.vectorize(lambda x: x)
s = '0123456789' * 10
assert_equal(s, f(s))
def test_cache(self):
        # Ensure that the vectorized func is called exactly once per argument.
_calls = [0]
@vectorize
def f(x):
_calls[0] += 1
return x ** 2
f.cache = True
x = np.arange(5)
assert_array_equal(f(x), x * x)
assert_equal(_calls[0], len(x))
def test_otypes(self):
f = np.vectorize(lambda x: x)
f.otypes = 'i'
x = np.arange(5)
assert_array_equal(f(x), x)
def test_parse_gufunc_signature(self):
assert_equal(nfb._parse_gufunc_signature('(x)->()'), ([('x',)], [()]))
assert_equal(nfb._parse_gufunc_signature('(x,y)->()'),
([('x', 'y')], [()]))
assert_equal(nfb._parse_gufunc_signature('(x),(y)->()'),
([('x',), ('y',)], [()]))
assert_equal(nfb._parse_gufunc_signature('(x)->(y)'),
([('x',)], [('y',)]))
assert_equal(nfb._parse_gufunc_signature('(x)->(y),()'),
([('x',)], [('y',), ()]))
assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'),
([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')]))
with assert_raises(ValueError):
nfb._parse_gufunc_signature('(x)(y)->()')
with assert_raises(ValueError):
nfb._parse_gufunc_signature('(x),(y)->')
with assert_raises(ValueError):
nfb._parse_gufunc_signature('((x))->(x)')
def test_signature_simple(self):
def addsubtract(a, b):
if a > b:
return a - b
else:
return a + b
f = vectorize(addsubtract, signature='(),()->()')
r = f([0, 3, 6, 9], [1, 3, 5, 7])
assert_array_equal(r, [1, 6, 1, 2])
def test_signature_mean_last(self):
def mean(a):
return a.mean()
f = vectorize(mean, signature='(n)->()')
r = f([[1, 3], [2, 4]])
assert_array_equal(r, [2, 3])
def test_signature_center(self):
def center(a):
return a - a.mean()
f = vectorize(center, signature='(n)->(n)')
r = f([[1, 3], [2, 4]])
assert_array_equal(r, [[-1, 1], [-1, 1]])
def test_signature_two_outputs(self):
f = vectorize(lambda x: (x, x), signature='()->(),()')
r = f([1, 2, 3])
assert_(isinstance(r, tuple) and len(r) == 2)
assert_array_equal(r[0], [1, 2, 3])
assert_array_equal(r[1], [1, 2, 3])
def test_signature_outer(self):
f = vectorize(np.outer, signature='(a),(b)->(a,b)')
r = f([1, 2], [1, 2, 3])
assert_array_equal(r, [[1, 2, 3], [2, 4, 6]])
r = f([[[1, 2]]], [1, 2, 3])
assert_array_equal(r, [[[[1, 2, 3], [2, 4, 6]]]])
r = f([[1, 0], [2, 0]], [1, 2, 3])
assert_array_equal(r, [[[1, 2, 3], [0, 0, 0]],
[[2, 4, 6], [0, 0, 0]]])
r = f([1, 2], [[1, 2, 3], [0, 0, 0]])
assert_array_equal(r, [[[1, 2, 3], [2, 4, 6]],
[[0, 0, 0], [0, 0, 0]]])
def test_signature_computed_size(self):
f = vectorize(lambda x: x[:-1], signature='(n)->(m)')
r = f([1, 2, 3])
assert_array_equal(r, [1, 2])
r = f([[1, 2, 3], [2, 3, 4]])
assert_array_equal(r, [[1, 2], [2, 3]])
def test_signature_excluded(self):
def foo(a, b=1):
return a + b
f = vectorize(foo, signature='()->()', excluded={'b'})
assert_array_equal(f([1, 2, 3]), [2, 3, 4])
assert_array_equal(f([1, 2, 3], b=0), [1, 2, 3])
def test_signature_otypes(self):
f = vectorize(lambda x: x, signature='(n)->(n)', otypes=['float64'])
r = f([1, 2, 3])
assert_equal(r.dtype, np.dtype('float64'))
assert_array_equal(r, [1, 2, 3])
def test_signature_invalid_inputs(self):
f = vectorize(operator.add, signature='(n),(n)->(n)')
with assert_raises_regex(TypeError, 'wrong number of positional'):
f([1, 2])
with assert_raises_regex(
ValueError, 'does not have enough dimensions'):
f(1, 2)
with assert_raises_regex(
ValueError, 'inconsistent size for core dimension'):
f([1, 2], [1, 2, 3])
f = vectorize(operator.add, signature='()->()')
with assert_raises_regex(TypeError, 'wrong number of positional'):
f(1, 2)
def test_signature_invalid_outputs(self):
f = vectorize(lambda x: x[:-1], signature='(n)->(n)')
with assert_raises_regex(
ValueError, 'inconsistent size for core dimension'):
f([1, 2, 3])
f = vectorize(lambda x: x, signature='()->(),()')
with assert_raises_regex(ValueError, 'wrong number of outputs'):
f(1)
f = vectorize(lambda x: (x, x), signature='()->()')
with assert_raises_regex(ValueError, 'wrong number of outputs'):
f([1, 2])
def test_size_zero_output(self):
# see issue 5868
f = np.vectorize(lambda x: x)
x = np.zeros([0, 5], dtype=int)
with assert_raises_regex(ValueError, 'otypes'):
f(x)
f.otypes = 'i'
assert_array_equal(f(x), x)
f = np.vectorize(lambda x: x, signature='()->()')
with assert_raises_regex(ValueError, 'otypes'):
f(x)
f = np.vectorize(lambda x: x, signature='()->()', otypes='i')
assert_array_equal(f(x), x)
f = np.vectorize(lambda x: x, signature='(n)->(n)', otypes='i')
assert_array_equal(f(x), x)
f = np.vectorize(lambda x: x, signature='(n)->(n)')
assert_array_equal(f(x.T), x.T)
f = np.vectorize(lambda x: [x], signature='()->(n)', otypes='i')
with assert_raises_regex(ValueError, 'new output dimensions'):
f(x)
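# Illustrative sketch (not part of the original suite): with a gufunc-style
# signature, np.vectorize hands each core-dimension block to the Python
# function and loops over the remaining axes.  The helper name is
# hypothetical; `np` is the module-level numpy import.
def _vectorize_signature_sketch():
    # '(n)->()' maps each length-n row to a scalar, so a (2, 3) input
    # broadcasts over the leading axis and yields an array of shape (2,).
    row_range = np.vectorize(lambda v: v.max() - v.min(), signature='(n)->()')
    return row_range(np.array([[1, 5, 3], [2, 2, 8]]))  # -> array([4, 6])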
class TestLeaks(object):
class A(object):
iters = 20
def bound(self, *args):
return 0
@staticmethod
def unbound(*args):
return 0
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
@pytest.mark.parametrize('name, incr', [
('bound', A.iters),
('unbound', 0),
])
def test_frompyfunc_leaks(self, name, incr):
        # exposed in gh-11867 as np.vectorize, but the problem stems from
# frompyfunc.
# class.attribute = np.frompyfunc(<method>) creates a
# reference cycle if <method> is a bound class method. It requires a
# gc collection cycle to break the cycle (on CPython 3)
import gc
A_func = getattr(self.A, name)
gc.disable()
try:
refcount = sys.getrefcount(A_func)
for i in range(self.A.iters):
a = self.A()
a.f = np.frompyfunc(getattr(a, name), 1, 1)
out = a.f(np.arange(10))
a = None
if PY2:
assert_equal(sys.getrefcount(A_func), refcount)
else:
# A.func is part of a reference cycle if incr is non-zero
assert_equal(sys.getrefcount(A_func), refcount + incr)
for i in range(5):
gc.collect()
assert_equal(sys.getrefcount(A_func), refcount)
finally:
gc.enable()
class TestDigitize(object):
def test_forward(self):
x = np.arange(-6, 5)
bins = np.arange(-5, 5)
assert_array_equal(digitize(x, bins), np.arange(11))
def test_reverse(self):
x = np.arange(5, -6, -1)
bins = np.arange(5, -5, -1)
assert_array_equal(digitize(x, bins), np.arange(11))
def test_random(self):
x = rand(10)
bin = np.linspace(x.min(), x.max(), 10)
assert_(np.all(digitize(x, bin) != 0))
def test_right_basic(self):
x = [1, 5, 4, 10, 8, 11, 0]
bins = [1, 5, 10]
default_answer = [1, 2, 1, 3, 2, 3, 0]
assert_array_equal(digitize(x, bins), default_answer)
right_answer = [0, 1, 1, 2, 2, 3, 0]
assert_array_equal(digitize(x, bins, True), right_answer)
def test_right_open(self):
x = np.arange(-6, 5)
bins = np.arange(-6, 4)
assert_array_equal(digitize(x, bins, True), np.arange(11))
def test_right_open_reverse(self):
x = np.arange(5, -6, -1)
bins = np.arange(4, -6, -1)
assert_array_equal(digitize(x, bins, True), np.arange(11))
def test_right_open_random(self):
x = rand(10)
bins = np.linspace(x.min(), x.max(), 10)
assert_(np.all(digitize(x, bins, True) != 10))
def test_monotonic(self):
x = [-1, 0, 1, 2]
bins = [0, 0, 1]
assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3])
assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3])
bins = [1, 1, 0]
assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0])
assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0])
bins = [1, 1, 1, 1]
assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4])
assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4])
bins = [0, 0, 1, 0]
assert_raises(ValueError, digitize, x, bins)
bins = [1, 1, 0, 1]
assert_raises(ValueError, digitize, x, bins)
def test_casting_error(self):
x = [1, 2, 3 + 1.j]
bins = [1, 2, 3]
assert_raises(TypeError, digitize, x, bins)
x, bins = bins, x
assert_raises(TypeError, digitize, x, bins)
def test_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
assert_(not isinstance(digitize(b, a, False), A))
assert_(not isinstance(digitize(b, a, True), A))
def test_large_integers_increasing(self):
# gh-11022
x = 2**54 # loses precision in a float
assert_equal(np.digitize(x, [x - 1, x + 1]), 1)
@pytest.mark.xfail(
reason="gh-11022: np.core.multiarray._monoticity loses precision")
def test_large_integers_decreasing(self):
# gh-11022
x = 2**54 # loses precision in a float
assert_equal(np.digitize(x, [x + 1, x - 1]), 1)
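# Illustrative sketch (not part of the original suite): np.digitize returns,
# for each value, the index of the bin it falls into.  The helper name is
# hypothetical; `np` is the module-level numpy import.
def _digitize_sketch():
    x = np.array([0.2, 6.4, 3.0, 1.6])
    bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
    # With right=False (the default), index i satisfies bins[i-1] <= x < bins[i].
    return np.digitize(x, bins)  # -> array([1, 4, 3, 2])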
class TestUnwrap(object):
def test_simple(self):
        # check that unwrap removes jumps greater than 2*pi
assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1])
# check that unwrap maintains continuity
assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
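# Illustrative sketch (not part of the original suite): np.unwrap adds or
# subtracts multiples of 2*pi wherever consecutive phase samples jump by more
# than the discontinuity threshold.  The helper name is hypothetical.
def _unwrap_sketch():
    phase = np.array([0.0, 0.5, 6.5])  # jump of 6.0 > pi between samples
    # The last sample becomes 6.5 - 2*pi, so all consecutive diffs stay small.
    return np.unwrap(phase)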
class TestFilterwindows(object):
def test_hanning(self):
# check symmetry
w = hanning(10)
assert_array_almost_equal(w, flipud(w), 7)
# check known value
assert_almost_equal(np.sum(w, axis=0), 4.500, 4)
def test_hamming(self):
# check symmetry
w = hamming(10)
assert_array_almost_equal(w, flipud(w), 7)
# check known value
assert_almost_equal(np.sum(w, axis=0), 4.9400, 4)
def test_bartlett(self):
# check symmetry
w = bartlett(10)
assert_array_almost_equal(w, flipud(w), 7)
# check known value
assert_almost_equal(np.sum(w, axis=0), 4.4444, 4)
def test_blackman(self):
# check symmetry
w = blackman(10)
assert_array_almost_equal(w, flipud(w), 7)
# check known value
assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
class TestTrapz(object):
def test_simple(self):
x = np.arange(-10, 10, .1)
r = trapz(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1)
# check integral of normal equals 1
assert_almost_equal(r, 1, 7)
def test_ndim(self):
x = np.linspace(0, 1, 3)
y = np.linspace(0, 2, 8)
z = np.linspace(0, 3, 13)
wx = np.ones_like(x) * (x[1] - x[0])
wx[0] /= 2
wx[-1] /= 2
wy = np.ones_like(y) * (y[1] - y[0])
wy[0] /= 2
wy[-1] /= 2
wz = np.ones_like(z) * (z[1] - z[0])
wz[0] /= 2
wz[-1] /= 2
q = x[:, None, None] + y[None,:, None] + z[None, None,:]
qx = (q * wx[:, None, None]).sum(axis=0)
qy = (q * wy[None, :, None]).sum(axis=1)
qz = (q * wz[None, None, :]).sum(axis=2)
# n-d `x`
r = trapz(q, x=x[:, None, None], axis=0)
assert_almost_equal(r, qx)
r = trapz(q, x=y[None,:, None], axis=1)
assert_almost_equal(r, qy)
r = trapz(q, x=z[None, None,:], axis=2)
assert_almost_equal(r, qz)
# 1-d `x`
r = trapz(q, x=x, axis=0)
assert_almost_equal(r, qx)
r = trapz(q, x=y, axis=1)
assert_almost_equal(r, qy)
r = trapz(q, x=z, axis=2)
assert_almost_equal(r, qz)
def test_masked(self):
# Testing that masked arrays behave as if the function is 0 where
# masked
x = np.arange(5)
y = x * x
mask = x == 2
ym = np.ma.array(y, mask=mask)
r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16))
assert_almost_equal(trapz(ym, x), r)
xm = np.ma.array(x, mask=mask)
assert_almost_equal(trapz(ym, xm), r)
xm = np.ma.array(x, mask=mask)
assert_almost_equal(trapz(y, xm), r)
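# Illustrative sketch (not part of the original suite): mirroring test_masked
# above, a masked sample contributes 0, so both trapezoids touching it drop
# out of the integral.  The helper name is hypothetical.
def _trapz_masked_sketch():
    x = np.arange(5)
    y = np.ma.array(x * x, mask=(x == 2))
    # 0.5*(0 + 1) + 0.5*(9 + 16) = 13.0
    return np.trapz(y, x)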
class TestSinc(object):
def test_simple(self):
assert_(sinc(0) == 1)
w = sinc(np.linspace(-1, 1, 100))
# check symmetry
assert_array_almost_equal(w, flipud(w), 7)
def test_array_like(self):
x = [0, 0.5]
y1 = sinc(np.array(x))
y2 = sinc(list(x))
y3 = sinc(tuple(x))
assert_array_equal(y1, y2)
assert_array_equal(y1, y3)
class TestUnique(object):
def test_simple(self):
x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0])
assert_(np.all(unique(x) == [0, 1, 2, 3, 4]))
assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1]))
x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham']
assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget']))
x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j])
assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10]))
class TestCheckFinite(object):
def test_simple(self):
a = [1, 2, 3]
b = [1, 2, np.inf]
c = [1, 2, np.nan]
np.lib.asarray_chkfinite(a)
assert_raises(ValueError, np.lib.asarray_chkfinite, b)
assert_raises(ValueError, np.lib.asarray_chkfinite, c)
def test_dtype_order(self):
# Regression test for missing dtype and order arguments
a = [1, 2, 3]
a = np.lib.asarray_chkfinite(a, order='F', dtype=np.float64)
assert_(a.dtype == np.float64)
class TestCorrCoef(object):
A = np.array(
[[0.15391142, 0.18045767, 0.14197213],
[0.70461506, 0.96474128, 0.27906989],
[0.9297531, 0.32296769, 0.19267156]])
B = np.array(
[[0.10377691, 0.5417086, 0.49807457],
[0.82872117, 0.77801674, 0.39226705],
[0.9314666, 0.66800209, 0.03538394]])
res1 = np.array(
[[1., 0.9379533, -0.04931983],
[0.9379533, 1., 0.30007991],
[-0.04931983, 0.30007991, 1.]])
res2 = np.array(
[[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523],
[0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386],
[-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601],
[0.30151751, -0.04781421, -0.96717111, 1., -0.51366032, -0.66173113],
[0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823],
[0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]])
def test_non_array(self):
assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]),
[[1., -1.], [-1., 1.]])
def test_simple(self):
tgt1 = corrcoef(self.A)
assert_almost_equal(tgt1, self.res1)
assert_(np.all(np.abs(tgt1) <= 1.0))
tgt2 = corrcoef(self.A, self.B)
assert_almost_equal(tgt2, self.res2)
assert_(np.all(np.abs(tgt2) <= 1.0))
def test_ddof(self):
# ddof raises DeprecationWarning
with suppress_warnings() as sup:
warnings.simplefilter("always")
assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1)
sup.filter(DeprecationWarning)
# ddof has no or negligible effect on the function
assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1)
assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2)
assert_almost_equal(corrcoef(self.A, ddof=3), self.res1)
assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2)
def test_bias(self):
# bias raises DeprecationWarning
with suppress_warnings() as sup:
warnings.simplefilter("always")
assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0)
assert_warns(DeprecationWarning, corrcoef, self.A, bias=0)
sup.filter(DeprecationWarning)
# bias has no or negligible effect on the function
assert_almost_equal(corrcoef(self.A, bias=1), self.res1)
def test_complex(self):
x = np.array([[1, 2, 3], [1j, 2j, 3j]])
res = corrcoef(x)
tgt = np.array([[1., -1.j], [1.j, 1.]])
assert_allclose(res, tgt)
assert_(np.all(np.abs(res) <= 1.0))
def test_xy(self):
x = np.array([[1, 2, 3]])
y = np.array([[1j, 2j, 3j]])
assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]]))
def test_empty(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter('always', RuntimeWarning)
assert_array_equal(corrcoef(np.array([])), np.nan)
assert_array_equal(corrcoef(np.array([]).reshape(0, 2)),
np.array([]).reshape(0, 0))
assert_array_equal(corrcoef(np.array([]).reshape(2, 0)),
np.array([[np.nan, np.nan], [np.nan, np.nan]]))
def test_extreme(self):
x = [[1e-100, 1e100], [1e100, 1e-100]]
with np.errstate(all='raise'):
c = corrcoef(x)
assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]]))
assert_(np.all(np.abs(c) <= 1.0))
class TestCov(object):
x1 = np.array([[0, 2], [1, 1], [2, 0]]).T
res1 = np.array([[1., -1.], [-1., 1.]])
x2 = np.array([0.0, 1.0, 2.0], ndmin=2)
frequencies = np.array([1, 4, 1])
x2_repeats = np.array([[0.0], [1.0], [1.0], [1.0], [1.0], [2.0]]).T
res2 = np.array([[0.4, -0.4], [-0.4, 0.4]])
unit_frequencies = np.ones(3, dtype=np.integer)
weights = np.array([1.0, 4.0, 1.0])
res3 = np.array([[2. / 3., -2. / 3.], [-2. / 3., 2. / 3.]])
unit_weights = np.ones(3)
x3 = np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964])
def test_basic(self):
assert_allclose(cov(self.x1), self.res1)
def test_complex(self):
x = np.array([[1, 2, 3], [1j, 2j, 3j]])
res = np.array([[1., -1.j], [1.j, 1.]])
assert_allclose(cov(x), res)
assert_allclose(cov(x, aweights=np.ones(3)), res)
def test_xy(self):
x = np.array([[1, 2, 3]])
y = np.array([[1j, 2j, 3j]])
assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]]))
def test_empty(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter('always', RuntimeWarning)
assert_array_equal(cov(np.array([])), np.nan)
assert_array_equal(cov(np.array([]).reshape(0, 2)),
np.array([]).reshape(0, 0))
assert_array_equal(cov(np.array([]).reshape(2, 0)),
np.array([[np.nan, np.nan], [np.nan, np.nan]]))
def test_wrong_ddof(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter('always', RuntimeWarning)
assert_array_equal(cov(self.x1, ddof=5),
np.array([[np.inf, -np.inf],
[-np.inf, np.inf]]))
def test_1D_rowvar(self):
assert_allclose(cov(self.x3), cov(self.x3, rowvar=0))
y = np.array([0.0780, 0.3107, 0.2111, 0.0334, 0.8501])
assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=0))
def test_1D_variance(self):
assert_allclose(cov(self.x3, ddof=1), np.var(self.x3, ddof=1))
def test_fweights(self):
assert_allclose(cov(self.x2, fweights=self.frequencies),
cov(self.x2_repeats))
assert_allclose(cov(self.x1, fweights=self.frequencies),
self.res2)
assert_allclose(cov(self.x1, fweights=self.unit_frequencies),
self.res1)
nonint = self.frequencies + 0.5
assert_raises(TypeError, cov, self.x1, fweights=nonint)
f = np.ones((2, 3), dtype=np.integer)
assert_raises(RuntimeError, cov, self.x1, fweights=f)
f = np.ones(2, dtype=np.integer)
assert_raises(RuntimeError, cov, self.x1, fweights=f)
f = -1 * np.ones(3, dtype=np.integer)
assert_raises(ValueError, cov, self.x1, fweights=f)
def test_aweights(self):
assert_allclose(cov(self.x1, aweights=self.weights), self.res3)
assert_allclose(cov(self.x1, aweights=3.0 * self.weights),
cov(self.x1, aweights=self.weights))
assert_allclose(cov(self.x1, aweights=self.unit_weights), self.res1)
w = np.ones((2, 3))
assert_raises(RuntimeError, cov, self.x1, aweights=w)
w = np.ones(2)
assert_raises(RuntimeError, cov, self.x1, aweights=w)
w = -1.0 * np.ones(3)
assert_raises(ValueError, cov, self.x1, aweights=w)
def test_unit_fweights_and_aweights(self):
assert_allclose(cov(self.x2, fweights=self.frequencies,
aweights=self.unit_weights),
cov(self.x2_repeats))
assert_allclose(cov(self.x1, fweights=self.frequencies,
aweights=self.unit_weights),
self.res2)
assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
aweights=self.unit_weights),
self.res1)
assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
aweights=self.weights),
self.res3)
assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
aweights=3.0 * self.weights),
cov(self.x1, aweights=self.weights))
assert_allclose(cov(self.x1, fweights=self.unit_frequencies,
aweights=self.unit_weights),
self.res1)
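# Illustrative sketch (not part of the original suite): fweights are integer
# repeat counts, so weighting an observation k times is equivalent to
# repeating its column k times, while aweights only rescale the
# normalization.  The helper name is hypothetical.
def _cov_fweights_sketch():
    x = np.array([[0.0, 1.0, 2.0]])
    repeated = np.array([[0.0, 1.0, 1.0, 1.0, 1.0, 2.0]])
    return np.allclose(np.cov(x, fweights=[1, 4, 1]), np.cov(repeated))  # True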
class Test_I0(object):
def test_simple(self):
assert_almost_equal(
i0(0.5),
np.array(1.0634833707413234))
A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549])
expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049])
assert_almost_equal(i0(A), expected)
assert_almost_equal(i0(-A), expected)
B = np.array([[0.827002, 0.99959078],
[0.89694769, 0.39298162],
[0.37954418, 0.05206293],
[0.36465447, 0.72446427],
[0.48164949, 0.50324519]])
assert_almost_equal(
i0(B),
np.array([[1.17843223, 1.26583466],
[1.21147086, 1.03898290],
[1.03633899, 1.00067775],
[1.03352052, 1.13557954],
[1.05884290, 1.06432317]]))
# Regression test for gh-11205
i0_0 = np.i0([0.])
assert_equal(i0_0.shape, (1,))
assert_array_equal(np.i0([0.]), np.array([1.]))
def test_non_array(self):
a = np.arange(4)
class array_like:
__array_interface__ = a.__array_interface__
def __array_wrap__(self, arr):
return self
# E.g. pandas series survive ufunc calls through array-wrap:
assert isinstance(np.abs(array_like()), array_like)
exp = np.i0(a)
res = np.i0(array_like())
assert_array_equal(exp, res)
class TestKaiser(object):
def test_simple(self):
assert_(np.isfinite(kaiser(1, 1.0)))
assert_almost_equal(kaiser(0, 1.0),
np.array([]))
assert_almost_equal(kaiser(2, 1.0),
np.array([0.78984831, 0.78984831]))
assert_almost_equal(kaiser(5, 1.0),
np.array([0.78984831, 0.94503323, 1.,
0.94503323, 0.78984831]))
assert_almost_equal(kaiser(5, 1.56789),
np.array([0.58285404, 0.88409679, 1.,
0.88409679, 0.58285404]))
def test_int_beta(self):
kaiser(3, 4)
class TestMsort(object):
def test_simple(self):
A = np.array([[0.44567325, 0.79115165, 0.54900530],
[0.36844147, 0.37325583, 0.96098397],
[0.64864341, 0.52929049, 0.39172155]])
assert_almost_equal(
msort(A),
np.array([[0.36844147, 0.37325583, 0.39172155],
[0.44567325, 0.52929049, 0.54900530],
[0.64864341, 0.79115165, 0.96098397]]))
class TestMeshgrid(object):
def test_simple(self):
[X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7])
assert_array_equal(X, np.array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3],
[1, 2, 3]]))
assert_array_equal(Y, np.array([[4, 4, 4],
[5, 5, 5],
[6, 6, 6],
[7, 7, 7]]))
def test_single_input(self):
[X] = meshgrid([1, 2, 3, 4])
assert_array_equal(X, np.array([1, 2, 3, 4]))
def test_no_input(self):
args = []
assert_array_equal([], meshgrid(*args))
assert_array_equal([], meshgrid(*args, copy=False))
def test_indexing(self):
x = [1, 2, 3]
y = [4, 5, 6, 7]
[X, Y] = meshgrid(x, y, indexing='ij')
assert_array_equal(X, np.array([[1, 1, 1, 1],
[2, 2, 2, 2],
[3, 3, 3, 3]]))
assert_array_equal(Y, np.array([[4, 5, 6, 7],
[4, 5, 6, 7],
[4, 5, 6, 7]]))
# Test expected shapes:
z = [8, 9]
assert_(meshgrid(x, y)[0].shape == (4, 3))
assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4))
assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2))
assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2))
assert_raises(ValueError, meshgrid, x, y, indexing='notvalid')
def test_sparse(self):
[X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True)
assert_array_equal(X, np.array([[1, 2, 3]]))
assert_array_equal(Y, np.array([[4], [5], [6], [7]]))
def test_invalid_arguments(self):
# Test that meshgrid complains about invalid arguments
# Regression test for issue #4755:
# https://github.com/numpy/numpy/issues/4755
assert_raises(TypeError, meshgrid,
[1, 2, 3], [4, 5, 6, 7], indices='ij')
def test_return_type(self):
# Test for appropriate dtype in returned arrays.
# Regression test for issue #5297
# https://github.com/numpy/numpy/issues/5297
x = np.arange(0, 10, dtype=np.float32)
y = np.arange(10, 20, dtype=np.float64)
X, Y = np.meshgrid(x,y)
assert_(X.dtype == x.dtype)
assert_(Y.dtype == y.dtype)
# copy
X, Y = np.meshgrid(x,y, copy=True)
assert_(X.dtype == x.dtype)
assert_(Y.dtype == y.dtype)
# sparse
X, Y = np.meshgrid(x,y, sparse=True)
assert_(X.dtype == x.dtype)
assert_(Y.dtype == y.dtype)
def test_writeback(self):
# Issue 8561
X = np.array([1.1, 2.2])
Y = np.array([3.3, 4.4])
x, y = np.meshgrid(X, Y, sparse=False, copy=True)
x[0, :] = 0
assert_equal(x[0, :], 0)
assert_equal(x[1, :], X)
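# Illustrative sketch (not part of the original suite): the two indexing
# conventions of np.meshgrid differ only in the order of the output axes,
# as exercised in test_indexing above.  The helper name is hypothetical.
def _meshgrid_indexing_sketch():
    x, y = [1, 2, 3], [4, 5, 6, 7]
    X_xy, _ = np.meshgrid(x, y)                 # shape (len(y), len(x)) == (4, 3)
    X_ij, _ = np.meshgrid(x, y, indexing='ij')  # shape (len(x), len(y)) == (3, 4)
    return X_xy.shape, X_ij.shape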
class TestPiecewise(object):
def test_simple(self):
# Condition is single bool list
x = piecewise([0, 0], [True, False], [1])
assert_array_equal(x, [1, 0])
# List of conditions: single bool list
x = piecewise([0, 0], [[True, False]], [1])
assert_array_equal(x, [1, 0])
        # Condition is a single bool array
x = piecewise([0, 0], np.array([True, False]), [1])
assert_array_equal(x, [1, 0])
# Condition is single int array
x = piecewise([0, 0], np.array([1, 0]), [1])
assert_array_equal(x, [1, 0])
# List of conditions: int array
x = piecewise([0, 0], [np.array([1, 0])], [1])
assert_array_equal(x, [1, 0])
x = piecewise([0, 0], [[False, True]], [lambda x:-1])
assert_array_equal(x, [0, -1])
assert_raises_regex(ValueError, '1 or 2 functions are expected',
piecewise, [0, 0], [[False, True]], [])
assert_raises_regex(ValueError, '1 or 2 functions are expected',
piecewise, [0, 0], [[False, True]], [1, 2, 3])
def test_two_conditions(self):
x = piecewise([1, 2], [[True, False], [False, True]], [3, 4])
assert_array_equal(x, [3, 4])
def test_scalar_domains_three_conditions(self):
x = piecewise(3, [True, False, False], [4, 2, 0])
assert_equal(x, 4)
def test_default(self):
# No value specified for x[1], should be 0
x = piecewise([1, 2], [True, False], [2])
assert_array_equal(x, [2, 0])
# Should set x[1] to 3
x = piecewise([1, 2], [True, False], [2, 3])
assert_array_equal(x, [2, 3])
def test_0d(self):
x = np.array(3)
y = piecewise(x, x > 3, [4, 0])
assert_(y.ndim == 0)
assert_(y == 0)
x = 5
y = piecewise(x, [True, False], [1, 0])
assert_(y.ndim == 0)
assert_(y == 1)
        # With 3 ranges (this was failing before)
y = piecewise(x, [False, False, True], [1, 2, 3])
assert_array_equal(y, 3)
def test_0d_comparison(self):
x = 3
y = piecewise(x, [x <= 3, x > 3], [4, 0]) # Should succeed.
assert_equal(y, 4)
        # With 3 ranges (this was failing before)
x = 4
y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3])
assert_array_equal(y, 2)
assert_raises_regex(ValueError, '2 or 3 functions are expected',
piecewise, x, [x <= 3, x > 3], [1])
assert_raises_regex(ValueError, '2 or 3 functions are expected',
piecewise, x, [x <= 3, x > 3], [1, 1, 1, 1])
def test_0d_0d_condition(self):
x = np.array(3)
c = np.array(x > 3)
y = piecewise(x, [c], [1, 2])
assert_equal(y, 2)
def test_multidimensional_extrafunc(self):
x = np.array([[-2.5, -1.5, -0.5],
[0.5, 1.5, 2.5]])
y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3])
assert_array_equal(y, np.array([[-1., -1., -1.],
[3., 3., 1.]]))
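# Illustrative sketch (not part of the original suite): np.piecewise takes
# either n functions for n conditions, or n+1 where the extra one fills every
# position no condition covers (otherwise those positions default to 0).
# The helper name is hypothetical.
def _piecewise_default_sketch():
    x = np.array([-2.5, -0.5, 0.5, 2.5])
    # x < 0 -> -1, x >= 2 -> 1, everything else -> 3
    return np.piecewise(x, [x < 0, x >= 2], [-1, 1, 3])  # [-1., -1., 3., 1.]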
class TestBincount(object):
def test_simple(self):
y = np.bincount(np.arange(4))
assert_array_equal(y, np.ones(4))
def test_simple2(self):
y = np.bincount(np.array([1, 5, 2, 4, 1]))
assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1]))
def test_simple_weight(self):
x = np.arange(4)
w = np.array([0.2, 0.3, 0.5, 0.1])
y = np.bincount(x, w)
assert_array_equal(y, w)
def test_simple_weight2(self):
x = np.array([1, 2, 4, 5, 2])
w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])
y = np.bincount(x, w)
assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1]))
def test_with_minlength(self):
x = np.array([0, 1, 0, 1, 1])
y = np.bincount(x, minlength=3)
assert_array_equal(y, np.array([2, 3, 0]))
x = []
y = np.bincount(x, minlength=0)
assert_array_equal(y, np.array([]))
def test_with_minlength_smaller_than_maxvalue(self):
x = np.array([0, 1, 1, 2, 2, 3, 3])
y = np.bincount(x, minlength=2)
assert_array_equal(y, np.array([1, 2, 2, 2]))
y = np.bincount(x, minlength=0)
assert_array_equal(y, np.array([1, 2, 2, 2]))
def test_with_minlength_and_weights(self):
x = np.array([1, 2, 4, 5, 2])
w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])
y = np.bincount(x, w, 8)
assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0]))
def test_empty(self):
x = np.array([], dtype=int)
y = np.bincount(x)
assert_array_equal(x, y)
def test_empty_with_minlength(self):
x = np.array([], dtype=int)
y = np.bincount(x, minlength=5)
assert_array_equal(y, np.zeros(5, dtype=int))
def test_with_incorrect_minlength(self):
x = np.array([], dtype=int)
assert_raises_regex(TypeError,
"'str' object cannot be interpreted",
lambda: np.bincount(x, minlength="foobar"))
assert_raises_regex(ValueError,
"must not be negative",
lambda: np.bincount(x, minlength=-1))
x = np.arange(5)
assert_raises_regex(TypeError,
"'str' object cannot be interpreted",
lambda: np.bincount(x, minlength="foobar"))
assert_raises_regex(ValueError,
"must not be negative",
lambda: np.bincount(x, minlength=-1))
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_dtype_reference_leaks(self):
# gh-6805
intp_refcount = sys.getrefcount(np.dtype(np.intp))
double_refcount = sys.getrefcount(np.dtype(np.double))
for j in range(10):
np.bincount([1, 2, 3])
assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount)
assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
for j in range(10):
np.bincount([1, 2, 3], [4, 5, 6])
assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount)
assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
class TestInterp(object):
def test_exceptions(self):
assert_raises(ValueError, interp, 0, [], [])
assert_raises(ValueError, interp, 0, [0], [1, 2])
assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0)
assert_raises(ValueError, interp, 0, [], [], period=360)
assert_raises(ValueError, interp, 0, [0], [1, 2], period=360)
def test_basic(self):
x = np.linspace(0, 1, 5)
y = np.linspace(0, 1, 5)
x0 = np.linspace(0, 1, 50)
assert_almost_equal(np.interp(x0, x, y), x0)
def test_right_left_behavior(self):
# Needs range of sizes to test different code paths.
        # size == 1 is special cased, 1 < size < 5 is linear search, and
# size >= 5 goes through local search and possibly binary search.
for size in range(1, 10):
xp = np.arange(size, dtype=np.double)
yp = np.ones(size, dtype=np.double)
incpts = np.array([-1, 0, size - 1, size], dtype=np.double)
decpts = incpts[::-1]
incres = interp(incpts, xp, yp)
decres = interp(decpts, xp, yp)
inctgt = np.array([1, 1, 1, 1], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, left=0)
decres = interp(decpts, xp, yp, left=0)
inctgt = np.array([0, 1, 1, 1], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, right=2)
decres = interp(decpts, xp, yp, right=2)
inctgt = np.array([1, 1, 1, 2], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, left=0, right=2)
decres = interp(decpts, xp, yp, left=0, right=2)
inctgt = np.array([0, 1, 1, 2], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
def test_scalar_interpolation_point(self):
x = np.linspace(0, 1, 5)
y = np.linspace(0, 1, 5)
x0 = 0
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = .3
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = np.float32(.3)
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = np.float64(.3)
assert_almost_equal(np.interp(x0, x, y), x0)
x0 = np.nan
assert_almost_equal(np.interp(x0, x, y), x0)
def test_non_finite_behavior_exact_x(self):
x = [1, 2, 2.5, 3, 4]
xp = [1, 2, 3, 4]
fp = [1, 2, np.inf, 4]
assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4])
fp = [1, 2, np.nan, 4]
assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4])
@pytest.fixture(params=[
lambda x: np.float_(x),
lambda x: _make_complex(x, 0),
lambda x: _make_complex(0, x),
lambda x: _make_complex(x, np.multiply(x, -2))
], ids=[
'real',
'complex-real',
'complex-imag',
'complex-both'
])
def sc(self, request):
""" scale function used by the below tests """
return request.param
def test_non_finite_any_nan(self, sc):
""" test that nans are propagated """
assert_equal(np.interp(0.5, [np.nan, 1], sc([ 0, 10])), sc(np.nan))
assert_equal(np.interp(0.5, [ 0, np.nan], sc([ 0, 10])), sc(np.nan))
assert_equal(np.interp(0.5, [ 0, 1], sc([np.nan, 10])), sc(np.nan))
assert_equal(np.interp(0.5, [ 0, 1], sc([ 0, np.nan])), sc(np.nan))
def test_non_finite_inf(self, sc):
""" Test that interp between opposite infs gives nan """
assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 0, 10])), sc(np.nan))
assert_equal(np.interp(0.5, [ 0, 1], sc([-np.inf, +np.inf])), sc(np.nan))
assert_equal(np.interp(0.5, [ 0, 1], sc([+np.inf, -np.inf])), sc(np.nan))
# unless the y values are equal
assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10))
def test_non_finite_half_inf_xf(self, sc):
""" Test that interp where both axes have a bound at inf gives nan """
assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan))
assert_equal(np.interp(0.5, [-np.inf, 1], sc([+np.inf, 10])), sc(np.nan))
assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, -np.inf])), sc(np.nan))
assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, +np.inf])), sc(np.nan))
assert_equal(np.interp(0.5, [ 0, +np.inf], sc([-np.inf, 10])), sc(np.nan))
assert_equal(np.interp(0.5, [ 0, +np.inf], sc([+np.inf, 10])), sc(np.nan))
assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, -np.inf])), sc(np.nan))
assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, +np.inf])), sc(np.nan))
def test_non_finite_half_inf_x(self, sc):
""" Test interp where the x axis has a bound at inf """
assert_equal(np.interp(0.5, [-np.inf, -np.inf], sc([0, 10])), sc(10))
assert_equal(np.interp(0.5, [-np.inf, 1 ], sc([0, 10])), sc(10))
assert_equal(np.interp(0.5, [ 0, +np.inf], sc([0, 10])), sc(0))
assert_equal(np.interp(0.5, [+np.inf, +np.inf], sc([0, 10])), sc(0))
def test_non_finite_half_inf_f(self, sc):
""" Test interp where the f axis has a bound at inf """
assert_equal(np.interp(0.5, [0, 1], sc([ 0, -np.inf])), sc(-np.inf))
assert_equal(np.interp(0.5, [0, 1], sc([ 0, +np.inf])), sc(+np.inf))
assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, 10])), sc(-np.inf))
assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, 10])), sc(+np.inf))
assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, -np.inf])), sc(-np.inf))
assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, +np.inf])), sc(+np.inf))
def test_complex_interp(self):
# test complex interpolation
x = np.linspace(0, 1, 5)
y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5))*1.0j
x0 = 0.3
y0 = x0 + (1+x0)*1.0j
assert_almost_equal(np.interp(x0, x, y), y0)
# test complex left and right
x0 = -1
left = 2 + 3.0j
assert_almost_equal(np.interp(x0, x, y, left=left), left)
x0 = 2.0
right = 2 + 3.0j
assert_almost_equal(np.interp(x0, x, y, right=right), right)
# test complex non finite
x = [1, 2, 2.5, 3, 4]
xp = [1, 2, 3, 4]
fp = [1, 2+1j, np.inf, 4]
y = [1, 2+1j, np.inf+0.5j, np.inf, 4]
assert_almost_equal(np.interp(x, xp, fp), y)
# test complex periodic
x = [-180, -170, -185, 185, -10, -5, 0, 365]
xp = [190, -190, 350, -350]
fp = [5+1.0j, 10+2j, 3+3j, 4+4j]
y = [7.5+1.5j, 5.+1.0j, 8.75+1.75j, 6.25+1.25j, 3.+3j, 3.25+3.25j,
3.5+3.5j, 3.75+3.75j]
assert_almost_equal(np.interp(x, xp, fp, period=360), y)
def test_zero_dimensional_interpolation_point(self):
x = np.linspace(0, 1, 5)
y = np.linspace(0, 1, 5)
x0 = np.array(.3)
assert_almost_equal(np.interp(x0, x, y), x0)
xp = np.array([0, 2, 4])
fp = np.array([1, -1, 1])
actual = np.interp(np.array(1), xp, fp)
assert_equal(actual, 0)
assert_(isinstance(actual, np.float64))
actual = np.interp(np.array(4.5), xp, fp, period=4)
assert_equal(actual, 0.5)
assert_(isinstance(actual, np.float64))
def test_if_len_x_is_small(self):
xp = np.arange(0, 10, 0.0001)
fp = np.sin(xp)
assert_almost_equal(np.interp(np.pi, xp, fp), 0.0)
def test_period(self):
x = [-180, -170, -185, 185, -10, -5, 0, 365]
xp = [190, -190, 350, -350]
fp = [5, 10, 3, 4]
y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]
assert_almost_equal(np.interp(x, xp, fp, period=360), y)
x = np.array(x, order='F').reshape(2, -1)
y = np.array(y, order='C').reshape(2, -1)
assert_almost_equal(np.interp(x, xp, fp, period=360), y)
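# Illustrative sketch (not part of the original suite): with period=360,
# np.interp wraps both x and xp, so angular data need not be sorted or
# normalized first (compare test_period above).  The helper name is
# hypothetical.
def _interp_period_sketch():
    xp = [190, -190, 350, -350]
    fp = [5, 10, 3, 4]
    return np.interp([-180, -170], xp, fp, period=360)  # -> [7.5, 5.0]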
def compare_results(res, desired):
for i in range(len(desired)):
assert_array_equal(res[i], desired[i])
class TestPercentile(object):
def test_basic(self):
x = np.arange(8) * 0.5
assert_equal(np.percentile(x, 0), 0.)
assert_equal(np.percentile(x, 100), 3.5)
assert_equal(np.percentile(x, 50), 1.75)
x[1] = np.nan
assert_equal(np.percentile(x, 0), np.nan)
assert_equal(np.percentile(x, 0, interpolation='nearest'), np.nan)
def test_fraction(self):
x = [Fraction(i, 2) for i in np.arange(8)]
p = np.percentile(x, Fraction(0))
assert_equal(p, Fraction(0))
assert_equal(type(p), Fraction)
p = np.percentile(x, Fraction(100))
assert_equal(p, Fraction(7, 2))
assert_equal(type(p), Fraction)
p = np.percentile(x, Fraction(50))
assert_equal(p, Fraction(7, 4))
assert_equal(type(p), Fraction)
def test_api(self):
d = np.ones(5)
np.percentile(d, 5, None, None, False)
np.percentile(d, 5, None, None, False, 'linear')
o = np.ones((1,))
np.percentile(d, 5, None, o, False, 'linear')
def test_2D(self):
x = np.array([[1, 1, 1],
[1, 1, 1],
[4, 4, 3],
[1, 1, 1],
[1, 1, 1]])
assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1])
def test_linear(self):
# Test defaults
assert_equal(np.percentile(range(10), 50), 4.5)
        # explicitly specify the interpolation method 'linear' (the default)
assert_equal(np.percentile(range(10), 50,
interpolation='linear'), 4.5)
def test_lower_higher(self):
        # interpolation methods 'lower'/'higher'
assert_equal(np.percentile(range(10), 50,
interpolation='lower'), 4)
assert_equal(np.percentile(range(10), 50,
interpolation='higher'), 5)
def test_midpoint(self):
assert_equal(np.percentile(range(10), 51,
interpolation='midpoint'), 4.5)
assert_equal(np.percentile(range(11), 51,
interpolation='midpoint'), 5.5)
assert_equal(np.percentile(range(11), 50,
interpolation='midpoint'), 5)
def test_nearest(self):
assert_equal(np.percentile(range(10), 51,
interpolation='nearest'), 5)
assert_equal(np.percentile(range(10), 49,
interpolation='nearest'), 4)
def test_sequence(self):
x = np.arange(8) * 0.5
assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75])
def test_axis(self):
x = np.arange(12).reshape(3, 4)
assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0])
r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]]
assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0)
r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]]
assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T)
# ensure qth axis is always first as with np.array(old_percentile(..))
x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
assert_equal(np.percentile(x, (25, 50)).shape, (2,))
assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,))
assert_equal(np.percentile(x, (25, 50), axis=0).shape, (2, 4, 5, 6))
assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6))
assert_equal(np.percentile(x, (25, 50), axis=2).shape, (2, 3, 4, 6))
assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5))
assert_equal(
np.percentile(x, (25, 50, 75), axis=1).shape, (3, 3, 5, 6))
assert_equal(np.percentile(x, (25, 50),
interpolation="higher").shape, (2,))
assert_equal(np.percentile(x, (25, 50, 75),
interpolation="higher").shape, (3,))
assert_equal(np.percentile(x, (25, 50), axis=0,
interpolation="higher").shape, (2, 4, 5, 6))
assert_equal(np.percentile(x, (25, 50), axis=1,
interpolation="higher").shape, (2, 3, 5, 6))
assert_equal(np.percentile(x, (25, 50), axis=2,
interpolation="higher").shape, (2, 3, 4, 6))
assert_equal(np.percentile(x, (25, 50), axis=3,
interpolation="higher").shape, (2, 3, 4, 5))
assert_equal(np.percentile(x, (25, 50, 75), axis=1,
interpolation="higher").shape, (3, 3, 5, 6))
def test_scalar_q(self):
# test for no empty dimensions for compatibility with old percentile
x = np.arange(12).reshape(3, 4)
assert_equal(np.percentile(x, 50), 5.5)
assert_(np.isscalar(np.percentile(x, 50)))
r0 = np.array([4., 5., 6., 7.])
assert_equal(np.percentile(x, 50, axis=0), r0)
assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape)
r1 = np.array([1.5, 5.5, 9.5])
assert_almost_equal(np.percentile(x, 50, axis=1), r1)
assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape)
out = np.empty(1)
assert_equal(np.percentile(x, 50, out=out), 5.5)
assert_equal(out, 5.5)
out = np.empty(4)
assert_equal(np.percentile(x, 50, axis=0, out=out), r0)
assert_equal(out, r0)
out = np.empty(3)
assert_equal(np.percentile(x, 50, axis=1, out=out), r1)
assert_equal(out, r1)
# test for no empty dimensions for compatibility with old percentile
x = np.arange(12).reshape(3, 4)
assert_equal(np.percentile(x, 50, interpolation='lower'), 5.)
assert_(np.isscalar(np.percentile(x, 50)))
r0 = np.array([4., 5., 6., 7.])
c0 = np.percentile(x, 50, interpolation='lower', axis=0)
assert_equal(c0, r0)
assert_equal(c0.shape, r0.shape)
r1 = np.array([1., 5., 9.])
c1 = np.percentile(x, 50, interpolation='lower', axis=1)
assert_almost_equal(c1, r1)
assert_equal(c1.shape, r1.shape)
out = np.empty((), dtype=x.dtype)
c = np.percentile(x, 50, interpolation='lower', out=out)
assert_equal(c, 5)
assert_equal(out, 5)
out = np.empty(4, dtype=x.dtype)
c = np.percentile(x, 50, interpolation='lower', axis=0, out=out)
assert_equal(c, r0)
assert_equal(out, r0)
out = np.empty(3, dtype=x.dtype)
c = np.percentile(x, 50, interpolation='lower', axis=1, out=out)
assert_equal(c, r1)
assert_equal(out, r1)
def test_exception(self):
assert_raises(ValueError, np.percentile, [1, 2], 56,
interpolation='foobar')
assert_raises(ValueError, np.percentile, [1], 101)
assert_raises(ValueError, np.percentile, [1], -1)
assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101])
assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1])
def test_percentile_list(self):
assert_equal(np.percentile([1, 2, 3], 0), 1)
def test_percentile_out(self):
x = np.array([1, 2, 3])
y = np.zeros((3,))
p = (1, 2, 3)
np.percentile(x, p, out=y)
assert_equal(y, np.percentile(x, p))
x = np.array([[1, 2, 3],
[4, 5, 6]])
y = np.zeros((3, 3))
np.percentile(x, p, axis=0, out=y)
assert_equal(y, np.percentile(x, p, axis=0))
y = np.zeros((3, 2))
np.percentile(x, p, axis=1, out=y)
assert_equal(y, np.percentile(x, p, axis=1))
x = np.arange(12).reshape(3, 4)
# q.dim > 1, float
r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]])
out = np.empty((2, 4))
assert_equal(np.percentile(x, (25, 50), axis=0, out=out), r0)
assert_equal(out, r0)
r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]])
out = np.empty((2, 3))
assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1)
assert_equal(out, r1)
# q.dim > 1, int
r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]])
out = np.empty((2, 4), dtype=x.dtype)
c = np.percentile(x, (25, 50), interpolation='lower', axis=0, out=out)
assert_equal(c, r0)
assert_equal(out, r0)
r1 = np.array([[0, 4, 8], [1, 5, 9]])
out = np.empty((2, 3), dtype=x.dtype)
c = np.percentile(x, (25, 50), interpolation='lower', axis=1, out=out)
assert_equal(c, r1)
assert_equal(out, r1)
def test_percentile_empty_dim(self):
# empty dims are preserved
d = np.arange(11 * 2).reshape(11, 1, 2, 1)
assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1))
assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1))
assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1))
assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2))
assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2))
assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1))
assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1))
assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1))
assert_array_equal(np.percentile(d, 50, axis=2,
interpolation='midpoint').shape,
(11, 1, 1))
assert_array_equal(np.percentile(d, 50, axis=-2,
interpolation='midpoint').shape,
(11, 1, 1))
assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape,
(2, 1, 2, 1))
assert_array_equal(np.array(np.percentile(d, [10, 50], axis=1)).shape,
(2, 11, 2, 1))
assert_array_equal(np.array(np.percentile(d, [10, 50], axis=2)).shape,
(2, 11, 1, 1))
assert_array_equal(np.array(np.percentile(d, [10, 50], axis=3)).shape,
(2, 11, 1, 2))
def test_percentile_no_overwrite(self):
a = np.array([2, 3, 4, 1])
np.percentile(a, [50], overwrite_input=False)
assert_equal(a, np.array([2, 3, 4, 1]))
a = np.array([2, 3, 4, 1])
np.percentile(a, [50])
assert_equal(a, np.array([2, 3, 4, 1]))
def test_no_p_overwrite(self):
p = np.linspace(0., 100., num=5)
np.percentile(np.arange(100.), p, interpolation="midpoint")
assert_array_equal(p, np.linspace(0., 100., num=5))
p = np.linspace(0., 100., num=5).tolist()
np.percentile(np.arange(100.), p, interpolation="midpoint")
assert_array_equal(p, np.linspace(0., 100., num=5).tolist())
def test_percentile_overwrite(self):
a = np.array([2, 3, 4, 1])
b = np.percentile(a, [50], overwrite_input=True)
assert_equal(b, np.array([2.5]))
b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True)
assert_equal(b, np.array([2.5]))
def test_extended_axis(self):
o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10)
assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30))
x = np.moveaxis(x, -1, 0)
assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30))
x = x.swapaxes(0, 1).copy()
assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30))
x = x.swapaxes(0, 1).copy()
assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)),
np.percentile(x, [25, 60], axis=None))
assert_equal(np.percentile(x, [25, 60], axis=(0,)),
np.percentile(x, [25, 60], axis=0))
d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11))
np.random.shuffle(d.ravel())
assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0],
np.percentile(d[:,:,:, 0].flatten(), 25))
assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1],
np.percentile(d[:,:, 1,:].flatten(), [10, 90]))
assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2],
np.percentile(d[:,:, 2,:].flatten(), 25))
assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2],
np.percentile(d[2,:,:,:].flatten(), 25))
assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1],
np.percentile(d[2, 1,:,:].flatten(), 25))
assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1],
np.percentile(d[2,:,:, 1].flatten(), 25))
assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2],
np.percentile(d[2,:, 2,:].flatten(), 25))
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(np.AxisError, np.percentile, d, axis=-5, q=25)
assert_raises(np.AxisError, np.percentile, d, axis=(0, -5), q=25)
assert_raises(np.AxisError, np.percentile, d, axis=4, q=25)
assert_raises(np.AxisError, np.percentile, d, axis=(0, 4), q=25)
# each of these refers to the same axis twice
assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25)
assert_raises(ValueError, np.percentile, d, axis=(-1, -1), q=25)
assert_raises(ValueError, np.percentile, d, axis=(3, -1), q=25)
def test_keepdims(self):
d = np.ones((3, 5, 7, 11))
assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape,
(1, 1, 1, 1))
assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape,
(1, 1, 7, 11))
assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape,
(1, 5, 7, 1))
assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape,
(3, 1, 7, 11))
assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape,
(1, 1, 1, 1))
assert_equal(np.percentile(d, 7, axis=(0, 1, 3), keepdims=True).shape,
(1, 1, 7, 1))
assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3),
keepdims=True).shape, (2, 1, 1, 7, 1))
assert_equal(np.percentile(d, [1, 7], axis=(0, 3),
keepdims=True).shape, (2, 1, 5, 7, 1))
def test_out(self):
o = np.zeros((4,))
d = np.ones((3, 4))
assert_equal(np.percentile(d, 0, 0, out=o), o)
assert_equal(np.percentile(d, 0, 0, interpolation='nearest', out=o), o)
o = np.zeros((3,))
assert_equal(np.percentile(d, 1, 1, out=o), o)
assert_equal(np.percentile(d, 1, 1, interpolation='nearest', out=o), o)
o = np.zeros(())
assert_equal(np.percentile(d, 2, out=o), o)
assert_equal(np.percentile(d, 2, interpolation='nearest', out=o), o)
def test_out_nan(self):
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', '', RuntimeWarning)
o = np.zeros((4,))
d = np.ones((3, 4))
d[2, 1] = np.nan
assert_equal(np.percentile(d, 0, 0, out=o), o)
assert_equal(
np.percentile(d, 0, 0, interpolation='nearest', out=o), o)
o = np.zeros((3,))
assert_equal(np.percentile(d, 1, 1, out=o), o)
assert_equal(
np.percentile(d, 1, 1, interpolation='nearest', out=o), o)
o = np.zeros(())
assert_equal(np.percentile(d, 1, out=o), o)
assert_equal(
np.percentile(d, 1, interpolation='nearest', out=o), o)
def test_nan_behavior(self):
a = np.arange(24, dtype=float)
a[2] = np.nan
assert_equal(np.percentile(a, 0.3), np.nan)
assert_equal(np.percentile(a, 0.3, axis=0), np.nan)
assert_equal(np.percentile(a, [0.3, 0.6], axis=0),
np.array([np.nan] * 2))
a = np.arange(24, dtype=float).reshape(2, 3, 4)
a[1, 2, 3] = np.nan
a[1, 1, 2] = np.nan
# no axis
assert_equal(np.percentile(a, 0.3), np.nan)
assert_equal(np.percentile(a, 0.3).ndim, 0)
# axis0 zerod
b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 0)
b[2, 3] = np.nan
b[1, 2] = np.nan
assert_equal(np.percentile(a, 0.3, 0), b)
# axis0 not zerod
b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
[0.3, 0.6], 0)
b[:, 2, 3] = np.nan
b[:, 1, 2] = np.nan
assert_equal(np.percentile(a, [0.3, 0.6], 0), b)
# axis1 zerod
b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 1)
b[1, 3] = np.nan
b[1, 2] = np.nan
assert_equal(np.percentile(a, 0.3, 1), b)
# axis1 not zerod
b = np.percentile(
np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 1)
b[:, 1, 3] = np.nan
b[:, 1, 2] = np.nan
assert_equal(np.percentile(a, [0.3, 0.6], 1), b)
# axis02 zerod
b = np.percentile(
np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, (0, 2))
b[1] = np.nan
b[2] = np.nan
assert_equal(np.percentile(a, 0.3, (0, 2)), b)
# axis02 not zerod
b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
[0.3, 0.6], (0, 2))
b[:, 1] = np.nan
b[:, 2] = np.nan
assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b)
# axis02 not zerod with nearest interpolation
b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
[0.3, 0.6], (0, 2), interpolation='nearest')
b[:, 1] = np.nan
b[:, 2] = np.nan
assert_equal(np.percentile(
a, [0.3, 0.6], (0, 2), interpolation='nearest'), b)
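# Illustrative sketch (not part of the original suite): for range(10) the
# 50th percentile sits between the samples 4 and 5, and the interpolation
# modes differ only in how that gap is resolved.  The helper name is
# hypothetical.
def _percentile_interpolation_sketch():
    a = np.arange(10)
    # 'linear' -> 4.5, 'lower' -> 4, 'higher' -> 5, 'midpoint' -> 4.5
    return [np.percentile(a, 50, interpolation=m)
            for m in ('linear', 'lower', 'higher', 'midpoint')]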
class TestQuantile(object):
# most of this is already tested by TestPercentile
def test_basic(self):
x = np.arange(8) * 0.5
assert_equal(np.quantile(x, 0), 0.)
assert_equal(np.quantile(x, 1), 3.5)
assert_equal(np.quantile(x, 0.5), 1.75)
def test_fraction(self):
# fractional input, integral quantile
x = [Fraction(i, 2) for i in np.arange(8)]
q = np.quantile(x, 0)
assert_equal(q, 0)
assert_equal(type(q), Fraction)
q = np.quantile(x, 1)
assert_equal(q, Fraction(7, 2))
assert_equal(type(q), Fraction)
q = np.quantile(x, Fraction(1, 2))
assert_equal(q, Fraction(7, 4))
assert_equal(type(q), Fraction)
# repeat with integral input but fractional quantile
x = np.arange(8)
assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2))
def test_no_p_overwrite(self):
# this is worth retesting, because quantile does not make a copy
p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
p = p0.copy()
np.quantile(np.arange(100.), p, interpolation="midpoint")
assert_array_equal(p, p0)
p0 = p0.tolist()
p = p.tolist()
np.quantile(np.arange(100.), p, interpolation="midpoint")
assert_array_equal(p, p0)
class TestMedian(object):
def test_basic(self):
a0 = np.array(1)
a1 = np.arange(2)
a2 = np.arange(6).reshape(2, 3)
assert_equal(np.median(a0), 1)
assert_allclose(np.median(a1), 0.5)
assert_allclose(np.median(a2), 2.5)
assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5])
assert_equal(np.median(a2, axis=1), [1, 4])
assert_allclose(np.median(a2, axis=None), 2.5)
a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775])
assert_almost_equal((a[1] + a[3]) / 2., np.median(a))
a = np.array([0.0463301, 0.0444502, 0.141249])
assert_equal(a[0], np.median(a))
a = np.array([0.0444502, 0.141249, 0.0463301])
assert_equal(a[-1], np.median(a))
# check array scalar result
assert_equal(np.median(a).ndim, 0)
a[1] = np.nan
assert_equal(np.median(a).ndim, 0)
def test_axis_keyword(self):
a3 = np.array([[2, 3],
[0, 1],
[6, 7],
[4, 5]])
for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]:
orig = a.copy()
np.median(a, axis=None)
for ax in range(a.ndim):
np.median(a, axis=ax)
assert_array_equal(a, orig)
assert_allclose(np.median(a3, axis=0), [3, 4])
assert_allclose(np.median(a3.T, axis=1), [3, 4])
assert_allclose(np.median(a3), 3.5)
assert_allclose(np.median(a3, axis=None), 3.5)
assert_allclose(np.median(a3.T), 3.5)
def test_overwrite_keyword(self):
a3 = np.array([[2, 3],
[0, 1],
[6, 7],
[4, 5]])
a0 = np.array(1)
a1 = np.arange(2)
a2 = np.arange(6).reshape(2, 3)
assert_allclose(np.median(a0.copy(), overwrite_input=True), 1)
assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5)
assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5)
assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0),
[1.5, 2.5, 3.5])
assert_allclose(
np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4])
assert_allclose(
np.median(a2.copy(), overwrite_input=True, axis=None), 2.5)
assert_allclose(
np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4])
assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1),
[3, 4])
a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5))
np.random.shuffle(a4.ravel())
assert_allclose(np.median(a4, axis=None),
np.median(a4.copy(), axis=None, overwrite_input=True))
assert_allclose(np.median(a4, axis=0),
np.median(a4.copy(), axis=0, overwrite_input=True))
assert_allclose(np.median(a4, axis=1),
np.median(a4.copy(), axis=1, overwrite_input=True))
assert_allclose(np.median(a4, axis=2),
np.median(a4.copy(), axis=2, overwrite_input=True))
def test_array_like(self):
x = [1, 2, 3]
assert_almost_equal(np.median(x), 2)
x2 = [x]
assert_almost_equal(np.median(x2), 2)
assert_allclose(np.median(x2, axis=0), x)
def test_subclass(self):
# gh-3846
class MySubClass(np.ndarray):
def __new__(cls, input_array, info=None):
obj = np.asarray(input_array).view(cls)
obj.info = info
return obj
def mean(self, axis=None, dtype=None, out=None):
return -7
a = MySubClass([1, 2, 3])
assert_equal(np.median(a), -7)
def test_out(self):
o = np.zeros((4,))
d = np.ones((3, 4))
assert_equal(np.median(d, 0, out=o), o)
o = np.zeros((3,))
assert_equal(np.median(d, 1, out=o), o)
o = np.zeros(())
assert_equal(np.median(d, out=o), o)
def test_out_nan(self):
with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', '', RuntimeWarning)
o = np.zeros((4,))
d = np.ones((3, 4))
d[2, 1] = np.nan
assert_equal(np.median(d, 0, out=o), o)
o = np.zeros((3,))
assert_equal(np.median(d, 1, out=o), o)
o = np.zeros(())
assert_equal(np.median(d, out=o), o)
def test_nan_behavior(self):
a = np.arange(24, dtype=float)
a[2] = np.nan
assert_equal(np.median(a), np.nan)
assert_equal(np.median(a, axis=0), np.nan)
a = np.arange(24, dtype=float).reshape(2, 3, 4)
a[1, 2, 3] = np.nan
a[1, 1, 2] = np.nan
# no axis
assert_equal(np.median(a), np.nan)
assert_equal(np.median(a).ndim, 0)
# axis0
b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0)
b[2, 3] = np.nan
b[1, 2] = np.nan
assert_equal(np.median(a, 0), b)
# axis1
b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1)
b[1, 3] = np.nan
b[1, 2] = np.nan
assert_equal(np.median(a, 1), b)
# axis02
b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2))
b[1] = np.nan
b[2] = np.nan
assert_equal(np.median(a, (0, 2)), b)
def test_empty(self):
# mean(empty array) emits two warnings: empty slice and divide by 0
a = np.array([], dtype=float)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.median(a), np.nan)
assert_(w[0].category is RuntimeWarning)
assert_equal(len(w), 2)
# multiple dimensions
a = np.array([], dtype=float, ndmin=3)
# no axis
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.median(a), np.nan)
assert_(w[0].category is RuntimeWarning)
# axis 0 and 1
b = np.array([], dtype=float, ndmin=2)
assert_equal(np.median(a, axis=0), b)
assert_equal(np.median(a, axis=1), b)
# axis 2
b = np.array(np.nan, dtype=float, ndmin=2)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.median(a, axis=2), b)
assert_(w[0].category is RuntimeWarning)
def test_object(self):
o = np.arange(7.)
assert_(type(np.median(o.astype(object))), float)
o[2] = np.nan
assert_(type(np.median(o.astype(object))), float)
def test_extended_axis(self):
o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10)
assert_equal(np.median(x, axis=(0, 1)), np.median(o))
x = np.moveaxis(x, -1, 0)
assert_equal(np.median(x, axis=(-2, -1)), np.median(o))
x = x.swapaxes(0, 1).copy()
assert_equal(np.median(x, axis=(0, -1)), np.median(o))
assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None))
assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0))
assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1))
d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11))
np.random.shuffle(d.ravel())
assert_equal(np.median(d, axis=(0, 1, 2))[0],
np.median(d[:,:,:, 0].flatten()))
assert_equal(np.median(d, axis=(0, 1, 3))[1],
np.median(d[:,:, 1,:].flatten()))
assert_equal(np.median(d, axis=(3, 1, -4))[2],
np.median(d[:,:, 2,:].flatten()))
assert_equal(np.median(d, axis=(3, 1, 2))[2],
np.median(d[2,:,:,:].flatten()))
assert_equal(np.median(d, axis=(3, 2))[2, 1],
np.median(d[2, 1,:,:].flatten()))
assert_equal(np.median(d, axis=(1, -2))[2, 1],
np.median(d[2,:,:, 1].flatten()))
assert_equal(np.median(d, axis=(1, 3))[2, 2],
np.median(d[2,:, 2,:].flatten()))
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(np.AxisError, np.median, d, axis=-5)
assert_raises(np.AxisError, np.median, d, axis=(0, -5))
assert_raises(np.AxisError, np.median, d, axis=4)
assert_raises(np.AxisError, np.median, d, axis=(0, 4))
assert_raises(ValueError, np.median, d, axis=(1, 1))
def test_keepdims(self):
d = np.ones((3, 5, 7, 11))
assert_equal(np.median(d, axis=None, keepdims=True).shape,
(1, 1, 1, 1))
assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape,
(1, 1, 7, 11))
assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape,
(1, 5, 7, 1))
assert_equal(np.median(d, axis=(1,), keepdims=True).shape,
(3, 1, 7, 11))
assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape,
(1, 1, 1, 1))
assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape,
(1, 1, 7, 1))
class TestAdd_newdoc_ufunc(object):
def test_ufunc_arg(self):
assert_raises(TypeError, add_newdoc_ufunc, 2, "blah")
assert_raises(ValueError, add_newdoc_ufunc, np.add, "blah")
def test_string_arg(self):
assert_raises(TypeError, add_newdoc_ufunc, np.add, 3)
class TestAdd_newdoc(object):
@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
def test_add_doc(self):
# test np.add_newdoc
tgt = "Current flat index into the array."
assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt)
assert_(len(np.core.ufunc.identity.__doc__) > 300)
assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300)
class TestSortComplex(object):
@pytest.mark.parametrize("type_in, type_out", [
('l', 'D'),
('h', 'F'),
('H', 'F'),
('b', 'F'),
('B', 'F'),
('g', 'G'),
])
def test_sort_real(self, type_in, type_out):
# sort_complex() type casting for real input types
a = np.array([5, 3, 6, 2, 1], dtype=type_in)
actual = np.sort_complex(a)
expected = np.sort(a).astype(type_out)
assert_equal(actual, expected)
assert_equal(actual.dtype, expected.dtype)
def test_sort_complex(self):
# sort_complex() handling of complex input
a = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j], dtype='D')
expected = np.array([1 - 3j, 1 - 2j, 2 + 1j, 2 + 3j], dtype='D')
actual = np.sort_complex(a)
assert_equal(actual, expected)
assert_equal(actual.dtype, expected.dtype)
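# Added note (not part of the test suite above): these tests pin down NaN
# propagation -- np.median and np.percentile return NaN when the input contains
# NaN, and np.nanmedian / np.nanpercentile are the NaN-ignoring counterparts.
#
#     a = np.array([1.0, np.nan, 3.0])
#     np.median(a)       # nan
#     np.nanmedian(a)    # 2.0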
|
apache-2.0
|
btabibian/scikit-learn
|
sklearn/datasets/__init__.py
|
61
|
3734
|
"""
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_breast_cancer
from .base import load_boston
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_sample_images
from .base import load_sample_image
from .base import load_wine
from .base import get_data_home
from .base import clear_data_home
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'fetch_kddcup99',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_breast_cancer',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'load_wine',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
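# Illustrative usage sketch (added note, not part of the original module):
# the load_* helpers return a Bunch with `data`/`target`, while the make_*
# generators build synthetic datasets.
#
#     from sklearn.datasets import load_iris, make_classification
#     iris = load_iris()                      # iris.data has shape (150, 4)
#     X, y = make_classification(n_samples=200, n_features=10, random_state=0)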
|
bsd-3-clause
|
shaochengcheng/hoaxy-network
|
setup.py
|
1
|
1178
|
from os.path import dirname, join
from setuptools import setup, find_packages
with open(join(dirname(__file__), 'hnetwork/VERSION'), 'rb') as f:
version = f.read().decode('ascii').strip()
setup(
name='hoaxy-network',
version=version,
url='http://cnets.indiana.edu',
description='Network analysis of hoaxy data',
long_description=open('README.md').read(),
author='Chengcheng Shao',
maintainer='Chengcheng Shao',
maintainer_email='[email protected]',
license='GPLv3',
entry_points={'console_scripts': ['hnetwork = hnetwork.cmdline:main']},
packages=find_packages(exclude=('tests', 'tests.*')),
include_package_data=True,
zip_safe=True,
classifiers=[
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: GPL :: Version 3',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
'networkx',
'pandas',
'docopt>=0.6.2',
'schema',
],)
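# Packaging note (added for illustration, not in the original setup.py): the
# console_scripts entry point above makes setuptools generate an `hnetwork`
# command that dispatches to hnetwork.cmdline:main, e.g.
#
#     $ pip install .
#     $ hnetwork --help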
|
gpl-3.0
|
mjirik/larVolumeToObj
|
larVolumeToObj/computation/old/step_calcchains_serial_tobinary_filter_cyx.py
|
2
|
8113
|
# -*- coding: utf-8 -*-
from lar import *
from scipy import *
import json
import scipy
import numpy as np
import time as tm
import gc
from pngstack2array3d import *
import struct
import getopt, sys
import traceback
#
import matplotlib.pyplot as plt
#cython
import pyximport; pyximport.install()
import calc_chains_helper as cch
# ------------------------------------------------------------
# Logging & Timer
# ------------------------------------------------------------
logging_level = 0;
# 0 = no_logging
# 1 = few details
# 2 = many details
# 3 = many many details
def log(n, l):
if __name__=="__main__" and n <= logging_level:
for s in l:
print "Log:", s;
timer = 1;
timer_last = tm.time()
def timer_start(s):
global timer_last;
if __name__=="__main__" and timer == 1:
log(3, ["Timer start:" + s]);
timer_last = tm.time();
def timer_stop():
global timer_last;
if __name__=="__main__" and timer == 1:
log(3, ["Timer stop :" + str(tm.time() - timer_last)]);
# ------------------------------------------------------------
# Configuration parameters
# ------------------------------------------------------------
PNG_EXTENSION = ".png"
BIN_EXTENSION = ".bin"
# ------------------------------------------------------------
# Utility toolbox
# ------------------------------------------------------------
def invertIndex(nx,ny,nz):
nx,ny,nz = nx+1,ny+1,nz+1
def invertIndex0(offset):
a0, b0 = offset / nx, offset % nx
a1, b1 = a0 / ny, a0 % ny
a2, b2 = a1 / nz, a1 % nz
return b0,b1,b2
return invertIndex0
def countFilesInADir(directory):
return len(os.walk(directory).next()[2])
def isArrayEmpty(arr):
return all(e == 0 for e in arr)
# ------------------------------------------------------------
def writeOffsetToFile(file, offsetCurr):
file.write( struct.pack('>I', offsetCurr[0]) )
file.write( struct.pack('>I', offsetCurr[1]) )
file.write( struct.pack('>I', offsetCurr[2]) )
# ------------------------------------------------------------
def computeChains(imageHeight,imageWidth,imageDepth, imageDx,imageDy,imageDz, Nx,Ny,Nz, calculateout,bordo3, colors,pixelCalc,centroidsCalc, colorIdx,INPUT_DIR,DIR_O):
beginImageStack = 0
endImage = beginImageStack
MAX_CHAINS = colors
count = 0
fileName = "selettori-"
if (calculateout == True):
fileName = "output-"
saveTheColors = centroidsCalc
saveTheColors = np.array( sorted(saveTheColors.reshape(1,colors)[0]), dtype=np.int )
# print str(imageHeight) + '-' + str(imageWidth) + '-' + str(imageDepth)
# print str(imageDx) + '-' + str(imageDy) + '-' + str(imageDz)
# print str(Nx) + '-' + str(Ny) + '-' + str(Nz)
with open(DIR_O+'/'+fileName+str(saveTheColors[colorIdx])+BIN_EXTENSION, "wb") as newFile:
for zBlock in xrange(imageDepth/imageDz):
startImage = endImage
endImage = startImage + imageDz
xEnd, yEnd = 0,0
theImage,colors,theColors = pngstack2array3d(INPUT_DIR, startImage, endImage, colors, pixelCalc, centroidsCalc)
# theColors = theColors.reshape(1,colors)
# if (sorted(theColors[0]) != saveTheColors):
# log(1, [ "Error: colors have changed"] )
# sys.exit(2)
for xBlock in xrange(imageHeight/imageDx):
for yBlock in xrange(imageWidth/imageDy):
xStart, yStart = xBlock * imageDx, yBlock * imageDy
xEnd, yEnd = xStart+imageDx, yStart+imageDy
image = theImage[:, xStart:xEnd, yStart:yEnd]
nz,nx,ny = image.shape
count += 1
# Compute a quotient complex of chains with constant field
# ------------------------------------------------------------
chains3D_old = [];
chains3D = None
hasSomeOne = False
zStart = startImage - beginImageStack;
if (calculateout == True):
chains3D_old = cch.setList(nx,ny,nz, colorIdx, image,saveTheColors)
else:
hasSomeOne,chains3D = cch.setListNP(nx,ny,nz, colorIdx, image,saveTheColors)
# hasSomeOne,chains3D = cch.setParallelListNP(nx,ny,nz, colorIdx, image,saveTheColors)
# Compute the boundary complex of the quotient cell
# ------------------------------------------------------------
objectBoundaryChain = None
if (calculateout == True) and (len(chains3D_old) > 0):
objectBoundaryChain = larBoundaryChain(bordo3,chains3D_old)
# Save
if (calculateout == True):
if (objectBoundaryChain != None):
writeOffsetToFile( newFile, np.array([zStart,xStart,yStart], dtype=int32) )
newFile.write( bytearray( np.array(objectBoundaryChain.toarray().astype('b').flatten()) ) )
else:
if (hasSomeOne != False):
writeOffsetToFile( newFile, np.array([zStart,xStart,yStart], dtype=int32) )
newFile.write( bytearray( np.array(chains3D, dtype=np.dtype('b')) ) )
def runComputation(imageDx,imageDy,imageDz, colors,coloridx,calculateout, V,FV, INPUT_DIR,BEST_IMAGE,BORDER_FILE,DIR_O):
bordo3 = None
if (calculateout == True):
with open(BORDER_FILE, "r") as file:
bordo3_json = json.load(file)
ROWCOUNT = bordo3_json['ROWCOUNT']
COLCOUNT = bordo3_json['COLCOUNT']
ROW = np.asarray(bordo3_json['ROW'], dtype=np.int32)
COL = np.asarray(bordo3_json['COL'], dtype=np.int32)
DATA = np.asarray(bordo3_json['DATA'], dtype=np.int8)
bordo3 = csr_matrix((DATA,COL,ROW),shape=(ROWCOUNT,COLCOUNT));
imageHeight,imageWidth = getImageData(INPUT_DIR+str(BEST_IMAGE)+PNG_EXTENSION)
imageDepth = countFilesInADir(INPUT_DIR)
    Nx,Ny,Nz = imageHeight/imageDx, imageWidth/imageDy, imageDepth/imageDz
try:
pixelCalc, centroidsCalc = centroidcalc(INPUT_DIR, BEST_IMAGE, colors)
computeChains(imageHeight,imageWidth,imageDepth, imageDx,imageDy,imageDz, Nx,Ny,Nz, calculateout,bordo3, colors,pixelCalc,centroidsCalc, coloridx,INPUT_DIR,DIR_O)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
log(1, [ "Error: " + ''.join('!! ' + line for line in lines) ]) # Log it or whatever here
sys.exit(2)
def main(argv):
ARGS_STRING = 'Args: -r -b <borderfile> -x <borderX> -y <borderY> -z <borderZ> -i <inputdirectory> -c <colors> -d <coloridx> -o <outputdir> -q <bestimage>'
try:
opts, args = getopt.getopt(argv,"rb:x:y:z:i:c:d:o:q:")
except getopt.GetoptError:
print ARGS_STRING
sys.exit(2)
nx = ny = nz = imageDx = imageDy = imageDz = 64
colors = 2
coloridx = 0
mandatory = 6
calculateout = False
#Files
BORDER_FILE = 'bordo3.json'
BEST_IMAGE = ''
DIR_IN = ''
DIR_O = ''
for opt, arg in opts:
if opt == '-x':
nx = ny = nz = imageDx = imageDy = imageDz = int(arg)
mandatory = mandatory - 1
elif opt == '-y':
ny = nz = imageDy = imageDz = int(arg)
elif opt == '-z':
nz = imageDz = int(arg)
elif opt == '-r':
calculateout = True
elif opt == '-i':
DIR_IN = arg + '/'
mandatory = mandatory - 1
elif opt == '-b':
BORDER_FILE = arg
mandatory = mandatory - 1
elif opt == '-o':
mandatory = mandatory - 1
DIR_O = arg
elif opt == '-c':
mandatory = mandatory - 1
colors = int(arg)
elif opt == '-d':
mandatory = mandatory - 1
coloridx = int(arg)
elif opt == '-q':
BEST_IMAGE = int(arg)
if mandatory != 0:
        print 'Not all arguments were given'
print ARGS_STRING
sys.exit(2)
if (coloridx >= colors):
        print 'Not all arguments were given (coloridx >= colors)'
print ARGS_STRING
sys.exit(2)
def ind(x,y,z): return x + (nx+1) * (y + (ny+1) * (z))
chunksize = nx * ny + nx * nz + ny * nz + 3 * nx * ny * nz
V = [[x,y,z] for z in xrange(nz+1) for y in xrange(ny+1) for x in xrange(nx+1) ]
v2coords = invertIndex(nx,ny,nz)
FV = []
for h in xrange(len(V)):
x,y,z = v2coords(h)
if (x < nx) and (y < ny): FV.append([h,ind(x+1,y,z),ind(x,y+1,z),ind(x+1,y+1,z)])
if (x < nx) and (z < nz): FV.append([h,ind(x+1,y,z),ind(x,y,z+1),ind(x+1,y,z+1)])
if (y < ny) and (z < nz): FV.append([h,ind(x,y+1,z),ind(x,y,z+1),ind(x,y+1,z+1)])
runComputation(imageDx, imageDy, imageDz, colors, coloridx, calculateout, V, FV, DIR_IN, BEST_IMAGE, BORDER_FILE, DIR_O)
if __name__ == "__main__":
main(sys.argv[1:])
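# Example invocation (illustrative only; flags follow ARGS_STRING above, paths
# are placeholders): -b, -x, -i, -c, -d and -o are mandatory, -r enables the
# boundary-chain output and -q selects the image used for colour centroids.
#
#     python step_calcchains_serial_tobinary_filter_cyx.py -r -b bordo3.json \
#         -x 64 -i ./png_stack -c 2 -d 0 -o ./out -q 0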
|
mit
|
bjlittle/iris
|
lib/iris/tests/integration/plot/test_colorbar.py
|
1
|
3449
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Test interaction between :mod:`iris.plot` and
:func:`matplotlib.pyplot.colorbar`
"""
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests # isort:skip
import numpy as np
from iris.coords import AuxCoord
import iris.tests.stock
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
from iris.plot import (
contour,
contourf,
pcolor,
pcolormesh,
points,
scatter,
)
@tests.skip_plot
class TestColorBarCreation(tests.GraphicsTest):
def setUp(self):
super().setUp()
self.draw_functions = (contour, contourf, pcolormesh, pcolor)
self.cube = iris.tests.stock.lat_lon_cube()
self.cube.coord("longitude").guess_bounds()
self.cube.coord("latitude").guess_bounds()
self.traj_lon = AuxCoord(
np.linspace(-180, 180, 50),
standard_name="longitude",
units="degrees",
)
self.traj_lat = AuxCoord(
np.sin(np.deg2rad(self.traj_lon.points)) * 30.0,
standard_name="latitude",
units="degrees",
)
def test_common_draw_functions(self):
for draw_function in self.draw_functions:
mappable = draw_function(self.cube)
cbar = plt.colorbar()
self.assertIs(
cbar.mappable,
mappable,
msg="Problem with draw function iris.plot.{}".format(
draw_function.__name__
),
)
def test_common_draw_functions_specified_mappable(self):
for draw_function in self.draw_functions:
mappable_initial = draw_function(self.cube, cmap="cool")
_ = draw_function(self.cube)
cbar = plt.colorbar(mappable_initial)
self.assertIs(
cbar.mappable,
mappable_initial,
msg="Problem with draw function iris.plot.{}".format(
draw_function.__name__
),
)
def test_points_with_c_kwarg(self):
mappable = points(self.cube, c=self.cube.data)
cbar = plt.colorbar()
self.assertIs(cbar.mappable, mappable)
def test_points_with_c_kwarg_specified_mappable(self):
mappable_initial = points(self.cube, c=self.cube.data, cmap="cool")
_ = points(self.cube, c=self.cube.data)
cbar = plt.colorbar(mappable_initial)
self.assertIs(cbar.mappable, mappable_initial)
def test_scatter_with_c_kwarg(self):
mappable = scatter(
self.traj_lon, self.traj_lat, c=self.traj_lon.points
)
cbar = plt.colorbar()
self.assertIs(cbar.mappable, mappable)
def test_scatter_with_c_kwarg_specified_mappable(self):
mappable_initial = scatter(
self.traj_lon, self.traj_lat, c=self.traj_lon.points
)
_ = scatter(
self.traj_lon, self.traj_lat, c=self.traj_lon.points, cmap="cool"
)
cbar = plt.colorbar(mappable_initial)
self.assertIs(cbar.mappable, mappable_initial)
if __name__ == "__main__":
tests.main()
|
lgpl-3.0
|
saapunki/solitadds
|
main.py
|
1
|
2867
|
#!/usr/bin/python
import sys, re, pdb, os
import logging
import argparse
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
import datetime
import utils, data_helper
import analyze
def parse_args():
"""
Parse command line args.
Example
-------
python main.py --input-file-operative ../data/small/some-applications-operative-pub-20161031.csv --input-file-usage ../data/small/some-lupapiste-usage-pub-20161031.csv --output-file-applications ../target/application-summary.csv --output-file-users ../target/user-summary.csv
"""
parser = argparse.ArgumentParser(description='SOLITADDS analysis')
parser.add_argument('-io', '--input-file-operative', help='Input CSV file for operative data', required = False, default = os.getcwd() + "/test-data/some-applications-operative-pub-20161031.csv")
parser.add_argument('-iu', '--input-file-usage', help='Input CSV file for usage data', required = False, default = os.getcwd() + "/test-data/some-lupapiste-usage-pub-20161031.csv")
    parser.add_argument('-oa', '--output-file-applications', help='Output CSV file for applications', required = False, default = os.getcwd() + "/summary-applications.csv")
    parser.add_argument('-ou', '--output-file-users', help='Output CSV file for users', required=False, default = os.getcwd() + "/summary-users.csv")
args = vars(parser.parse_args())
return args
if __name__ == "__main__":
pd.set_option('display.width', 240)
args = parse_args()
input_file_operative = args['input_file_operative']
input_file_usage = args['input_file_usage']
output_file_applications = args['output_file_applications']
output_file_users = args['output_file_users']
analysis_start_time = datetime.datetime.now()
odf = data_helper.import_operative_data(input_file_operative)
udf = data_helper.import_usage_data(input_file_usage)
print("Total number of apps: {}".format(len(odf)))
print("Total number of events: {} with time range from {} to {} ".format(len(udf), udf['datetime'].min(), udf['datetime'].max()))
application_summary = analyze.summarize_applications(odf, udf)
application_summary.to_csv(output_file_applications, sep=';', encoding='utf-8')
user_summary = analyze.summarize_users(odf, udf)
user_summary.to_csv(output_file_users, sep=';', encoding='utf-8')
print("Analysis took {} seconds".format(datetime.datetime.now() - analysis_start_time))
fig = plt.figure(1)
plot = fig.add_subplot(111)
plot.set_title("Hakemukset viikonpaivittain")
plot.set_xlabel("Viikonpaiva")
plot.set_ylabel("Hakemuksia")
analyze.applications_by_week_day(application_summary)
canvas = FigureCanvasAgg(fig)
canvas.print_figure("testi_kuva", dpi=80)
|
mit
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/share/doc/networkx-2.2/examples/drawing/plot_atlas.py
|
5
|
2760
|
#!/usr/bin/env python
"""
=====
Atlas
=====
Atlas of all graphs of 6 nodes or less.
"""
# Author: Aric Hagberg ([email protected])
# Copyright (C) 2004-2018 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import random
try:
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
except ImportError:
try:
import pydot
from networkx.drawing.nx_pydot import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either "
"PyGraphviz or pydot.")
import matplotlib.pyplot as plt
import networkx as nx
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic
from networkx.generators.atlas import graph_atlas_g
def atlas6():
""" Return the atlas of all connected graphs of 6 nodes or less.
Attempt to check for isomorphisms and remove.
"""
Atlas = graph_atlas_g()[0:208] # 208
# remove isolated nodes, only connected graphs are left
U = nx.Graph() # graph for union of all graphs in atlas
for G in Atlas:
zerodegree = [n for n in G if G.degree(n) == 0]
for n in zerodegree:
G.remove_node(n)
U = nx.disjoint_union(U, G)
# list of graphs of all connected components
C = nx.connected_component_subgraphs(U)
UU = nx.Graph()
# do quick isomorphic-like check, not a true isomorphism checker
nlist = [] # list of nonisomorphic graphs
for G in C:
# check against all nonisomorphic graphs so far
if not iso(G, nlist):
nlist.append(G)
UU = nx.disjoint_union(UU, G) # union the nonisomorphic graphs
return UU
def iso(G1, glist):
"""Quick and dirty nonisomorphism checker used to check isomorphisms."""
for G2 in glist:
if isomorphic(G1, G2):
return True
return False
if __name__ == '__main__':
G = atlas6()
print("graph has %d nodes with %d edges"
% (nx.number_of_nodes(G), nx.number_of_edges(G)))
print(nx.number_connected_components(G), "connected components")
plt.figure(1, figsize=(8, 8))
# layout graphs with positions using graphviz neato
pos = graphviz_layout(G, prog="neato")
# color nodes the same in each connected subgraph
C = nx.connected_component_subgraphs(G)
for g in C:
c = [random.random()] * nx.number_of_nodes(g) # random color...
nx.draw(g,
pos,
node_size=40,
node_color=c,
vmin=0.0,
vmax=1.0,
with_labels=False
)
plt.show()
|
gpl-3.0
|
blaze/dask
|
dask/dataframe/io/sql.py
|
1
|
15082
|
import numpy as np
import pandas as pd
import dask
from dask.dataframe.utils import PANDAS_GT_0240, PANDAS_VERSION
from dask.delayed import tokenize
from .io import from_delayed, from_pandas
from ... import delayed
from .. import methods
def read_sql_table(
table,
uri,
index_col,
divisions=None,
npartitions=None,
limits=None,
columns=None,
bytes_per_chunk="256 MiB",
head_rows=5,
schema=None,
meta=None,
engine_kwargs=None,
**kwargs,
):
"""
Create dataframe from an SQL table.
If neither divisions or npartitions is given, the memory footprint of the
first few rows will be determined, and partitions of size ~256MB will
be used.
Parameters
----------
table : string or sqlalchemy expression
Select columns from here.
uri : string
Full sqlalchemy URI for the database connection
index_col : string
Column which becomes the index, and defines the partitioning. Should
        be an indexed column in the SQL server, and any orderable type. If the
        type is number or time, then partition boundaries can be inferred from
        npartitions or bytes_per_chunk; otherwise must supply explicit
``divisions=``.
``index_col`` could be a function to return a value, e.g.,
``sql.func.abs(sql.column('value')).label('abs(value)')``.
``index_col=sql.func.abs(sql.column("value")).label("abs(value)")``, or
``index_col=cast(sql.column("id"),types.BigInteger).label("id")`` to convert
the textfield ``id`` to ``BigInteger``.
Note ``sql``, ``cast``, ``types`` methods comes from ``sqlalchemy`` module.
Labeling columns created by functions or arithmetic operations is
required.
divisions: sequence
Values of the index column to split the table by. If given, this will
override npartitions and bytes_per_chunk. The divisions are the value
boundaries of the index column used to define the partitions. For
example, ``divisions=list('acegikmoqsuwz')`` could be used to partition
        a string column lexicographically into 12 partitions, with the implicit
assumption that each partition contains similar numbers of records.
npartitions : int
Number of partitions, if divisions is not given. Will split the values
of the index column linearly between limits, if given, or the column
max/min. The index column must be numeric or time for this to work
limits: 2-tuple or None
Manually give upper and lower range of values for use with npartitions;
if None, first fetches max/min from the DB. Upper limit, if
given, is inclusive.
columns : list of strings or None
Which columns to select; if None, gets all; can include sqlalchemy
functions, e.g.,
``sql.func.abs(sql.column('value')).label('abs(value)')``.
Labeling columns created by functions or arithmetic operations is
recommended.
bytes_per_chunk : str, int
If both divisions and npartitions is None, this is the target size of
each partition, in bytes
head_rows : int
How many rows to load for inferring the data-types, unless passing meta
meta : empty DataFrame or None
If provided, do not attempt to infer dtypes, but use these, coercing
all chunks on load
schema : str or None
If using a table name, pass this to sqlalchemy to select which DB
schema to use within the URI connection
engine_kwargs : dict or None
Specific db engine parameters for sqlalchemy
kwargs : dict
Additional parameters to pass to `pd.read_sql()`
Returns
-------
dask.dataframe
Examples
--------
>>> df = dd.read_sql_table('accounts', 'sqlite:///path/to/bank.db',
... npartitions=10, index_col='id') # doctest: +SKIP
"""
import sqlalchemy as sa
from sqlalchemy import sql
from sqlalchemy.sql import elements
if index_col is None:
raise ValueError("Must specify index column to partition on")
engine_kwargs = {} if engine_kwargs is None else engine_kwargs
engine = sa.create_engine(uri, **engine_kwargs)
m = sa.MetaData()
if isinstance(table, str):
table = sa.Table(table, m, autoload=True, autoload_with=engine, schema=schema)
index = table.columns[index_col] if isinstance(index_col, str) else index_col
if not isinstance(index_col, (str, elements.Label)):
raise ValueError(
"Use label when passing an SQLAlchemy instance as the index (%s)" % index
)
if divisions and npartitions:
raise TypeError("Must supply either divisions or npartitions, not both")
columns = (
[(table.columns[c] if isinstance(c, str) else c) for c in columns]
if columns
else list(table.columns)
)
if index_col not in columns:
columns.append(
table.columns[index_col] if isinstance(index_col, str) else index_col
)
if isinstance(index_col, str):
kwargs["index_col"] = index_col
else:
# function names get pandas auto-named
kwargs["index_col"] = index_col.name
if head_rows > 0:
# derive metadata from first few rows
q = sql.select(columns).limit(head_rows).select_from(table)
head = pd.read_sql(q, engine, **kwargs)
if head.empty:
# no results at all
name = table.name
schema = table.schema
head = pd.read_sql_table(name, uri, schema=schema, index_col=index_col)
return from_pandas(head, npartitions=1)
bytes_per_row = (head.memory_usage(deep=True, index=True)).sum() / head_rows
if meta is None:
meta = head.iloc[:0]
elif meta is None:
raise ValueError("Must provide meta if head_rows is 0")
else:
if divisions is None and npartitions is None:
raise ValueError(
"Must provide divisions or npartitions when using explicit meta."
)
if divisions is None:
if limits is None:
# calculate max and min for given index
q = sql.select([sql.func.max(index), sql.func.min(index)]).select_from(
table
)
minmax = pd.read_sql(q, engine)
maxi, mini = minmax.iloc[0]
dtype = minmax.dtypes["max_1"]
else:
mini, maxi = limits
dtype = pd.Series(limits).dtype
if npartitions is None:
q = sql.select([sql.func.count(index)]).select_from(table)
count = pd.read_sql(q, engine)["count_1"][0]
npartitions = (
int(
round(
count * bytes_per_row / dask.utils.parse_bytes(bytes_per_chunk)
)
)
or 1
)
if dtype.kind == "M":
divisions = methods.tolist(
pd.date_range(
start=mini,
end=maxi,
freq="%iS" % ((maxi - mini).total_seconds() / npartitions),
)
)
divisions[0] = mini
divisions[-1] = maxi
elif dtype.kind in ["i", "u", "f"]:
divisions = np.linspace(mini, maxi, npartitions + 1).tolist()
else:
raise TypeError(
'Provided index column is of type "{}". If divisions is not provided the '
"index column type must be numeric or datetime.".format(dtype)
)
parts = []
lowers, uppers = divisions[:-1], divisions[1:]
for i, (lower, upper) in enumerate(zip(lowers, uppers)):
cond = index <= upper if i == len(lowers) - 1 else index < upper
q = sql.select(columns).where(sql.and_(index >= lower, cond)).select_from(table)
parts.append(
delayed(_read_sql_chunk)(
q, uri, meta, engine_kwargs=engine_kwargs, **kwargs
)
)
engine.dispose()
return from_delayed(parts, meta, divisions=divisions)
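# Note on the partitioning above (added comment, not in the original source):
# each (lower, upper) pair taken from ``divisions`` becomes a WHERE clause of
# the form ``index >= lower AND index < upper``, with the last partition using
# ``<=`` so the upper bound is inclusive. A hypothetical call such as
#
#     df = read_sql_table('accounts', 'sqlite:///path/to/bank.db',
#                         npartitions=4, index_col='id')
#
# therefore issues four range queries whose boundaries come from the min/max of
# ``id`` (or from ``limits`` / ``divisions`` when those are given).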
def _read_sql_chunk(q, uri, meta, engine_kwargs=None, **kwargs):
import sqlalchemy as sa
engine_kwargs = engine_kwargs or {}
engine = sa.create_engine(uri, **engine_kwargs)
df = pd.read_sql(q, engine, **kwargs)
engine.dispose()
if df.empty:
return meta
else:
return df.astype(meta.dtypes.to_dict(), copy=False)
def to_sql(
df,
name: str,
uri: str,
schema=None,
if_exists: str = "fail",
index: bool = True,
index_label=None,
chunksize=None,
dtype=None,
method=None,
compute=True,
parallel=False,
):
"""Store Dask Dataframe to a SQL table
An empty table is created based on the "meta" DataFrame (and conforming to the caller's "if_exists" preference), and
then each block calls pd.DataFrame.to_sql (with `if_exists="append"`).
Databases supported by SQLAlchemy [1]_ are supported. Tables can be
newly created, appended to, or overwritten.
Parameters
----------
name : str
Name of SQL table.
uri : string
Full sqlalchemy URI for the database connection
schema : str, optional
Specify the schema (if database flavor supports this). If None, use
default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
How to behave if the table already exists.
* fail: Raise a ValueError.
* replace: Drop the table before inserting new values.
* append: Insert new values to the existing table.
index : bool, default True
Write DataFrame index as a column. Uses `index_label` as the column
name in the table.
index_label : str or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Specify the number of rows in each batch to be written at a time.
By default, all rows will be written at once.
dtype : dict or scalar, optional
Specifying the datatype for columns. If a dictionary is used, the
keys should be the column names and the values should be the
SQLAlchemy types or strings for the sqlite3 legacy mode. If a
scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
compute : bool, default True
When true, call dask.compute and perform the load into SQL; otherwise, return a Dask object (or array of
per-block objects when parallel=True)
parallel : bool, default False
When true, have each block append itself to the DB table concurrently. This can result in DB rows being in a
different order than the source DataFrame's corresponding rows. When false, load each block into the SQL DB in
sequence.
Raises
------
ValueError
When the table already exists and `if_exists` is 'fail' (the
default).
See Also
--------
read_sql : Read a DataFrame from a table.
Notes
-----
Timezone aware datetime columns will be written as
``Timestamp with timezone`` type with SQLAlchemy if supported by the
database. Otherwise, the datetimes will be stored as timezone unaware
timestamps local to the original timezone.
.. versionadded:: 0.24.0
References
----------
.. [1] https://docs.sqlalchemy.org
.. [2] https://www.python.org/dev/peps/pep-0249/
Examples
--------
Create a table from scratch with 4 rows.
>>> import pandas as pd
>>> df = pd.DataFrame([ {'i':i, 's':str(i)*2 } for i in range(4) ])
>>> from dask.dataframe import from_pandas
>>> ddf = from_pandas(df, npartitions=2)
>>> ddf # doctest: +SKIP
Dask DataFrame Structure:
i s
npartitions=2
0 int64 object
2 ... ...
3 ... ...
Dask Name: from_pandas, 2 tasks
>>> from dask.utils import tmpfile
>>> from sqlalchemy import create_engine # doctest: +SKIP
>>> with tmpfile() as f: # doctest: +SKIP
... db = 'sqlite:///%s' %f # doctest: +SKIP
... ddf.to_sql('test', db) # doctest: +SKIP
... engine = create_engine(db, echo=False) # doctest: +SKIP
... result = engine.execute("SELECT * FROM test").fetchall() # doctest: +SKIP
>>> result # doctest: +SKIP
[(0, 0, '00'), (1, 1, '11'), (2, 2, '22'), (3, 3, '33')]
"""
if not isinstance(uri, str):
raise ValueError(f"Expected URI to be a string, got {type(uri)}.")
# This is the only argument we add on top of what Pandas supports
kwargs = dict(
name=name,
con=uri,
schema=schema,
if_exists=if_exists,
index=index,
index_label=index_label,
chunksize=chunksize,
dtype=dtype,
)
if method:
if not PANDAS_GT_0240:
raise NotImplementedError(
"'method' requires pandas>=0.24.0. You have version %s" % PANDAS_VERSION
)
else:
kwargs["method"] = method
def make_meta(meta):
return meta.to_sql(**kwargs)
make_meta = delayed(make_meta)
meta_task = make_meta(df._meta)
# Partitions should always append to the empty table created from `meta` above
worker_kwargs = dict(kwargs, if_exists="append")
if parallel:
# Perform the meta insert, then one task that inserts all blocks concurrently:
result = [
_extra_deps(
d.to_sql,
extras=meta_task,
**worker_kwargs,
dask_key_name="to_sql-%s" % tokenize(d, **worker_kwargs),
)
for d in df.to_delayed()
]
else:
# Chain the "meta" insert and each block's insert
result = []
last = meta_task
for d in df.to_delayed():
result.append(
_extra_deps(
d.to_sql,
extras=last,
**worker_kwargs,
dask_key_name="to_sql-%s" % tokenize(d, **worker_kwargs),
)
)
last = result[-1]
result = dask.delayed(result)
if compute:
dask.compute(result)
else:
return result
@delayed
def _extra_deps(func, *args, extras=None, **kwargs):
return func(*args, **kwargs)
|
bsd-3-clause
|
fbagirov/scikit-learn
|
sklearn/neighbors/graph.py
|
208
|
7031
|
"""Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
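# Clarifying note (added, not part of the original source): when include_self
# resolves to True the graph is queried against the estimator's own training
# data (X._fit_X), so each sample's first neighbor is itself; when it resolves
# to False the query is None and each sample is excluded from its own
# neighborhood.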
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted to the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
        'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
|
bsd-3-clause
|
bastibl/gnuradio
|
gr-filter/examples/decimate.py
|
7
|
6079
|
#!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from matplotlib import pyplot
from matplotlib import pyplot as mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 10000000 # number of samples to use
self._fs = 10000 # initial sampling rate
self._decim = 20 # Decimation rate
# Generate the prototype filter taps for the decimators with a 200 Hz bandwidth
self._taps = filter.firdes.low_pass_2(1, self._fs,
200, 150,
attenuation_dB=120,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = numpy.ceil(float(len(self._taps)) / float(self._decim))
print("Number of taps: ", len(self._taps))
print("Number of filters: ", self._decim)
print("Taps per channel: ", tpc)
# Build the input signal source
# We create a list of freqs, and a sine wave is generated and added to the source
# for each one of these frequencies.
self.signals = list()
self.add = blocks.add_cc()
freqs = [10, 20, 2040]
for i in range(len(freqs)):
self.signals.append(analog.sig_source_c(self._fs, analog.GR_SIN_WAVE, freqs[i], 1))
self.connect(self.signals[i], (self.add,i))
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct a PFB decimator filter
self.pfb = filter.pfb.decimator_ccf(self._decim, self._taps, 0)
# Construct a standard FIR decimating filter
self.dec = filter.fir_filter_ccf(self._decim, self._taps)
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
        # Create the sink for the decimated signal
self.snk = blocks.vector_sink_c()
self.connect(self.pfb, self.snk)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print("Run time: %f" % (tend - tstart))
if 1:
fig1 = pyplot.figure(1, figsize=(16,9))
fig2 = pyplot.figure(2, figsize=(16,9))
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = numpy.blackman
fs = tb._fs
# Plot the input to the decimator
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen / 4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*numpy.log10(abs(numpy.fft.fftshift(X)))
f_in = numpy.arange(-fs / 2.0, fs / 2.0, fs / float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0 / fs
Tmax = len(d)*Ts
t_in = numpy.arange(0, Tmax, Ts)
x_in = numpy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b")
p1_t = sp1_t.plot(t_in, x_in.imag, "r")
sp1_t.set_ylim([-tb._decim*1.1, tb._decim*1.1])
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot the output of the decimator
fs_o = tb._fs / tb._decim
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk.data()[Ns:Ns+Ne]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen / 4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*numpy.log10(abs(numpy.fft.fftshift(X)))
f_o = numpy.arange(-fs_o / 2.0, fs_o / 2.0, fs_o / float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("PFB Decimated Signal", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_o = 1.0 / fs_o
Tmax_o = len(d)*Ts_o
x_o = numpy.array(d)
t_o = numpy.arange(0, Tmax_o, Ts_o)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o.real, "b-o")
p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
pyplot.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
gpl-3.0
|
TinyOS-Camp/DDEA-DEV
|
Archive/[14_10_10] DDEA sample code/lib_bnlearn.py
|
6
|
12617
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 24 19:24:11 2014
@author: NGO Quang Minh Khiem
@e-mail: [email protected]
"""
from __future__ import division # To force floating point division
import numpy as np
from pandas import DataFrame
# R libs
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
import pandas.rpy.common as com
from rpy2.robjects import pandas2ri
import networkx as nx
import matplotlib.pyplot as plt
# The helpers below rely on a global R handle and the bnlearn R package
# (e.g. bnlearn.amat(...), r['dev.off']()); these bindings are assumed to be
# configured as follows.
r = robjects.r
bnlearn = importr('bnlearn')
#============================================================#
# Utility functions and Misc
#============================================================#
def write_to_file(filename,text):
with open(filename,'w') as f:
f.write(text)
# Close X11 window
def dev_off():
r['dev.off']()
#============================================================#
# Methods for Plotting
#============================================================#
# visualize graph from adjacence matrix r_graph
# for quick usage: set simple=True (by default)
# otherwise, function allows customize some properties of the graph
def nx_plot(r_graph, cols_names, simple=True, labels=None, graph_layout='shell',
node_size=1600, node_color='blue', node_alpha=0.3,
node_text_size=12,
edge_color='blue', edge_alpha=0.3, edge_tickness=1,
edge_text_pos=0.3,
text_font='sans-serif'):
#G = nx.Graph()
dg = nx.DiGraph()
edges = []
np_amat = np.asarray(bnlearn.amat(r_graph))
for ri in range(np_amat.shape[0]):
for ci in range(np_amat.shape[1]):
if np_amat[ri,ci] == 1:
#G.add_edge(cols_names[ri],cols_names[ci])
dg.add_edge(cols_names[ri],cols_names[ci])
edges.append((cols_names[ri],cols_names[ci]))
#import pdb;pdb.set_trace()
if simple:
if graph_layout=='spectral':
nx.draw_spectral(dg,font_size=node_text_size)
elif graph_layout=='random':
nx.draw_random(dg,font_size=node_text_size)
elif graph_layout=='circular':
nx.draw_circular(dg,font_size=node_text_size)
elif graph_layout=='spring':
nx.draw_spring(dg,font_size=node_text_size)
else:
nx.draw(dg,font_size=node_text_size)
else:
draw_graph(edges,directed=True, labels=labels, graph_layout=graph_layout,
node_size=node_size, node_color=node_color, node_alpha=node_alpha,
node_text_size=node_text_size,
edge_color=edge_color, edge_alpha=edge_alpha, edge_tickness=edge_tickness,
edge_text_pos=edge_text_pos,
text_font=text_font)
#nxlib.draw_graph(dg,labels=cols_names)
def nx_plot2(r_graph,cols_names,is_bnlearn=True):
G = nx.Graph()
dg = nx.DiGraph()
if is_bnlearn:
np_amat = np.asarray(bnlearn.amat(r_graph))
for ri in range(np_amat.shape[0]):
for ci in range(np_amat.shape[1]):
if np_amat[ri,ci] == 1:
G.add_edge(cols_names[ri],cols_names[ci])
dg.add_edge(cols_names[ri],cols_names[ci])
else:
np_amat = np.asarray(r_graph)
for ri in range(np_amat.shape[0]):
for ci in range(np_amat.shape[1]):
if np_amat[ri,ci] >= 0:
#G.add_weighted_edges_from([(cols_names[ri],cols_names[ci],{'weight': np_amat[ri,ci]})])
G.add_edge(cols_names[ri],cols_names[ci],weight=np_amat[ri,ci])
#dg.add_weighted_edges_from([(cols_names[ri],cols_names[ci],np_amat[ri,ci])])
#nx.draw(G,nx.shell_layout)
nx.draw(G)
#nxlib.draw_graph(dg,labels=cols_names)
# a more generic graph plotting function, using networkx lib
# graph is a list of edges
def draw_graph(graph, directed=True, labels=None, graph_layout='shell',
node_size=1600, node_color='blue', node_alpha=0.3,
node_text_size=12,
edge_color='blue', edge_alpha=0.3, edge_tickness=1,
edge_text_pos=0.3,
text_font='sans-serif'):
# create networkx graph
#G=nx.Graph()
if directed:
G = nx.DiGraph()
else:
G = nx.Graph()
# add edges
for edge in graph:
G.add_edge(edge[0], edge[1])
# these are different layouts for the network you may try
# shell seems to work best
if graph_layout == 'spring':
graph_pos=nx.spring_layout(G)
elif graph_layout == 'spectral':
graph_pos=nx.spectral_layout(G)
elif graph_layout == 'random':
graph_pos=nx.random_layout(G)
else:
graph_pos=nx.shell_layout(G)
# draw graph
nx.draw_networkx_nodes(G,graph_pos,node_size=node_size,
alpha=node_alpha, node_color=node_color)
nx.draw_networkx_edges(G,graph_pos,width=edge_tickness,
alpha=edge_alpha,edge_color=edge_color)
nx.draw_networkx_labels(G, graph_pos,font_size=node_text_size,
font_family=text_font)
"""
if labels is None:
labels = range(len(graph))
edge_labels = dict(zip(graph, labels))
"""
if labels is not None:
edge_labels = dict(zip(graph, labels))
nx.draw_networkx_edge_labels(G, graph_pos, edge_labels=edge_labels,
label_pos=edge_text_pos)
# show graph
plt.show()
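# Illustrative call of draw_graph (hypothetical edge list, not part of the
# original module):
#
#     edges = [('A', 'B'), ('A', 'C'), ('B', 'C')]
#     draw_graph(edges, directed=True, graph_layout='shell')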
#============================================================#
# bnlearn wrapper APIs
#============================================================#
###
# Construct list of arcs used for blacklisting/whitelisting
# arc list is a list of arcs. For example:
# arc_list =
# [['A','B'] , ['A','C']]
#
# return data frame in the following format
# from to
# 0 A B
# 1 A C
###
def construct_arcs_frame(arc_list):
data_frame = DataFrame(data=np.array(arc_list),columns=['from','to'])
return data_frame
def print_bw_rules():
rules = """
============================================================
Blacklisting Rules:
-------------------
    1. any arc blacklisted in one of its possible directions is never present in the graph:
       if A-->B is blacklisted (but B-->A is not), then neither A-->B nor the undirected
       arc A--B can appear in the graph, while B-->A still may.
2. any arc blacklisted in both directions, as well as the corresponding
undirected arc, is never present in the graph.
B(A-->B,B-->A) => B(A--B)
Whitelisting Rules:
-------------------
1. arcs whitelisted in one direction only (i.e. A-->B is whitelisted but B-->A is not)
have the respective reverse arcs blacklisted,
and are always present in the graph.
W(A-->B) => B(B-->A,A--B)
2. arcs whitelisted in both directions (i.e. both A--> B and B-->A are whitelisted)
are present in the graph,
but their direction is set by the learning algorithm.
3. any arc whitelisted and blacklisted at the same time is assumed to be whitelisted,
and is thus removed from the blacklist.
============================================================
"""
print rules
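# Hedged sketch (added for illustration, not in the original module): shows how the arc
# frames built by construct_arcs_frame() could constrain structure learning. The
# 'blacklist'/'whitelist' keyword names mirror the R bnlearn API and are assumed to pass
# through rpy2 unchanged.
def example_learn_with_constraints(data_frame, black_arcs, white_arcs):
    blacklist = construct_arcs_frame(black_arcs)
    whitelist = construct_arcs_frame(white_arcs)
    return bnlearn.gs(data_frame, blacklist=blacklist, whitelist=whitelist)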
def convert_pymat_to_rfactor(py_mat):
mat_shape = py_mat.shape
r_factor_vec = r.factor(py_mat)
r_factor_mat = r.matrix(r_factor_vec, nrow=mat_shape[1], byrow=True)
return np.array(r_factor_mat).reshape(mat_shape[0],mat_shape[1],order='C')
def construct_data_frame(data_mat,columns=[]):
if len(columns) == 0:
column_names = range(data_mat.shape[1])
else:
column_names = columns
return DataFrame(data=data_mat,columns=column_names)
"""
def py_bnlearn(data_frame,method='gs',blacklist=None, whitelist=None):
# For hill-climbing, the data must be real or factor
#
if method == 'hc':
bn_structure = bnlearn.hc(data_frame)
else:
bn_structure = bnlearn.gs(data_frame)
return bn_structure
"""
#============================================================#
# APIs related to bn_learn structure
#============================================================#
#=======================|
# bn structure and graph|
#=======================|
def acyclic(bn_structure):
return bool(bnlearn.acyclic(bn_structure)[0])
def amat(bn_structure):
return np.array(bnlearn.amat(bn_structure))
def py_get_amat(bn_structure):
return np.array(bnlearn.amat(bn_structure))
#=======================|
# Arcs |
#=======================|
def narcs(bn_structure):
return bnlearn.narcs(bn_structure)[0]
def arcs(bn_structure):
arcs = np.array(bnlearn.arcs(bn_structure))
ncols = 2
nrows = len(arcs) / 2
arcs = arcs.reshape(nrows,ncols,order='F')
return arcs
def directed_arcs(bn_structure):
arcs = np.array(bnlearn.directed_arcs(bn_structure))
ncols = 2
nrows = len(arcs) / 2
arcs = arcs.reshape(nrows,ncols,order='F')
return arcs
def undirected_arcs(bn_structure):
arcs = np.array(bnlearn.undirected_arcs(bn_structure))
ncols = 2
nrows = len(arcs) / 2
arcs = arcs.reshape(nrows,ncols,order='F')
return arcs
def incoming_arcs(bn_structure, node_name):
arcs = np.array(bnlearn.incoming_arcs(bn_structure, node_name))
ncols = 2
nrows = len(arcs) / 2
arcs = arcs.reshape(nrows,ncols,order='F')
return arcs
def outgoing_arcs(bn_structure, node_name):
arcs = np.array(bnlearn.outgoing_arcs(bn_structure, node_name))
ncols = 2
nrows = len(arcs) / 2
arcs = arcs.reshape(nrows,ncols,order='F')
return arcs
#=======================|
# Nodes |
#=======================|
def nnodes(bn_structure):
return bnlearn.nnodes(bn_structure)[0]
def degree(bn_structure, node_name):
return bnlearn.degree(bn_structure, node_name)[0]
def in_degree(bn_structure, node_name):
return bnlearn.in_degree(bn_structure, node_name)[0]
def out_degree(bn_structure, node_name):
return bnlearn.out_degree(bn_structure, node_name)[0]
def root_nodes(bn_structure):
return np.array(bnlearn.root_nodes(bn_structure))
def leaf_nodes(bn_structure):
return np.array(bnlearn.leaf_nodes(bn_structure))
def children(bn_structure, node_name):
return np.array(bnlearn.children(bn_structure, node_name))
def parents(bn_structure, node_name):
return np.array(bnlearn.parents(bn_structure, node_name))
def nbr(bn_structure, node_name):
return np.array(bnlearn.nbr(bn_structure, node_name))
#=======================|
# bn fit |
#=======================|
###
# To fit data to bn structure, the graph must be completely directed
###
def py_bn_fit(bn_structure,data_frame):
fit = bnlearn.bn_fit(bn_structure,data_frame)
return fit
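# Illustrative helper (an addition, not original code): enforces the "completely directed"
# requirement stated above before handing the graph to bn.fit.
def example_safe_bn_fit(bn_structure, data_frame):
    if len(undirected_arcs(bn_structure)) > 0:
        raise ValueError("graph still has undirected arcs; orient them before fitting")
    return py_bn_fit(bn_structure, data_frame)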
def py_get_node_cond_mat(fit,node_indx):
"""
Each item in fit is a list vector with dimension attributes
fit[node_indx] has 4 attributes ['node', 'parents', 'children', 'prob']
"""
node_fit = fit[node_indx]
node = node_fit[0]
parents = node_fit[1]
children = node_fit[2]
prob = node_fit[3]
"""
prob is a vector Array type in R, which contains the conditional
probability table of this node.
prob is a (n_0 x n_1 x ... x n_parents) matrix, where each n_i is the number
of discrete values of each node in the list prob_dimnames
prob_dimnames contains the name of each dimension.
"""
prob_dimnames = np.array(prob.dimnames.names)
prob_factors = np.array(prob.dimnames)
prob_mat = np.array(prob)
#prob_frame = DataFrame(data=prob_mat[0],columns=prob_dimnames)
return prob_dimnames,prob_factors,prob_mat
def bn_fit_barchart(fit, node_idx):
print bnlearn.bn_fit_barchart(fit[node_idx])
def bn_fit_dotplot(fit, node_idx):
print bnlearn.bn_fit_dotplot(fit[node_idx])
#==========================================================================#
#==========================================================================#
#==========================================================================#
#============================================================#
# Use R bnlearn to learn the Bayes network structure
#============================================================#
### BN Learn
## load some R libs
r = robjects.r
utils = importr("utils")
bnlearn = importr("bnlearn")
#rgraphviz = importr("Rgraphviz")
pandas2ri.activate() ### this is important to seamlessly convert from pandas to R data frame
"""
a = com.load_data('learning.test')
#d = construct_data_frame(a)
gs = py_bnlearn(a)
amat = py_get_amat(gs)
#fit = py_bn_fit(gs,a)
"""
|
gpl-2.0
|
letsgoexploring/data
|
useconomicdata/python/usProductionData.py
|
1
|
15611
|
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as dts
from fredpy import series, window_equalize
import pandas as pd
import runProcs
# get_ipython().magic('matplotlib inline')
# In[2]:
# 0. Setup: Formatting commands and definitions.
# 0.1 general plot settings
# Make all plotted axis labels and tick labels bold 15 pt font
font = {'weight' : 'bold',
'size' : 15}
axes={'labelweight' : 'bold'}
plt.rc('font', **font)
plt.rc('axes', **axes)
# Add some space around the tick labels for better readability
plt.rcParams['xtick.major.pad']='8'
plt.rcParams['ytick.major.pad']='8'
# 0.2 Formatter for inserting commas in y axis labels with magnitudes in the thousands
def func(x, pos): # formatter function takes tick label and tick position
s = '{:0,d}'.format(int(x))
return s
y_format = plt.FuncFormatter(func) # make formatter
# 0.3 Format the x axis ticks
years2,years4,years5,years10,years15= dts.YearLocator(2),dts.YearLocator(4),dts.YearLocator(5),dts.YearLocator(10),dts.YearLocator(15)
# 0.4 y label locator for vertical axes plotting gdp
majorLocator_y = plt.MultipleLocator(3)
majorLocator_shares = plt.MultipleLocator(0.2)
# In[3]:
# 1. Setup for the construction of K and A
# 1.1 Parameters for the model
alpha = 0.35
# 1.2 If output_solow == True, then Y = C + I. Otherwise Y = C + I + G + NX (default)
output_solow = False
# 1.3 Define the function for computing the capital series
def capitalSeries(i,k0,delta):
t0 = len(i)-1
k = [k0]
for t in range(t0):
k.append(i[t]+(1-delta)*k[t])
return np.array(k)
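# Quick sanity check (added illustration with made-up numbers, not data used below):
# the recursion is K_{t+1} = I_t + (1 - delta)*K_t, so investment of 10 exactly offsets
# 10% depreciation on a stock of 100 and the stock stays constant.
assert np.allclose(capitalSeries([10, 10, 10], 100, 0.1), 100)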
# In[4]:
# 2. Import and manage data from FRED
# 2.1 Annual data
investmentA = series('GPDIA')
consumptionA = series('PCECA')
governmentA = series('GCEA')
exportsA = series('EXPGSA')
importsA = series('IMPGSA')
netExportsA = series('A019RC1A027NBEA')
deflatorA = series('A191RD3A086NBEA')
depreciationA = series('Y0000C1A027NBEA')
gdpA = series('GDPA')
tfpA = series('GDPA')
capitalA = series('GDPA')
laborA = series('B4701C0A222NBEA')# BEA index: fred('HOANBS') / .quartertoannual(method='AVG')
# annualSeries = [investmentA,consumptionA,governmentA,exportsA,importsA,netExportsA,deflatorA,depreciationA,gdpA,tfpA,capitalA,laborA]
investmentA,consumptionA,governmentA,netExportsA,exportsA,importsA,deflatorA,depreciationA,gdpA,tfpA,capitalA,laborA = window_equalize([investmentA,consumptionA,governmentA,netExportsA,exportsA,importsA,deflatorA,depreciationA,gdpA,tfpA,capitalA,laborA])
# 2.2 Compute real annual data series
investmentA.data= 100*investmentA.data/deflatorA.data
consumptionA.data = 100*consumptionA.data/deflatorA.data
governmentA.data = 100*governmentA.data/deflatorA.data
exportsA.data = 100*exportsA.data/deflatorA.data
importsA.data = 100*importsA.data/deflatorA.data
netExportsA.data = 100*netExportsA.data/deflatorA.data
gdpA.data= 100*gdpA.data/deflatorA.data
TA = len(investmentA.data)
# 2.3 Convert labor from millions of hours to billions
laborA.data = laborA.data/1000
# 2.4 Quarterly data
investmentQ = series('GPDI')
investmentQ4 = series('GPDI')
consumptionQ = series('PCEC')
governmentQ = series('GCE')
exportsQ = series('EXPGS')
importsQ = series('IMPGS')
netExportsQ = series('NETEXP')
deflatorQ = series('GDPDEF')
gdpQ = series('GDP')
tfpQ = series('GDP')
capitalQ = series('GDP')
laborQ = series('HOANBS') # L = fred('B4701C0A222NBEA')
# quarterlySeries = [investmentQ,investmentQ4,consumptionQ,governmentQ,exportsQ,importsQ,netExportsQ,deflatorQ,gdpQ,tfpQ,capitalQ,laborQ]
investmentQ,investmentQ4,consumptionQ,governmentQ,netExportsQ,exportsQ,importsQ,deflatorQ,gdpQ,tfpQ,capitalQ,laborQ = window_equalize([investmentQ,investmentQ4,consumptionQ,governmentQ,netExportsQ,exportsQ,importsQ,deflatorQ,gdpQ,tfpQ,capitalQ,laborQ])
# 2.5 Compute real quarterly data series
investmentQ.data= 100*investmentQ.data/deflatorQ.data
investmentQ4.data= 100*investmentQ4.data/deflatorQ.data
consumptionQ.data = 100*consumptionQ.data/deflatorQ.data
governmentQ.data = 100*governmentQ.data/deflatorQ.data
netExportsQ.data = 100*netExportsQ.data/deflatorQ.data
exportsQ.data = 100*exportsQ.data/deflatorQ.data
importsQ.data = 100*importsQ.data/deflatorQ.data
gdpQ.data= 100*gdpQ.data/deflatorQ.data
TQ = len(investmentQ.data)
# 2.6 Express investment at a quarterly rate (the FRED flows are annualized), for use in the quarterly perpetual inventory calculation
investmentQ4.data= investmentQ.data/4
realGdpQ = gdpQ.data # gdpQ was already deflated in 2.5, so no further deflation is needed
# 2.7 Find the base year for the deflator:
baseYear = deflatorA.units[6:10]
laborBaseYear= laborQ.units[6:10]
# In[5]:
# 3. Parameter calibration using the annual series
# 3.1 Use Y = C + I as the measure for output if that was requested above
if output_solow == True:
y0A= consumptionA.data+investmentA.data
gdpA.data = y0A
y0Q = consumptionQ.data+investmentQ.data
gdpQ.data = y0Q
# 3.2 form the ratios of depreciation and investment to output
# depreciationYRatio= np.mean([d/y for d,y in zip(depreciationA.data,gdpA.data)])
iYRatio = np.mean(investmentA.data/gdpA.data)
# 3.3 compute the annual growth rates of output and investment
growthY = (gdpA.data[-1]/gdpA.data[0])**(1/(TA-1))-1
growthI = (investmentA.data[-1]/investmentA.data[0])**(1/(TA-1))-1
growthL = (laborA.data[-1]/laborA.data[0])**(1/(TA-1))-1
g = growthY
n = growthL
# 3.4 Compute delta based on requirement that K/Y = 2.5
delta = iYRatio/2.5-g-n
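# Where the line above comes from: the accumulation identity K_{t+1} = I_t + (1-delta)*K_t
# implies capital grows at rate (I/Y)/(K/Y) - delta. Assuming, as this script does, that
# capital grows at g + n on the balanced growth path and targeting K/Y = 2.5 gives
# delta = iYRatio/2.5 - g - n.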
# 3.5 print the computed rates for inspection
print('gI:' ,growthI)
print('gY:' ,growthY)
print('gL:' ,growthL)
print('delta:',delta)
print('s:', iYRatio)
print('g:', g)
# In[6]:
# 4. Implement the perpetual inventory method
# 4.1 Annual capital series
k0A = gdpA.data[0]*iYRatio/(delta + g + n)
capitalA.data = capitalSeries(investmentA.data,k0A,delta)
# 4.2 Quarterly capital series
k0Q = gdpQ.data[0]*iYRatio/(delta + g + n)
capitalQ.data = capitalSeries(investmentQ4.data,k0Q,delta/4)
# In[7]:
# 5. Plot the capital series. Note that the annual and quarterly series should and do align approximately.
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot_date(capitalA.datetimes,capitalA.data/1000,'b-',lw = 3)
ax.plot_date(capitalQ.datetimes,capitalQ.data/1000,'r-',lw = 3)
ax.xaxis.set_major_locator(years10)
ax.set_ylabel('Trillions of \n '+baseYear+' $')
capitalA.recessions()
fig.autofmt_xdate()
plt.title('Capital Stock')
ax.legend(['Annual','Quarterly'],loc='upper left')
ax.grid(True)
# plt.savefig('fig_US_Production_Capital_QA.png',bbox_inches='tight')
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot_date(capitalA.datetimes,capitalA.data/1000,'b-',lw = 3)
ax.xaxis.set_major_locator(years10)
ax.set_ylabel('Trillions of \n '+baseYear+' $')
capitalA.recessions()
fig.autofmt_xdate()
plt.title('Capital Stock: Annual')
ax.grid(True)
# plt.savefig('fig_US_Production_Capital_A.png',bbox_inches='tight')
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot_date(capitalQ.datetimes,capitalQ.data/1000,'b-',lw = 3)
ax.xaxis.set_major_locator(years10)
ax.set_ylabel('Trillions of \n '+baseYear+' $')
capitalQ.recessions()
fig.autofmt_xdate()
plt.title('Capital Stock: Quarterly')
ax.grid(True)
# plt.savefig('../img/fig_US_Production_Capital_A.png',bbox_inches='tight')
# In[8]:
# 6. Save data to csv files
# 6.1 Annual data
YearA = [a[0:4] for a in capitalA.dates]
OutputA = [round(x,1) for x in gdpA.data]
ConsumptionA= [round(x,1) for x in consumptionA.data]
InvestmentA = [round(x,1) for x in investmentA.data]
GovernmentA = [round(x,1) for x in governmentA.data]
ImportsA = [round(x,1) for x in importsA.data]
ExportsA = [round(x,1) for x in exportsA.data]
NetExportsA = [round(x,1) for x in netExportsA.data]
CapitalA = [round(x,1) for x in capitalA.data]
LaborA = [round(x,4) for x in laborA.data]
columnsA = ['Year','GDP [Bil. of '+baseYear+' Dollars]','Consumption [Bil. of '+baseYear+' Dollars]','Investment [Bil. of '+baseYear+' Dollars]','Government Purchases [Bil. of '+baseYear+' Dollars]','Exports [Bil. of '+baseYear+' Dollars]','Imports [Bil. of '+baseYear+' Dollars]','Net Exports [Bil. of '+baseYear+' Dollars]','Capital [Bil. of '+baseYear+' Dollars]','Labor [Bil. of Hours]']
df = pd.DataFrame({
'Year':YearA,
'GDP [Bil. of '+baseYear+' Dollars]':OutputA,
'Consumption [Bil. of '+baseYear+' Dollars]':ConsumptionA,
'Investment [Bil. of '+baseYear+' Dollars]':InvestmentA,
'Government Purchases [Bil. of '+baseYear+' Dollars]':GovernmentA,
'Exports [Bil. of '+baseYear+' Dollars]':ExportsA,
'Imports [Bil. of '+baseYear+' Dollars]':ImportsA,
'Net Exports [Bil. of '+baseYear+' Dollars]':NetExportsA,
'Capital [Bil. of '+baseYear+' Dollars]':CapitalA,
'Labor [Bil. of Hours]':LaborA})
df = df[columnsA]
df.to_csv('../csv/US_Production_A_Data.csv',index=False)
# 6.2 Quarterly data
DateQ = [a for a in capitalQ.dates]
OutputQ = [round(x,1) for x in gdpQ.data]
ConsumptionQ= [round(x,1) for x in consumptionQ.data]
InvestmentQ = [round(x,1) for x in investmentQ.data]
GovernmentQ = [round(x,1) for x in governmentQ.data]
ImportsQ = [round(x,1) for x in importsQ.data]
ExportsQ = [round(x,1) for x in exportsQ.data]
NetExportsQ = [round(x,1) for x in netExportsQ.data]
CapitalQ = [round(x,1) for x in capitalQ.data]
LaborQ = [round(x,1) for x in laborQ.data]
columnsQ = ['Date','GDP [Bil. of '+baseYear+' Dollars]','Consumption [Bil. of '+baseYear+' Dollars]','Investment [Bil. of '+baseYear+' Dollars]','Government Purchases [Bil. of '+baseYear+' Dollars]','Exports [Bil. of '+baseYear+' Dollars]','Imports [Bil. of '+baseYear+' Dollars]','Net Exports [Bil. of '+baseYear+' Dollars]','Capital [Bil. of '+baseYear+' Dollars]','Labor [Index: '+laborBaseYear+'=100]']
df = pd.DataFrame({
'Date':DateQ,
'GDP [Bil. of '+baseYear+' Dollars]':OutputQ,
'Consumption [Bil. of '+baseYear+' Dollars]':ConsumptionQ,
'Investment [Bil. of '+baseYear+' Dollars]':InvestmentQ,
'Government Purchases [Bil. of '+baseYear+' Dollars]':GovernmentQ,
'Exports [Bil. of '+baseYear+' Dollars]':ExportsQ,
'Imports [Bil. of '+baseYear+' Dollars]':ImportsQ,
'Net Exports [Bil. of '+baseYear+' Dollars]':NetExportsQ,
'Capital [Bil. of '+baseYear+' Dollars]':CapitalQ,
'Labor [Index: '+laborBaseYear+'=100]':LaborQ})
df = df[columnsQ]
df.to_csv('../csv/US_Production_Q_Data.csv',index=False)
# In[9]:
# 7. Compute the Solow residuals:
# 7.1 Annual residual
capitalA = capitalA.apc()
tfpA = tfpA.apc()
laborA = laborA.apc()
gdpA = gdpA.apc()
consumptionA = consumptionA.apc()
investmentA = investmentA.apc()
governmentA = governmentA.apc()
exportsA = exportsA.apc()
importsA = importsA.apc()
# netExportsA = netExportsA.apc()
gYA = gdpA.data
gLA = laborA.data
gKA = capitalA.data
tfpA.data = gYA - alpha*gKA - (1-alpha)*gLA
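# Growth-accounting identity behind the line above: with Y = A * K**alpha * L**(1-alpha),
# log-differencing gives gY = gA + alpha*gK + (1-alpha)*gL, so TFP (Solow residual) growth
# is recovered as gA = gY - alpha*gK - (1-alpha)*gL. The same formula is applied to the
# quarterly series below.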
# 7.2 Quarterly residual
capitalQ = capitalQ.apc()
tfpQ = tfpQ.apc()
laborQ = laborQ.apc()
gdpQ = gdpQ.apc()
consumptionQ = consumptionQ.apc()
investmentQ = investmentQ.apc()
governmentQ = governmentQ.apc()
exportsQ = exportsQ.apc()
importsQ = importsQ.apc()
netExportsQ.data = np.array(netExportsQ.data)
# netExportsQ = netExportsQ.apc()
gYQ = gdpQ.data
gLQ = laborQ.data
gKQ = capitalQ.data
tfpQ.data = gYQ - alpha*gKQ - (1-alpha)*gLQ
# In[10]:
# 8. Construct some plots
# 8.1 Annual GDP growth and the Solow residual
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot_date(gdpA.datetimes,gdpA.data,'b-',lw = 3)
ax.plot_date(tfpA.datetimes,tfpA.data,'g-',lw = 3)
ax.xaxis.set_major_locator(years10)
ax.set_ylabel('%')
gdpA.recessions()
fig.autofmt_xdate()
ax.grid(True)
ax.legend(['GDP growth','Solow Residual'],bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.,fontsize=15)
# plt.savefig('fig_US_Production_ya_growth_A.png',bbox_inches='tight')
# 8.2 Figure for website: Annual growth in Y, L, K, and A
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(2,2,1)
ax.plot_date(tfpA.datetimes,tfpA.data,'b-',lw = 3,alpha = 0.75)
ax.set_title('TFP Growth')
ax.xaxis.set_major_locator(years10)
ax.set_ylabel('%')
tfpA.recessions()
fig.autofmt_xdate()
ax.locator_params(axis='y',nbins=6)
ax.grid(True)
ax = fig.add_subplot(2,2,2)
ax.plot_date(gdpA.datetimes,gdpA.data,'b-',lw = 3,alpha = 0.75)
ax.set_title('Real GDP Growth')
ax.xaxis.set_major_locator(years10)
ax.locator_params(axis='y',nbins=6)
gdpA.recessions()
fig.autofmt_xdate()
ax.grid(True)
ax = fig.add_subplot(2,2,3)
ax.plot_date(laborA.datetimes,laborA.data,'b-',lw = 3,alpha = 0.75)
ax.xaxis.set_major_locator(years10)
laborA.recessions()
ax.set_ylabel('%')
ax.set_title('Labor Growth')
ax.locator_params(axis='y',nbins=6)
fig.autofmt_xdate()
ax.grid(True)
ax = fig.add_subplot(2,2,4)
ax.plot_date(capitalA.datetimes,capitalA.data,'b-',lw = 3,alpha = 0.75)
ax.xaxis.set_major_locator(years10)
ax.set_title('Capital Growth')
ax.locator_params(axis='y',nbins=6)
capitalA.recessions()
fig.autofmt_xdate()
ax.grid(True)
plt.savefig('../img/fig_US_Production_A_site.png',bbox_inches='tight')
# 8.3 Quarterly GDP growth and the Solow residual
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot_date(gdpQ.datetimes,gdpQ.data,'b-',lw = 3)
ax.plot_date(tfpQ.datetimes,tfpQ.data,'g-',lw = 3)
ax.xaxis.set_major_locator(years10)
ax.set_ylabel('%')
gdpQ.recessions()
fig.autofmt_xdate()
ax.grid(True)
ax.legend(['GDP growth','Solow Residual'],bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.,fontsize=15)
# plt.savefig('../img/fig_US_Production_ya_growth_Q.png',bbox_inches='tight')
# In[11]:
# 9. Save growth rate data to csv files
# 9.1 Annual data
YearA = [a[0:4] for a in tfpA.dates]
CapitalA = [round(x,1) for x in capitalA.data]
LaborA = [round(x,1) for x in laborA.data]
OutputA = [round(x,1) for x in gdpA.data]
ConsumptionA = [round(x,1) for x in consumptionA.data]
InvestmentA = [round(x,1) for x in investmentA.data]
GovernmentA = [round(x,1) for x in governmentA.data]
ExportsA = [round(x,1) for x in exportsA.data]
ImportsA = [round(x,1) for x in importsA.data]
NetExportsA = [round(x,1) for x in netExportsA.data]
columnsA = ['Year','GDP Growth','Consumption Growth','Investment Growth','Government Purchases Growth','Exports Growth','Imports Growth','Capital Growth','Labor Growth']
data = [YearA,OutputA,ConsumptionA,InvestmentA,GovernmentA,ExportsA,ImportsA,CapitalA,LaborA]
dA ={}
for n,c in enumerate(columnsA):
dA[columnsA[n]]=data[n]
df = pd.DataFrame(dA)
df = df[columnsA]
df.to_csv('../csv/US_Production_A_Data_Growth_Rates.csv',index=False)
# 9.2 Quarterly data
DateQ = [a for a in tfpQ.dates]
CapitalQ = [round(x,1) for x in capitalQ.data]
LaborQ = [round(x,1) for x in laborQ.data]
OutputQ = [round(x,1) for x in gdpQ.data]
ConsumptionQ = [round(x,1) for x in consumptionQ.data]
InvestmentQ = [round(x,1) for x in investmentQ.data]
GovernmentQ = [round(x,1) for x in governmentQ.data]
ExportsQ = [round(x,1) for x in exportsQ.data]
ImportsQ = [round(x,1) for x in importsQ.data]
NetExportsQ = [round(x,1) for x in netExportsQ.data]
columnsQ = ['Date','GDP Growth','Consumption Growth','Investment Growth','Government Purchases Growth','Exports Growth','Imports Growth','Capital Growth','Labor Growth']
data = [DateQ,OutputQ,ConsumptionQ,InvestmentQ,GovernmentQ,ExportsQ,ImportsQ,CapitalQ,LaborQ]
dQ ={}
for n,c in enumerate(columnsQ):
dQ[columnsQ[n]]=data[n]
df = pd.DataFrame(dQ)
df = df[columnsQ]
df.to_csv('../csv/US_Production_Q_Data_Growth_Rates.csv',index=False)
# In[12]:
# 10. Export notebook to python script
progName = 'usProductionData'
runProcs.exportNb(progName)
|
mit
|
paultcochrane/bokeh
|
examples/charts/file/boxplot.py
|
37
|
1117
|
from collections import OrderedDict
import pandas as pd
from bokeh.charts import BoxPlot, output_file, show
from bokeh.sampledata.olympics2014 import data
# create a DataFrame with the sample data
df = pd.io.json.json_normalize(data['data'])
# filter by countries with at least one medal and sort
df = df[df['medals.total'] > 0]
df = df.sort("medals.total", ascending=False)
# get the countries and group the data by medal type
countries = df.abbr.values.tolist()
gold = df['medals.gold'].astype(float).values
silver = df['medals.silver'].astype(float).values
bronze = df['medals.bronze'].astype(float).values
# build a dict containing the grouped data
medals = OrderedDict(bronze=bronze, silver=silver, gold=gold)
# any of the following commented are valid BoxPlot inputs
#medals = pd.DataFrame(medals)
#medals = list(medals.values())
#medals = tuple(medals.values())
#medals = np.array(list(medals.values()))
output_file("boxplot.html")
boxplot = BoxPlot(
medals, marker='circle', outliers=True, title="boxplot test",
xlabel="medal type", ylabel="medal count", width=800, height=600)
show(boxplot)
|
bsd-3-clause
|
matthiasdiener/spack
|
var/spack/repos/builtin/packages/py-espressopp/package.py
|
3
|
3452
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyEspressopp(CMakePackage):
"""ESPResSo++ is an extensible, flexible, fast and parallel simulation
software for soft matter research. It is a highly versatile software
package for the scientific simulation and analysis of coarse-grained
atomistic or bead-spring models as they are used in soft matter research
"""
homepage = "https://espressopp.github.io"
url = "https://github.com/espressopp/espressopp/tarball/v1.9.4.1"
version('develop', git='https://github.com/espressopp/espressopp.git', branch='master')
version('1.9.5', '13a93c30b07132b5e5fa0d828aa17d79')
version('1.9.4.1', '0da74a6d4e1bfa6a2a24fca354245a4f')
version('1.9.4', 'f2a27993a83547ad014335006eea74ea')
variant('ug', default=False, description='Build user guide')
variant('pdf', default=False, description='Build user guide in pdf format')
variant('dg', default=False, description='Build developer guide')
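    # Illustrative specs (examples added here, not part of the original recipe):
    # `spack install py-espressopp +ug` also builds the HTML user guide, and
    # `spack install py-espressopp +ug +pdf` additionally builds the PDF version.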
depends_on("[email protected]:", type='build')
depends_on("mpi")
depends_on("boost+serialization+filesystem+system+python+mpi", when='@1.9.4:')
extends("python")
depends_on("python@2:2.8")
depends_on("[email protected]:", when='@1.9.4', type=('build', 'run'))
depends_on("[email protected]:", when='@1.9.4.1:', type=('build', 'run'))
depends_on("fftw")
depends_on("py-sphinx", when="+ug", type='build')
depends_on("py-sphinx", when="+pdf", type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-matplotlib', when="+ug", type='build')
depends_on('py-matplotlib', when="+pdf", type='build')
depends_on("texlive", when="+pdf", type='build')
depends_on("doxygen", when="+dg", type='build')
def cmake_args(self):
return [
'-DEXTERNAL_MPI4PY=ON',
'-DEXTERNAL_BOOST=ON',
'-DWITH_RC_FILES=OFF'
]
def build(self, spec, prefix):
with working_dir(self.build_directory):
make()
if '+ug' in spec:
make("ug", parallel=False)
if '+pdf' in spec:
make("ug-pdf", parallel=False)
if '+dg' in spec:
make("doc", parallel=False)
|
lgpl-2.1
|
louisLouL/pair_trading
|
capstone_env/lib/python3.6/site-packages/pandas/core/reshape/util.py
|
20
|
1915
|
import numpy as np
from pandas.core.dtypes.common import is_list_like
from pandas.compat import reduce
from pandas.core.index import Index
from pandas.core import common as com
def match(needles, haystack):
haystack = Index(haystack)
needles = Index(needles)
return haystack.get_indexer(needles)
def cartesian_product(X):
"""
Numpy version of itertools.product or pandas.compat.product.
Sometimes faster (for large inputs)...
Parameters
----------
X : list-like of list-likes
Returns
-------
product : list of ndarrays
Examples
--------
>>> cartesian_product([list('ABC'), [1, 2]])
[array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'),
array([1, 2, 1, 2, 1, 2])]
See also
--------
itertools.product : Cartesian product of input iterables. Equivalent to
nested for-loops.
pandas.compat.product : An alias for itertools.product.
"""
msg = "Input must be a list-like of list-likes"
if not is_list_like(X):
raise TypeError(msg)
for x in X:
if not is_list_like(x):
raise TypeError(msg)
if len(X) == 0:
return []
lenX = np.fromiter((len(x) for x in X), dtype=np.intp)
cumprodX = np.cumproduct(lenX)
a = np.roll(cumprodX, 1)
a[0] = 1
if cumprodX[-1] != 0:
b = cumprodX[-1] / cumprodX
else:
# if any factor is empty, the cartesian product is empty
b = np.zeros_like(cumprodX)
return [np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]),
np.product(a[i]))
for i, x in enumerate(X)]
def _compose2(f, g):
"""Compose 2 callables"""
return lambda *args, **kwargs: f(g(*args, **kwargs))
def compose(*funcs):
"""Compose 2 or more callables"""
assert len(funcs) > 1, 'At least 2 callables must be passed to compose'
return reduce(_compose2, funcs)
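# Illustrative note (added): compose(f, g, h)(x) evaluates f(g(h(x))), i.e. callables are
# applied right-to-left, matching the nesting produced by the repeated _compose2 calls.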
|
mit
|
hfp/tensorflow-xsmm
|
tensorflow/contrib/learn/python/learn/learn_io/data_feeder_test.py
|
25
|
13554
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DataFeeder`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class DataFeederTest(test.TestCase):
# pylint: disable=undefined-variable
"""Tests for `DataFeeder`."""
def setUp(self):
self._base_dir = os.path.join(self.get_temp_dir(), 'base_dir')
file_io.create_dir(self._base_dir)
def tearDown(self):
file_io.delete_recursively(self._base_dir)
def _wrap_dict(self, data, prepend=''):
return {prepend + '1': data, prepend + '2': data}
def _assert_raises(self, input_data):
with self.assertRaisesRegexp(TypeError, 'annot convert'):
data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data):
feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1)
if isinstance(input_data, dict):
for v in list(feeder.input_dtype.values()):
self.assertEqual(expected_np_dtype, v)
else:
self.assertEqual(expected_np_dtype, feeder.input_dtype)
with ops.Graph().as_default() as g, self.session(g):
inp, _ = feeder.input_builder()
if isinstance(inp, dict):
for v in list(inp.values()):
self.assertEqual(expected_tf_dtype, v.dtype)
else:
self.assertEqual(expected_tf_dtype, inp.dtype)
def test_input_int8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int8)
self._assert_dtype(np.int8, dtypes.int8, data)
self._assert_dtype(np.int8, dtypes.int8, self._wrap_dict(data))
def test_input_int16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int16)
self._assert_dtype(np.int16, dtypes.int16, data)
self._assert_dtype(np.int16, dtypes.int16, self._wrap_dict(data))
def test_input_int32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int32)
self._assert_dtype(np.int32, dtypes.int32, data)
self._assert_dtype(np.int32, dtypes.int32, self._wrap_dict(data))
def test_input_int64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.int64)
self._assert_dtype(np.int64, dtypes.int64, data)
self._assert_dtype(np.int64, dtypes.int64, self._wrap_dict(data))
def test_input_uint32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint32)
self._assert_dtype(np.uint32, dtypes.uint32, data)
self._assert_dtype(np.uint32, dtypes.uint32, self._wrap_dict(data))
def test_input_uint64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint64)
self._assert_dtype(np.uint64, dtypes.uint64, data)
self._assert_dtype(np.uint64, dtypes.uint64, self._wrap_dict(data))
def test_input_uint8(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint8)
self._assert_dtype(np.uint8, dtypes.uint8, data)
self._assert_dtype(np.uint8, dtypes.uint8, self._wrap_dict(data))
def test_input_uint16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.uint16)
self._assert_dtype(np.uint16, dtypes.uint16, data)
self._assert_dtype(np.uint16, dtypes.uint16, self._wrap_dict(data))
def test_input_float16(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float16)
self._assert_dtype(np.float16, dtypes.float16, data)
self._assert_dtype(np.float16, dtypes.float16, self._wrap_dict(data))
def test_input_float32(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float32)
self._assert_dtype(np.float32, dtypes.float32, data)
self._assert_dtype(np.float32, dtypes.float32, self._wrap_dict(data))
def test_input_float64(self):
data = np.matrix([[1, 2], [3, 4]], dtype=np.float64)
self._assert_dtype(np.float64, dtypes.float64, data)
self._assert_dtype(np.float64, dtypes.float64, self._wrap_dict(data))
def test_input_bool(self):
data = np.array([[False for _ in xrange(2)] for _ in xrange(2)])
self._assert_dtype(np.bool, dtypes.bool, data)
self._assert_dtype(np.bool, dtypes.bool, self._wrap_dict(data))
def test_input_string(self):
input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)])
self._assert_dtype(input_data.dtype, dtypes.string, input_data)
self._assert_dtype(input_data.dtype, dtypes.string,
self._wrap_dict(input_data))
def _assertAllClose(self, src, dest, src_key_of=None, src_prop=None):
def func(x):
val = getattr(x, src_prop) if src_prop else x
return val if src_key_of is None else src_key_of[val]
if isinstance(src, dict):
for k in list(src.keys()):
self.assertAllClose(func(src[k]), dest)
else:
self.assertAllClose(func(src), dest)
def test_unsupervised(self):
def func(feeder):
with self.cached_session():
inp, _ = feeder.input_builder()
feed_dict_fn = feeder.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[1, 2]], feed_dict, 'name')
data = np.matrix([[1, 2], [2, 3], [3, 4]])
func(data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data), None, n_classes=0, batch_size=1))
def test_data_feeder_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [2, 1], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
def test_epoch(self):
def func(feeder):
with self.cached_session():
feeder.input_builder()
epoch = feeder.make_epoch_variable()
feed_dict_fn = feeder.get_feed_dict_fn()
# First input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Second input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Third input
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [0])
# Back to the first input again, so new epoch.
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[epoch.name], [1])
data = np.matrix([[1, 2], [2, 3], [3, 4]])
labels = np.array([0, 0, 1])
func(data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1))
func(
data_feeder.DataFeeder(
self._wrap_dict(data, 'in'),
self._wrap_dict(labels, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=1))
def test_data_feeder_multioutput_regression(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(out, [[3, 4], [1, 2]], feed_dict, 'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[1, 2], [3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
def test_data_feeder_multioutput_classification(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self._assertAllClose(
out, [[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]], feed_dict,
'name')
x = np.matrix([[1, 2], [3, 4]])
y = np.array([[0, 1, 2], [2, 3, 4]])
func(data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(5, 'out'),
batch_size=2))
def test_streaming_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[[1, 2]], [[3, 4]]], feed_dict, 'name')
self._assertAllClose(out, [[[1], [2]], [[2], [2]]], feed_dict, 'name')
def x_iter(wrap_dict=False):
yield np.array([[1, 2]]) if not wrap_dict else self._wrap_dict(
np.array([[1, 2]]), 'in')
yield np.array([[3, 4]]) if not wrap_dict else self._wrap_dict(
np.array([[3, 4]]), 'in')
def y_iter(wrap_dict=False):
yield np.array([[1], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[1], [2]]), 'out')
yield np.array([[2], [2]]) if not wrap_dict else self._wrap_dict(
np.array([[2], [2]]), 'out')
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=2))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=2))
# Test non-full batches.
func(
data_feeder.StreamingDataFeeder(
x_iter(), y_iter(), n_classes=0, batch_size=10))
func(
data_feeder.StreamingDataFeeder(
x_iter(True),
y_iter(True),
n_classes=self._wrap_dict(0, 'out'),
batch_size=10))
def test_dask_data_feeder(self):
if HAS_PANDAS and HAS_DASK:
x = pd.DataFrame(
dict(
a=np.array([.1, .3, .4, .6, .2, .1, .6]),
b=np.array([.7, .8, .1, .2, .5, .3, .9])))
x = dd.from_pandas(x, npartitions=2)
y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2])))
y = dd.from_pandas(y, npartitions=2)
# TODO(ipolosukhin): Remove or restore this.
# x = extract_dask_data(x)
# y = extract_dask_labels(y)
df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2)
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1],
[0.60000002, 0.2]])
self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]])
# TODO(rohanj): Fix this test by fixing data_feeder. Currently, h5py doesn't
# support permutation based indexing lookups (More documentation at
# http://docs.h5py.org/en/latest/high/dataset.html#fancy-indexing)
def DISABLED_test_hdf5_data_feeder(self):
def func(df):
inp, out = df.input_builder()
feed_dict_fn = df.get_feed_dict_fn()
feed_dict = feed_dict_fn()
self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name')
self.assertAllClose(out, [2, 1], feed_dict, 'name')
try:
import h5py # pylint: disable=g-import-not-at-top
x = np.matrix([[1, 2], [3, 4]])
y = np.array([1, 2])
file_path = os.path.join(self._base_dir, 'test_hdf5.h5')
h5f = h5py.File(file_path, 'w')
h5f.create_dataset('x', data=x)
h5f.create_dataset('y', data=y)
h5f.close()
h5f = h5py.File(file_path, 'r')
x = h5f['x']
y = h5f['y']
func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3))
func(
data_feeder.DataFeeder(
self._wrap_dict(x, 'in'),
self._wrap_dict(y, 'out'),
n_classes=self._wrap_dict(0, 'out'),
batch_size=3))
except ImportError:
print("Skipped test for hdf5 since it's not installed.")
class SetupPredictDataFeederTest(DataFeederTest):
"""Tests for `DataFeeder.setup_predict_data_feeder`."""
def test_iterable_data(self):
# pylint: disable=undefined-variable
def func(df):
self._assertAllClose(six.next(df), [[1, 2], [3, 4]])
self._assertAllClose(six.next(df), [[5, 6]])
data = [[1, 2], [3, 4], [5, 6]]
x = iter(data)
x_dict = iter([self._wrap_dict(v) for v in iter(data)])
func(data_feeder.setup_predict_data_feeder(x, batch_size=2))
func(data_feeder.setup_predict_data_feeder(x_dict, batch_size=2))
if __name__ == '__main__':
test.main()
|
apache-2.0
|
liberatorqjw/scikit-learn
|
sklearn/tree/tests/test_export.py
|
37
|
2897
|
"""
Testing for export functions of decision trees (sklearn.tree.export).
"""
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
def test_graphviz_toy():
"""Check correctness of export_graphviz"""
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"X[0] <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 3. 0.]\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 0. 3.]\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"feature0 <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 3. 0.]\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 0. 3.]\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0)
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"X[0] <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"(...)\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"(...)\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
def test_graphviz_errors():
"""Check for errors of export_graphviz"""
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
if __name__ == "__main__":
import nose
nose.runmodule()
|
bsd-3-clause
|
ThomasMiconi/htmresearch
|
projects/vehicle-control/agent/run_q.py
|
12
|
5498
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from collections import defaultdict
import operator
import time
import numpy
from unity_client.server import Server
from sensorimotor.encoders.one_d_depth import OneDDepthEncoder
from sensorimotor.q_learner import QLearner
ACTIONS = ["-1", "0", "1"]
class Agent(object):
  def __init__(self, positions):
self.encoder = OneDDepthEncoder(positions=positions,
radius=5,
wrapAround=True,
nPerPosition=28,
wPerPosition=3,
minVal=0,
maxVal=1)
self.plotter = Plotter(self.encoder)
    self.learner = QLearner(ACTIONS, n=1008)
self.lastState = None
self.lastAction = None
def sync(self, outputData):
if not ("ForwardsSweepSensor" in outputData and
"steer" in outputData):
print "Warning: Missing data:", outputData
return
if outputData.get("reset"):
print "Reset."
sensor = outputData["ForwardsSweepSensor"]
steer = outputData["steer"]
reward = outputData.get("reward") or 0
encoding = self.encoder.encode(numpy.array(sensor))
if self.lastState is not None:
self.learner.update(self.lastState, str(self.lastAction),
encoding, str(steer), reward)
value = self.learner.value(encoding)
qValues = {}
    for action in ACTIONS:
qValues[action] = self.learner.qValue(encoding, action)
inputData = {}
inputData["qValues"] = qValues
inputData["bestAction"] = self.learner.bestAction(encoding)
self.plotter.update(sensor, encoding, steer, reward, value, qValues)
if outputData.get("reset"):
self.plotter.render()
self.lastState = encoding
self.lastAction = steer
return inputData
class Plotter(object):
def __init__(self, encoder):
self.encoder = encoder
self.sensor = []
self.encoding = []
self.steer = []
self.reward = []
self.value = []
self.qValues = defaultdict(lambda: [])
self.bestAction = []
import matplotlib.pyplot as plt
self.plt = plt
import matplotlib.cm as cm
self.cm = cm
from pylab import rcParams
rcParams.update({'figure.figsize': (6, 9)})
# rcParams.update({'figure.autolayout': True})
rcParams.update({'figure.facecolor': 'white'})
def update(self, sensor, encoding, steer, reward, value, qValues):
self.sensor.append(sensor)
self.encoding.append(encoding)
self.steer.append(steer)
self.reward.append(reward)
self.value.append(value)
for key, value in qValues.iteritems():
self.qValues[key].append(value)
bestAction = int(max(qValues.iteritems(), key=operator.itemgetter(1))[0])
self.bestAction.append(bestAction)
def render(self):
self.plt.figure(1)
self.plt.clf()
n = 7
self.plt.subplot(n,1,1)
self._plot(self.steer, "Steer over time")
self.plt.subplot(n,1,2)
self._plot(self.reward, "Reward over time")
self.plt.subplot(n,1,3)
self._plot(self.value, "Value over time")
self.plt.subplot(n,1,4)
shape = len(self.encoder.positions), self.encoder.scalarEncoder.getWidth()
encoding = numpy.array(self.encoding[-1]).reshape(shape).transpose()
self._imshow(encoding, "Encoding at time t")
self.plt.subplot(n,1,5)
data = self.encoding
w = self.encoder.w
overlaps = [sum(a & b) / float(w) for a, b in zip(data[:-1], data[1:])]
self._plot(overlaps, "Encoding overlaps between consecutive times")
    # for i, action in enumerate(ACTIONS):
# self.plt.subplot(n,1,4+i)
# self._plot(self.qValues[action], "Q value: {0}".format(action))
# self.plt.subplot(n,1,7)
# self._plot(self.bestAction, "Best action")
self.plt.draw()
self.plt.savefig("q-{0}.png".format(time.time()))
def _plot(self, data, title):
self.plt.title(title)
self.plt.xlim(0, len(data))
self.plt.plot(range(len(data)), data)
def _imshow(self, data, title):
self.plt.title(title)
self.plt.imshow(data,
cmap=self.cm.Greys,
interpolation="nearest",
aspect='auto',
vmin=0,
vmax=1)
if __name__ == "__main__":
# complete uniform
# positions = [i*20 for i in range(36)]
# forward uniform
positions = [i*10 for i in range(-18, 18)]
agent = Agent(positions)
Server(agent)
|
agpl-3.0
|
pandas-ml/pandas-ml
|
pandas_ml/skaccessors/gaussian_process.py
|
3
|
2067
|
#!/usr/bin/env python
from pandas_ml.core.accessor import _AccessorMethods, _attach_methods, _wrap_data_func
class GaussianProcessMethods(_AccessorMethods):
"""
Accessor to ``sklearn.gaussian_process``.
"""
_module_name = 'sklearn.gaussian_process'
_method_mapper = dict(predict={'GaussianProcess': '_predict'})
@property
def correlation_models(self):
"""Property to access ``sklearn.gaussian_process.correlation_models``"""
module_name = 'sklearn.gaussian_process.correlation_models'
attrs = ['absolute_exponential', 'squared_exponential',
'generalized_exponential', 'pure_nugget',
'cubic', 'linear']
return _AccessorMethods(self._df, module_name=module_name, attrs=attrs)
@property
def regression_models(self):
"""Property to access ``sklearn.gaussian_process.regression_models``"""
return RegressionModelsMethods(self._df)
@classmethod
def _predict(cls, df, estimator, *args, **kwargs):
data = df.data.values
eval_MSE = kwargs.get('eval_MSE', False)
if eval_MSE:
y, MSE = estimator.predict(data, *args, **kwargs)
if y.ndim == 1:
y = df._constructor_sliced(y, index=df.index)
MSE = df._constructor_sliced(MSE, index=df.index)
else:
y = df._constructor(y, index=df.index)
MSE = df._constructor(MSE, index=df.index)
return y, MSE
else:
y = estimator.predict(data, *args, **kwargs)
if y.ndim == 1:
y = df._constructor_sliced(y, index=df.index)
else:
y = df._constructor(y, index=df.index)
return y
class RegressionModelsMethods(_AccessorMethods):
_module_name = 'sklearn.gaussian_process.regression_models'
_regression_methods = ['constant', 'linear', 'quadratic']
_attach_methods(RegressionModelsMethods, _wrap_data_func, _regression_methods)
|
bsd-3-clause
|
sonnyhu/numpy
|
doc/example.py
|
81
|
3581
|
"""This is the docstring for the example.py module. Modules names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
from __future__ import division, absolute_import, print_function
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead or import individual functions as needed, e.g
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi') :
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
    long_var_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
    .. math:: X(e^{j\omega}) = \sum_{n=-\infty}^{\infty} x(n) e^{-j\omega n}
    And even use a greek symbol like :math:`\omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
    >>> print([x + 3 for x in a])
[4, 5, 6]
    >>> print("a\n\nb")
a
b
"""
pass
|
bsd-3-clause
|
waterponey/scikit-learn
|
sklearn/tree/tests/test_export.py
|
33
|
9901
|
"""
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in, assert_equal, assert_raises
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 1], [-1, 1], [1, 2], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
y_degraded = [1, 1, 1, 1, 1, 1]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=2,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
contents1 = export_graphviz(clf, out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
contents1 = export_graphviz(clf, feature_names=["feature0", "feature1"],
out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
contents1 = export_graphviz(clf, class_names=["yes", "no"], out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
contents1 = export_graphviz(clf, filled=True, impurity=False,
proportion=True, special_characters=True,
rounded=True, out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
contents1 = export_graphviz(clf, max_depth=0,
class_names=True, out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
contents1 = export_graphviz(clf, max_depth=0, filled=True,
out_file=None, node_ids=True)
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=2,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
contents1 = export_graphviz(clf, filled=True,
impurity=False, out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[3.0, 1.0, 0.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="samples = 3\\nvalue = [[3, 0, 0]\\n' \
'[3, 0, 0]]", fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n' \
'[0.0, 1.0, 0.5]]", fillcolor="#e5813986"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'3 [label="samples = 2\\nvalue = [[0, 1, 0]\\n' \
'[0, 1, 0]]", fillcolor="#e58139ff"] ;\n' \
'2 -> 3 ;\n' \
'4 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'2 -> 4 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=2,
criterion="mse",
random_state=2)
clf.fit(X, y)
contents1 = export_graphviz(clf, filled=True, leaves_parallel=True,
out_file=None, rotate=True, rounded=True)
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e5813980"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
# Test classifier with degraded learning set
clf = DecisionTreeClassifier(max_depth=3)
clf.fit(X, y_degraded)
contents1 = export_graphviz(clf, filled=True, out_file=None)
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="gini = 0.0\\nsamples = 6\\nvalue = 6.0", ' \
'fillcolor="#e5813900"] ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=2)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
for finding in finditer("\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
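# --- Usage sketch (illustrative, not part of the test suite): render one of
# --- the toy trees above as DOT text outside the assertion harness. The
# --- graphviz ``dot`` binary would be needed only for actual image output.
if __name__ == '__main__':
    demo_clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)
    print(export_graphviz(demo_clf, out_file=None, filled=True, rounded=True))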
|
bsd-3-clause
|
srjit/fakenewschallange
|
code/python/classifier.py
|
1
|
3897
|
import os
mingw_path = 'C:\\Program Files\\mingw-w64\\x86_64-6.3.0-posix-seh-rt_v5-rev2\\mingw64\\bin'
os.environ['PATH'] = mingw_path + ';' + os.environ['PATH']
import xgboost as xgb
import sklearn.svm
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
from sklearn import linear_model, datasets
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
def train_XGB(data, test):
"""
Arguments:
- `data`:
"""
predictors = ["overlapping","reoccur1", "reoccur2", "reoccur3","reoccur4", "reoccur5", "reoccur6","euclidean","refuting_feature_count","char_length_headline","char_length_body"]#,"cosine"]#,"wmdistance", "euclidean"]
response = data.Stance
# n_estimators = range(50,500, 50)
# depth = range(1,10)
#
# param_grid = dict(n_estimators=n_estimators, max_depth = depth)
#
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=7)
#
# model = xgb.XGBClassifier()
#
# grid_search = GridSearchCV(model, param_grid, scoring="neg_log_loss", n_jobs=-1, cv=kfold)
# grid_result = grid_search.fit(data[predictors], response)
#
# print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
gbm = xgb.XGBClassifier(max_depth=5, n_estimators=300, learning_rate=0.1,objective='multi:softmax').fit(data[predictors], response)
_test = test[["overlapping", "reoccur1", "reoccur2", "reoccur3","reoccur4", "reoccur5", "reoccur6","euclidean","refuting_feature_count","char_length_headline","char_length_body"]]#,"cosine"#,"wmdistance", "euclidean"]]
_predictions = gbm.predict(_test)
predictions = pd.Series(_predictions.tolist())
test["predicted_XGB"] = predictions.values
## Accuracy calculation
test["is_correct_prediction_XGB"] = test["Stance"] == test["predicted_XGB"]
correctly_predicted_rows = test[test['is_correct_prediction_XGB'] == True]
print("Accuracy : ", float(len(correctly_predicted_rows))/len(test))
def train_SVM(data):
predictors = ["overlapping","reoccur1", "reoccur2", "reoccur3", "reoccur4", "reoccur5", "reoccur6","euclidean","refuting_feature_count","char_length_headline","char_length_body"]
response = data.Stance
clf = sklearn.svm.LinearSVC().fit(data[predictors],response)
return clf
def train_logistic(data):
"""
Arguments:
- `data`:
"""
predictors = ["overlapping","reoccur1", "reoccur2", "reoccur3", "reoccur4", "reoccur5", "reoccur6","euclidean""refuting_feature_count","char_length_headline","char_length_body"]
response = data.Stance
logreg = linear_model.LogisticRegression(C=1e5)
logistic_classifier = logreg.fit(data[predictors], response)
return logistic_classifier
def randomForest (train,test):
"""
Arguments:
- `data`:
"""
predictors = ["overlapping","reoccur1", "reoccur2", "reoccur3","reoccur4", "reoccur5", "euclidean","refuting_feature_count","char_length_headline","char_length_body"]#,"cosine"]#,"wmdistance", "euclidean"]
response = train.Stance
_test = test[["overlapping", "reoccur1", "reoccur2", "reoccur3","reoccur4", "reoccur5","euclidean","refuting_feature_count","char_length_headline","char_length_body"]]#,"cosine"]] #,"wmdistance", "euclidean"]]
clf = RandomForestClassifier(n_jobs=2)
clf.fit(train[predictors],response)
_predictions = clf.predict(_test)
predictions = pd.Series(_predictions.tolist())
test["predicted_RF"] = predictions.values
test["is_correct_prediction_RF"] = test["Stance"] == test["predicted_RF"]
correctly_predicted_rows = test[test['is_correct_prediction_RF'] == True]
print("Accuracy for Random Forest : ", float(len(correctly_predicted_rows))/len(test))
# print(" Cross Tab for Random Forest ")
# print (pd.crosstab(test.Stance, test.predicted_RF))
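# --- Illustrative usage sketch (not part of the original pipeline). The tiny
# --- random frame below is fabricated only to show the expected columns and
# --- call pattern; the stance labels follow the usual FNC-1 classes and are
# --- placeholders. Real runs would use the engineered feature files instead.
if __name__ == '__main__':
    import numpy as np
    feature_cols = ["overlapping", "reoccur1", "reoccur2", "reoccur3",
                    "reoccur4", "reoccur5", "reoccur6", "euclidean",
                    "refuting_feature_count", "char_length_headline",
                    "char_length_body"]
    rng = np.random.RandomState(0)
    demo = pd.DataFrame(rng.rand(40, len(feature_cols)), columns=feature_cols)
    demo["Stance"] = rng.choice(["agree", "disagree", "discuss", "unrelated"], 40)
    svm_model = train_SVM(demo)
    print(svm_model.predict(demo[feature_cols])[:5])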
|
gpl-3.0
|
nkoukou/University_Projects_Year_3
|
EDM_Assembly/edm.py
|
1
|
7390
|
'''
Models the EDM experiment electric plate assembly.
'''
import numpy as np
import matplotlib.pylab as plt
import base_class as bc
reload(bc)
class Edm(bc.PotentialGrid):
'''
Represents the EDM experiment electric field plate setup.
'''
def __init__(self, tube_dist, scale):
'''
Input parameters:
tube_dist - distance to the grounded border (dimensionless - integer)
scale - number of grid points per unit length (1/[10mm] - integer)
'''
self.dim = {'x': bc.edm_x, 'u': bc.edm_u}
self.tube_dist = tube_dist
self.scale = scale
self.grid, self.fix = bc.gen_edm_grid(tube_dist, scale)
def set_tube_dist(self, tube_dist, silent=False):
self.tube_dist = int(tube_dist)
self.gen_grid()
if not silent: print self
def set_scale(self, scale, silent=False):
self.scale = int(scale)
self.gen_grid()
if not silent: print self
def __repr__(self):
return "Edm( tube_dist={0}, scale={1} )".format(self.tube_dist,
self.scale)
def gen_grid(self):
'''
Generates 2D grid for EDM experiment.
'''
self.grid, self.fix = bc.gen_edm_grid(self.tube_dist, self.scale)
def calc_efield(self, plot=True):
'''
Calculates and plots electric field along particle path.
'''
h = self.dim['x'][0]/self.scale
mid = self.grid.shape[0]/2
path = self.grid[mid-2:mid+3,:]
efield = []
for i in range(1, path.shape[1]-1):
efx = (+ path[2,i+1] - path[2,i-1])*self.dim['u'][0]/(2*h)
efy = ((- path[0,i] + 8*path[1,i] - 8*path[3,i] + path[4,i]
)*self.dim['u'][0])/(12*h)
efield.append(np.sqrt(efx*efx + efy*efy))
path = path[:,1:-1]
edge = len(path[2])*self.dim['x'][0]/self.scale/2.
xaxis = np.linspace(-edge, edge, len(path[2]))
if not plot: return xaxis, efield
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.set_title(r'Cross section of electric field along particle path',
# fontsize=55)
ax.set_xlabel(r'$system\ size\ (%s)$'% (self.dim['x'][1]), fontsize=37)
ax.set_ylabel(r'$electric\ field\ E\ (MVm^{-1})$', fontsize=37)
ax.tick_params(axis='both', labelsize=27)
ax.plot(xaxis, efield)
def analyse_homogeneity(self, tube_dist=1, plot=True):
'''
Calculates homogeneity of electric field along particle path for given
tube distance from the system.
'''
self.set_tube_dist(tube_dist, silent=True)
self.converge_grid(w=-1, accuracy=5e-11)
xaxis, efield = self.calc_efield(plot=False)
small_homogen, big_homogen, big_ind = [], [], (tube_dist+3)*self.scale
small_plate = efield[tube_dist*self.scale+1:(tube_dist+2)*self.scale]
big_plate = efield[big_ind+1:-big_ind]
small_ref = small_plate[len(small_plate)/2]
big_ref = big_plate[len(big_plate)/2]
small_check = np.absolute(small_plate - small_ref)/small_ref
big_check = np.absolute(big_plate - big_ref)/big_ref
levels = np.logspace(0,-5,1000)
for level in levels:
small_pass = small_check[small_check<level]
big_pass = big_check[big_check<level]
small_homogen.append(len(small_pass))
big_homogen.append(len(big_pass))
small_homogen = np.array(small_homogen)*self.dim['x'][0]/self.scale
big_homogen = np.array(big_homogen)*self.dim['x'][0]/self.scale
if not plot: return levels, small_homogen, big_homogen
fig = plt.figure()
ax = fig.add_subplot(111)
axb, axs = fig.add_subplot(211), fig.add_subplot(212)
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor='w', top='off', bottom='off',
left='off', right='off')
ax.set_title(r'Electric field homogeneity between plates', fontsize=55)
ax.set_xlabel(r'$level\ of\ homogeneity$', fontsize=37, labelpad=5)
ax.set_ylabel(r'$path\ length\ (%s)$'% (self.dim['x'][1]), fontsize=37,
labelpad=20)
axb.plot(levels, big_homogen, label=r'Big plates')
axb.legend(loc='lower left', prop={'size':40})
axb.tick_params(axis='both', labelsize=27)
axb.invert_xaxis()
axs.plot(levels, small_homogen, color='g', label=r'Small plates')
axs.legend(loc='lower left', prop={'size':40})
axs.tick_params(axis='both', labelsize=27)
axs.invert_xaxis()
def dustify(self, plate='central', pos='centre', behaviour='conductor'):
'''
Adds a dust particle of size 100um at the given plate, on the side facing
the particle path. It can be positioned (pos) at the middle or at the
central edge of the plate (left edge for the central plate).
It can either be an insulator or a conductor, corresponding to a fixed
potential of 0 or to the value at the given plate respectively.
'''
row = 1+(self.tube_dist+1)*self.scale
if plate=='left':
if pos=='centre': col = (self.tube_dist+1)*self.scale
else: col = (self.tube_dist+2)*self.scale
elif plate=='central':
if pos=='centre': col = (self.tube_dist+13)*self.scale
else: col = 1+(self.tube_dist+3)*self.scale
elif plate=='right':
if pos=='centre': col = (self.tube_dist+25)*self.scale
else: col = 1+(self.tube_dist+24)*self.scale
if behaviour=='insulator':
u = 0.
elif behaviour=='conductor':
u = self.grid[row-1, col]
if self.scale==100:
self.grid[row, col] = u
self.fix[row, col] = 1.
elif self.scale==200:
self.grid[row:row+2, col:col+2] = u
self.fix[row:row+2, col:col+2] = 1.
else:
raise ValueError('Method dustify requires scale = 100 or 200')
def plot_middle_dust(self):
'''
Plots electric field for the cases of a pure system and of an
insulating and a conducting dust particle sitting at the middle of the
central plate.
'''
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(r'Cross section of electric field along particle path',
fontsize=55)
ax.set_xlabel(r'$system\ size\ (%s)$'% (self.dim['x'][1]), fontsize=37)
ax.set_ylabel(r'$electric\ field\ E\ (MVm^{-1})$', fontsize=37)
ax.tick_params(axis='both', labelsize=27)
self.converge_grid(accuracy=1e-7)
xaxis, efield = self.calc_efield(plot=False)
ax.plot(xaxis, efield, label=r'No dust')
self.dustify(behaviour='conductor')
self.converge_grid(accuracy=1e-7)
xaxis, efield = self.calc_efield(plot=False)
ax.plot(xaxis, efield, label=r'Conducting dust')
self.dustify(behaviour='insulator')
self.converge_grid(accuracy=1e-7)
xaxis, efield = self.calc_efield(plot=False)
ax.plot(xaxis, efield, label=r'Insulating dust')
leg = ax.legend(prop={'size':40})
leg.draggable()
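# --- Minimal usage sketch (illustrative; the tube_dist/scale values and the
# --- convergence accuracy below are assumptions, not prescribed settings).
# --- It relies on base_class.PotentialGrid providing converge_grid(), which
# --- the methods above already call.
if __name__ == '__main__':
    edm = Edm(tube_dist=1, scale=100)
    edm.converge_grid(accuracy=1e-7)
    edm.calc_efield()
    plt.show()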
|
mit
|
reuk/waveguide
|
demo/evaluation/room_sizes/rt60.py
|
2
|
1951
|
#!/usr/local/bin/python
import numpy as np
import matplotlib
render = True
if render:
matplotlib.use('pgf')
import matplotlib.pyplot as plt
import scipy.signal as signal
import wave
import math
import os
import re
import json
import sys
sys.path.append('python')
def get_frequency_rt30_tuple(line):
split = line.split()
return (float(split[0]), float(split[6]))
def read_rt30(fname):
with open(fname) as f:
lines = f.readlines()
return [get_frequency_rt30_tuple(line) for line in lines[14:22]]
def main():
files = [
("small", "small.txt"),
("medium", "medium.txt"),
("large", "large.txt"),
]
for label, fname in files:
tuples = read_rt30(fname)
x = [freq for freq, _ in tuples]
y = [time for _, time in tuples]
min_time = min(y)
max_time = max(y)
average = (max_time - min_time) * 100.0 / ((max_time + min_time) * 0.5)
print('file: {}, min: {}, max: {}, average: {}'.format(
fname, min_time, max_time, average))
plt.plot(x, y, label=label, marker='o', linestyle='--')
plt.xscale('log')
plt.axvline(x=500)
plt.annotate(xy=(520, 1.4), s='waveguide cutoff')
plt.legend(loc='lower center', ncol=3, bbox_to_anchor=(0, -0.05, 1, 1), bbox_transform=plt.gcf().transFigure)
plt.title('Octave-band T30 Measurements for Different Room Sizes')
plt.xlabel('frequency / Hz')
plt.ylabel('time / s')
plt.tight_layout()
#plt.subplots_adjust(top=0.9)
plt.show()
if render:
plt.savefig('room_size_rt30.svg', bbox_inches='tight', dpi=96, format='svg')
if __name__ == '__main__':
pgf_with_rc_fonts = {
'font.family': 'serif',
'font.serif': [],
'font.sans-serif': ['Helvetica Neue'],
'legend.fontsize': 12,
}
matplotlib.rcParams.update(pgf_with_rc_fonts)
main()
|
gpl-2.0
|
MathYourLife/TSatPy-thesis
|
tex/sample_scripts/Controllers_02.py
|
1
|
5725
|
import time
import numpy as np
import matplotlib.pyplot as plt
from TSatPy import Controller, State
from TSatPy import StateOperator as SO
from TSatPy.Clock import Metronome
from GradientDescent import GradientDescent
print("Test PID Controller - Rate Control")
run_time = 60
speed = 20
c = Metronome()
c.set_speed(speed)
dt = 0.5
x_d = State.State(
State.Identity(),
State.BodyRate([0,0,0.314]))
def run_test(Kpx, Kpy, Kpz, Kix, Kiy, Kiz, Kdx, Kdy, Kdz, plot=False):
ts, Ms, Mps, Mis, Mds, ws = test(Kpx, Kpy, Kpz, Kix, Kiy, Kiz, Kdx, Kdy, Kdz)
if plot:
graph_it(ts, Ms, Mps, Mis, Mds, ws)
def test(Kpx, Kpy, Kpz, Kix, Kiy, Kiz, Kdx, Kdy, Kdz):
x_est = State.State(
State.Quaternion(np.random.rand(3,1),radians=np.random.rand()),
State.BodyRate(np.random.rand(3,1)))
I = [[4, 0, 0], [0, 4, 0], [0, 0, 2]]
plant_est = State.Plant(I, x_est, c)
Kp = SO.StateToMoment(
None,
SO.BodyRateToMoment([[Kpx,0,0],[0,Kpy,0],[0,0,Kpz]]))
Ki = SO.StateToMoment(
None,
SO.BodyRateToMoment([[Kix,0,0],[0,Kiy,0],[0,0,Kiz]]))
Kd = SO.StateToMoment(
None,
SO.BodyRateToMoment([[Kdx,0,0],[0,Kdy,0],[0,0,Kdz]]))
pid = Controller.PID(c)
pid.set_Kp(Kp)
pid.set_Ki(Ki)
pid.set_Kd(Kd)
pid.set_desired_state(x_d)
M = State.Moment()
ts = []
Ms = []
Mps = []
Mis = []
Mds = []
ws = []
start_time = c.tick()
end_time = c.tick() + run_time
while c.tick() < end_time:
time.sleep(dt / float(speed))
plant_est.propagate(M)
x_plant = plant_est.x
M = pid.update(x_plant)
ts.append(c.tick() - start_time)
Ms.append((M[0],M[1],M[2]))
Mps.append((pid.M_p[0],pid.M_p[1],pid.M_p[2]))
Mis.append((pid.M_i[0],pid.M_i[1],pid.M_i[2]))
Mds.append((pid.M_d[0],pid.M_d[1],pid.M_d[2]))
ws.append((x_plant.w[0],x_plant.w[1],x_plant.w[2]))
return ts, Ms, Mps, Mis, Mds, ws
def grid_me(ax):
ax.grid(color='0.75', linestyle='--', linewidth=1)
def graph_it(ts, Ms, Mps, Mis, Mds, ws):
fig = plt.figure(dpi=80, facecolor='w', edgecolor='k')
ax = fig.add_subplot(2,1,1)
ax.plot(ts, [M[0] for M in Ms], c='b', label=r'$M_x$', lw=2)
ax.plot(ts, [M[1] for M in Ms], c='r', label=r'$M_y$', lw=2)
ax.plot(ts, [M[2] for M in Ms], c='g', label=r'$M_z$', lw=2)
ax.set_ylabel(r'Moment (Nm)')
grid_me(ax)
plt.legend(prop={'size':10})
ax = fig.add_subplot(2,1,2)
ax.plot(ts, [w[0] for w in ws], c='b', label=r'$\omega_x$', lw=2)
ax.plot(ts, [w[1] for w in ws], c='r', label=r'$\omega_y$', lw=2)
ax.plot(ts, [w[2] for w in ws], c='g', label=r'$\omega_z$', lw=2)
ax.set_ylabel(r'Body Rate (rad/sec)')
grid_me(ax)
plt.legend(prop={'size':10})
ax.set_xlabel('$t(k)$ seconds')
plt.tight_layout()
plt.draw()
fig = plt.figure(dpi=80, facecolor='w', edgecolor='k')
ax = fig.add_subplot(3,1,1)
ax.plot(ts, [M[0] for M in Mps], c='b', label=r'$M_x$', lw=2)
ax.plot(ts, [M[1] for M in Mps], c='r', label=r'$M_y$', lw=2)
ax.plot(ts, [M[2] for M in Mps], c='g', label=r'$M_z$', lw=2)
ax.set_ylabel(r'P-Moment (Nm)')
grid_me(ax)
plt.legend(prop={'size':10})
ax = fig.add_subplot(3,1,2)
ax.plot(ts, [M[0] for M in Mis], c='b', label=r'$M_x$', lw=2)
ax.plot(ts, [M[1] for M in Mis], c='r', label=r'$M_y$', lw=2)
ax.plot(ts, [M[2] for M in Mis], c='g', label=r'$M_z$', lw=2)
ax.set_ylabel(r'I-Moment (Nm)')
grid_me(ax)
plt.legend(prop={'size':10})
ax = fig.add_subplot(3,1,3)
ax.plot(ts, [M[0] for M in Mds], c='b', label=r'$M_x$', lw=2)
ax.plot(ts, [M[1] for M in Mds], c='r', label=r'$M_y$', lw=2)
ax.plot(ts, [M[2] for M in Mds], c='g', label=r'$M_z$', lw=2)
ax.set_ylabel(r'D-Moment (Nm)')
grid_me(ax)
plt.legend(prop={'size':10})
ax.set_xlabel('$t(k)$ seconds')
plt.tight_layout()
plt.show()
def calc_err(ts, Ms, Mps, Mis, Mds, ws):
M = np.array(Ms)
cost = np.abs(M).mean(axis=0).sum()
return cost
def main():
domains = [
['Kpx', 0.001, 0.9],
['Kpy', 0.001, 0.9],
['Kpz', 0.001, 0.9],
['Kix', 0, 0.01],
['Kiy', 0, 0.01],
['Kiz', 0, 0.01],
['Kdx', 0, 0.1],
['Kdy', 0, 0.1],
['Kdz', 0, 0.1],
]
kwargs = {
# Number of iterations to run
'N': 50,
# Definition of parameter search domain
'domains': domains,
# Function that will run a test
'run_test': test,
# Function that will take the return of run_test and determine
# how well the parameters worked.
'calc_cost': calc_err,
}
print(GradientDescent.descend(**kwargs))
return 0
if __name__ == '__main__':
kwargs = None
kwargs = {
'Kpx': 0.4239, 'Kpy': 0.4164, 'Kpz': 0.3460,
'Kix': 0.005723, 'Kiy': 0.003002, 'Kiz': 0.005465,
'Kdx': 0.04437, 'Kdy': 0.07173, 'Kdz': 0.04188
}
if kwargs is not None:
kwargs['plot'] = True
run_test(**kwargs)
else:
exit(main())
# Best performance at
# Kpx:
# val: 0.423915 range: 0.001,0.9 std: 0.0528036
# Kpy:
# val: 0.416379 range: 0.001,0.9 std: 0.0508544
# Kpz:
# val: 0.346048 range: 0.001,0.9 std: 0.0263608
# Kix:
# val: 0.00572302 range: 0,0.01 std: 0.000574547
# Kiy:
# val: 0.00300189 range: 0,0.01 std: 0.00112776
# Kiz:
# val: 0.00546487 range: 0,0.01 std: 0.000738131
# Kdx:
# val: 0.0443719 range: 0,0.1 std: 0.00127098
# Kdy:
# val: 0.0717262 range: 0,0.1 std: 0.0113157
# Kdz:
# val: 0.0418845 range: 0,0.1 std: 0.00599079
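# Note on the search objective (explanatory comment, not from the original
# author): calc_err above returns the time-averaged absolute applied moment on
# each body axis, summed over the three axes; that value is the cost
# GradientDescent.descend uses when searching the gain domains defined in
# main().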
|
mit
|
tentangdata/pinisi
|
scripts/lln.py
|
1
|
3796
|
import argparse
import json
import math
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as scp
import seaborn as sns
import functions
# Constants
LEVEL_IDS = list(range(1, 7))
LINE_COL = 'saddlebrown'
LW = 3
BG_COLOR = 'tan'
XLABEL = 'Jumlah pemain'
YLABEL = 'Rata-rata jarak'
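# Translation note (added for clarity): the plot labels are Indonesian,
# XLABEL ~ "Number of players", YLABEL ~ "Average distance", and the default
# title reads "Average player distance against number of players - Level {}".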
NUM_IQR = 2.0
sns.set_style('white')
# Functions
def plot_cum_dist(dist_points, level_id, line_col=None, lw=None, bg_color=None, show_mean=False, title=None, xlabel=None, ylabel=None, save_to=None):
cum_dist = calc_cum_dist(dist_points)
ax = cum_dist.plot.line(color=line_col or LINE_COL, lw=lw or LW)
ax.set_axis_bgcolor(bg_color or BG_COLOR)
if show_mean:
plt.axhline(dist_points.mean(), 0, len(dist_points), linestyle='--', color=LINE_COL)
plt.title(title or "Rata-rata jarak pemain terhadap jumlah pemain - Level {}".format(level_id))
plt.xlabel(xlabel or XLABEL)
plt.ylabel(ylabel or YLABEL)
if save_to:
try:
os.makedirs(save_to)
except:
pass
plt.savefig(os.path.join(save_to, 'level{}.png'.format(level_id)))
return ax
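# calc_cum_dist below returns the running mean: element k is
# (d_1 + ... + d_k) / k, which is what each plot uses to illustrate the law
# of large numbers as more players are included.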
def calc_cum_dist(dist_points):
return dist_points.cumsum() / pd.Series(range(1, len(dist_points) + 1))
def parse_and_assert_args():
arg_parser = argparse.ArgumentParser(prog='pinisi', description="Law of large numbers simulation with Pinisi data", usage="")
arg_parser.add_argument('-r', dest='filter_rect', action='store_true', help='Filter data points only inside rectangle. Takes precedence over num_iqr (default: False)')
arg_parser.add_argument('-i', dest='num_iqr', help='Filter data points outside num_iqr * IQR. Set to 0 to prevent filtering (default: {})'.format(NUM_IQR))
arg_parser.add_argument('-m', dest='show_mean', action='store_true', help='Show mean line (default: False)')
arg_parser.add_argument('-s', dest='save_to', help='Directory for saving plots (default: None)')
args = arg_parser.parse_args()
num_iqr = args.num_iqr
if num_iqr is not None:
num_iqr = float(num_iqr)
assert num_iqr >= 0.0, "num_iqr must be >= 0"
return args
if __name__ == '__main__':
args = parse_and_assert_args()
filter_rect = args.filter_rect
num_iqr = float(args.num_iqr) if args.num_iqr else NUM_IQR
show_mean = args.show_mean
save_to = args.save_to
TRUTH_ID = int(open('../data/truth_ID').read())
EXPERT_ID = int(open('../data/expert_ID').read())
points = pd.read_csv('../data/clean/points.psv', sep='|', index_col='id')
users = pd.read_csv('../data/clean/users.psv', sep='|', index_col='id')
levels = json.load(open('../data/levels.json', 'r'))['maps']
rects = {level['level']: functions.get_rect(level['polygon']) for level in levels}
areas = {level_id: functions.calc_rect_area(rect) for level_id, rect in rects.items()}
points_truth = points[points.user_id == TRUTH_ID].reset_index(drop=True)
points_expert = points[points.user_id == EXPERT_ID].reset_index(drop=True)
points = points[~points.user_id.isin([TRUTH_ID, EXPERT_ID])].reset_index(drop=True)
dists = {level_id: functions.get_dist_level(points_truth, points_expert, points, level_id) for level_id in LEVEL_IDS}
dists_points = {level_id: d[0] for level_id, d in dists.items()}
for level_id, dist_points in dists_points.items():
if filter_rect:
points_idx = functions.get_points_level(points, rects, level_id).index
dist_points = dist_points[points_idx]
elif num_iqr:
dist_points = functions.filter_iqr(dist_points, num_iqr)
dist_points = dist_points.reset_index(drop=True)
plot_cum_dist(dist_points, level_id, show_mean=show_mean, save_to=save_to)
plt.clf()
|
gpl-2.0
|
musically-ut/statsmodels
|
statsmodels/iolib/tests/test_foreign.py
|
25
|
7274
|
"""
Tests for iolib/foreign.py
"""
import os
import warnings
from datetime import datetime
from numpy.testing import *
import numpy as np
from pandas import DataFrame, isnull
import pandas.util.testing as ptesting
from statsmodels.compat.python import BytesIO, asbytes
import statsmodels.api as sm
from statsmodels.iolib.foreign import (StataWriter, genfromdta,
_datetime_to_stata_elapsed, _stata_elapsed_date_to_datetime)
from statsmodels.datasets import macrodata
import pandas
pandas_old = int(pandas.__version__.split('.')[1]) < 9
# Test precisions
DECIMAL_4 = 4
DECIMAL_3 = 3
curdir = os.path.dirname(os.path.abspath(__file__))
def test_genfromdta():
#Test genfromdta vs. results/macrodta.npy created with genfromtxt.
#NOTE: Stata handles data very oddly. Round tripping from csv to dta
# to ndarray 2710.349 (csv) -> 2510.2491 (stata) -> 2710.34912109375
# (dta/ndarray)
from .results.macrodata import macrodata_result as res2
res1 = genfromdta(curdir+'/../../datasets/macrodata/macrodata.dta')
assert_array_equal(res1 == res2, True)
def test_genfromdta_pandas():
from pandas.util.testing import assert_frame_equal
dta = macrodata.load_pandas().data
curdir = os.path.dirname(os.path.abspath(__file__))
res1 = sm.iolib.genfromdta(curdir+'/../../datasets/macrodata/macrodata.dta',
pandas=True)
res1 = res1.astype(float)
assert_frame_equal(res1, dta)
def test_stata_writer_structured():
buf = BytesIO()
dta = macrodata.load().data
dtype = dta.dtype
dta = dta.astype(np.dtype([('year', int),
('quarter', int)] + dtype.descr[2:]))
writer = StataWriter(buf, dta)
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf)
assert_array_equal(dta, dta2)
def test_stata_writer_array():
buf = BytesIO()
dta = macrodata.load().data
dta = DataFrame.from_records(dta)
dta.columns = ["v%d" % i for i in range(1,15)]
writer = StataWriter(buf, dta.values)
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf)
dta = dta.to_records(index=False)
assert_array_equal(dta, dta2)
def test_missing_roundtrip():
buf = BytesIO()
dta = np.array([(np.nan, np.inf, "")],
dtype=[("double_miss", float), ("float_miss", np.float32),
("string_miss", "a1")])
writer = StataWriter(buf, dta)
writer.write_file()
buf.seek(0)
dta = genfromdta(buf, missing_flt=np.nan)
assert_(isnull(dta[0][0]))
assert_(isnull(dta[0][1]))
assert_(dta[0][2] == asbytes(""))
dta = genfromdta(os.path.join(curdir, "results/data_missing.dta"),
missing_flt=-999)
assert_(np.all([dta[0][i] == -999 for i in range(5)]))
def test_stata_writer_pandas():
buf = BytesIO()
dta = macrodata.load().data
dtype = dta.dtype
#as of 0.9.0 pandas only supports i8 and f8
dta = dta.astype(np.dtype([('year', 'i8'),
('quarter', 'i8')] + dtype.descr[2:]))
dta4 = dta.astype(np.dtype([('year', 'i4'),
('quarter', 'i4')] + dtype.descr[2:]))
dta = DataFrame.from_records(dta)
dta4 = DataFrame.from_records(dta4)
# dta is int64 'i8' given to Stata writer
writer = StataWriter(buf, dta)
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf)
dta5 = DataFrame.from_records(dta2)
# dta2 is int32 'i4' returned from Stata reader
if dta5.dtypes[1] is np.dtype('int64'):
ptesting.assert_frame_equal(dta.reset_index(), dta5)
else:
# don't check index because it has different size, int32 versus int64
ptesting.assert_frame_equal(dta4, dta5[dta5.columns[1:]])
def test_stata_writer_unicode():
# make sure to test with characters outside the latin-1 encoding
pass
@dec.skipif(pandas_old)
def test_genfromdta_datetime():
results = [(datetime(2006, 11, 19, 23, 13, 20), 1479596223000,
datetime(2010, 1, 20), datetime(2010, 1, 8), datetime(2010, 1, 1),
datetime(1974, 7, 1), datetime(2010, 1, 1), datetime(2010, 1, 1)),
(datetime(1959, 12, 31, 20, 3, 20), -1479590, datetime(1953, 10, 2),
datetime(1948, 6, 10), datetime(1955, 1, 1), datetime(1955, 7, 1),
datetime(1955, 1, 1), datetime(2, 1, 1))]
with warnings.catch_warnings(record=True) as w:
dta = genfromdta(os.path.join(curdir, "results/time_series_examples.dta"))
assert_(len(w) == 1) # should get a warning for that format.
assert_array_equal(dta[0].tolist(), results[0])
assert_array_equal(dta[1].tolist(), results[1])
with warnings.catch_warnings(record=True):
dta = genfromdta(os.path.join(curdir, "results/time_series_examples.dta"),
pandas=True)
assert_array_equal(dta.irow(0).tolist(), results[0])
assert_array_equal(dta.irow(1).tolist(), results[1])
def test_date_converters():
ms = [-1479597200000, -1e6, -1e5, -100, 1e5, 1e6, 1479597200000]
days = [-1e5, -1200, -800, -365, -50, 0, 50, 365, 800, 1200, 1e5]
weeks = [-1e4, -1e2, -53, -52, -51, 0, 51, 52, 53, 1e2, 1e4]
months = [-1e4, -1e3, -100, -13, -12, -11, 0, 11, 12, 13, 100, 1e3, 1e4]
quarter = [-100, -50, -5, -4, -3, 0, 3, 4, 5, 50, 100]
half = [-50, 40, 30, 10, 3, 2, 1, 0, 1, 2, 3, 10, 30, 40, 50]
year = [1, 50, 500, 1000, 1500, 1975, 2075]
for i in ms:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tc"), "tc"), i)
for i in days:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "td"), "td"), i)
for i in weeks:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tw"), "tw"), i)
for i in months:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tm"), "tm"), i)
for i in quarter:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "tq"), "tq"), i)
for i in half:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "th"), "th"), i)
for i in year:
assert_equal(_datetime_to_stata_elapsed(
_stata_elapsed_date_to_datetime(i, "ty"), "ty"), i)
@dec.skipif(pandas_old)
def test_datetime_roundtrip():
dta = np.array([(1, datetime(2010, 1, 1), 2),
(2, datetime(2010, 2, 1), 3),
(4, datetime(2010, 3, 1), 5)],
dtype=[('var1', float), ('var2', object), ('var3', float)])
buf = BytesIO()
writer = StataWriter(buf, dta, {"var2" : "tm"})
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf)
assert_equal(dta, dta2)
dta = DataFrame.from_records(dta)
buf = BytesIO()
writer = StataWriter(buf, dta, {"var2" : "tm"})
writer.write_file()
buf.seek(0)
dta2 = genfromdta(buf, pandas=True)
ptesting.assert_frame_equal(dta, dta2.drop('index', axis=1))
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb'],
exit=False)
|
bsd-3-clause
|
pratapvardhan/scikit-learn
|
sklearn/neural_network/tests/test_rbm.py
|
225
|
6278
|
import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
# this many iterations are needed
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
|
bsd-3-clause
|
bejar/kemlglearn
|
kemlglearn/cluster/GlobalKMeans.py
|
1
|
6445
|
"""
.. module:: GlobalKMeans
GlobalKMeans
*************
:Description: GlobalKMeans
:Authors: bejar
:Version:
:Created on: 20/01/2015 10:42
"""
__author__ = 'bejar'
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
class GlobalKMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""Global K-means Algorithm
Parameters:
n_clusters: int
maximum number of clusters to obtain
algorithm: string
'classical' the classical algorithm
'bagirov' the Bagirov 2006 variant
"""
def __init__(self, n_clusters, algorithm='classical'):
self.n_clusters = n_clusters
self.cluster_centers_ = None
self.labels_ = None
self.cluster_sizes_ = None
self.inertia_ = None
self.algorithm = algorithm
def fit(self, X):
"""
Clusters the examples
:param X:
:return:
"""
if self.algorithm == 'classical':
self.cluster_centers_, self.labels_, self.inertia_ = self._fit_process(X)
elif self.algorithm == 'bagirov':
self.cluster_centers_, self.labels_, self.inertia_ = self._fit_process_bagirov(X)
return self
def predict(self, X):
"""
Returns the nearest cluster for a data matrix
@param X:
@return:
"""
clasif = []
for i in range(X.shape[0]):
ncl, mdist = self._find_nearest_cluster(X[i].reshape(1, -1), self.cluster_centers_)
if mdist <= getattr(self, 'radius', np.inf):  # 'radius' is never set by fit(); default to no distance cutoff
clasif.append(ncl)
else:
clasif.append(-1)
return clasif
def _fit_process(self, X):
"""
Classical global k-means algorithm
:param X:
:return:
"""
# Compute the centroid of the dataset
centroids = sum(X) / X.shape[0]
centroids.shape = (1, X.shape[1])
for i in range(2, self.n_clusters + 1):
mininertia = np.infty
for j in range(X.shape[0]):
newcentroids = np.vstack((centroids, X[j]))
# print newcentroids.shape
km = KMeans(n_clusters=i, init=newcentroids, n_init=1)
km.fit(X)
if mininertia > km.inertia_:
mininertia = km.inertia_
bestkm = km
centroids = bestkm.cluster_centers_
return bestkm.cluster_centers_, bestkm.labels_, bestkm.inertia_
def _fit_process_bagirov(self, X):
"""
Clusters using the global K-means algorithm Bagirov variation
:param X:
:return:
"""
# Create a KNN structure for fast search
self._neighbors = NearestNeighbors()
self._neighbors.fit(X)
# Compute the centroid of the dataset
centroids = sum(X) / X.shape[0]
assignments = [0 for i in range(X.shape[0])]
centroids.shape = (1, X.shape[1])
# compute the distance of the examples to the centroids
mindist = np.zeros(X.shape[0])
for i in range(X.shape[0]):
mindist[i] = \
euclidean_distances(X[i].reshape(1, -1), centroids[assignments[i]].reshape(1, -1), squared=True)[0]
for k in range(2, self.n_clusters + 1):
newCentroid = self._compute_next_centroid(X, centroids, assignments, mindist)
centroids = np.vstack((centroids, newCentroid))
km = KMeans(n_clusters=k, init=centroids, n_init=1)
km.fit(X)
assignments = km.labels_
for i in range(X.shape[0]):
mindist[i] = \
euclidean_distances(X[i].reshape(1, -1), centroids[assignments[i]].reshape(1, -1), squared=True)[0]
return km.cluster_centers_, km.labels_, km.inertia_
def _compute_next_centroid(self, X, centroids, assignments, mindist):
"""
Computes the candidate for the next centroid
:param X:
:param centroids:
:return:
"""
minsum = np.infty
candCentroid = None
# Compute the first candidate to new centroid
for i in range(X.shape[0]):
distance = euclidean_distances(X[i].reshape(1, -1), centroids[assignments[i]].reshape(1, -1))[0]
S2 = self._neighbors.radius_neighbors(X[i].reshape(1, -1), radius=distance, return_distance=False)[0]
S2centroid = np.sum(X[S2], axis=0) / len(S2)
S2centroid.shape = (1, X.shape[1])
cost = self._compute_fk(X, mindist, S2centroid)
if cost < minsum:
minsum = cost
candCentroid = S2centroid
# Compute examples for the new centroid
S2 = []
newDist = euclidean_distances(X, candCentroid.reshape(1, -1), squared=True)
for i in range(X.shape[0]):
if newDist[i] < mindist[i]:
S2.append(i)
newCentroid = sum(X[S2]) / len(S2)
newCentroid.shape = (1, X.shape[1])
while not (candCentroid == newCentroid).all():
candCentroid = newCentroid
S2 = []
newDist = euclidean_distances(X, candCentroid.reshape(1, -1), squared=True)
for i in range(X.shape[0]):
if newDist[i] < mindist[i]:
S2.append(i)
newCentroid = np.sum(X[S2], axis=0) / len(S2)
newCentroid.shape = (1, X.shape[1])
return candCentroid
def _compute_fk(self, X, mindist, ccentroid):
"""
Computes the cost function
:param X:
:param mindist:
:param ccentroid:
:return:
"""
# Distances among the examples and the candidate centroid
centdist = euclidean_distances(X, ccentroid.reshape(1, -1), squared=True)
fk = 0
for i in range(X.shape[0]):
fk = fk + min(mindist[i], centdist[i][0])
return fk
@staticmethod
def _find_nearest_cluster(examp, centers):
"""
Finds the nearest cluster for an example
:param examp:
:param centers:
:return:
"""
dist = euclidean_distances(centers, examp.reshape(1, -1))
pmin = np.argmin(dist)
vmin = np.min(dist)
return pmin, vmin
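# --- Minimal usage sketch (illustrative only; the blob data and parameter
# --- choices below are assumptions, not part of the original module).
if __name__ == '__main__':
    from sklearn.datasets import make_blobs
    X, _ = make_blobs(n_samples=150, centers=3, random_state=0)
    gkm = GlobalKMeans(n_clusters=3, algorithm='classical').fit(X)
    print(gkm.inertia_)
    print(gkm.cluster_centers_)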
|
mit
|
etkirsch/scikit-learn
|
sklearn/datasets/tests/test_rcv1.py
|
322
|
2414
|
"""Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
|
bsd-3-clause
|
albertfdp/dtu-data-mining
|
meneame/src/network/network.py
|
1
|
6349
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains the functions for the network analysis and creation.
"""
import igraph as ig
import collections
import matplotlib.pyplot as plt
def test_create_graph():
"""Testing function for create_graph()
"""
vertices = {'user1': 12, 'user2': 8, 'user3': 7, 'user4': 4}
vertices = collections.Counter(vertices)
edges = {('user1', 'user2'): 23,
('user1', 'user3'): 12, ('user2', 'user4'): 11}
edges = collections.Counter(edges)
test_graph = create_graph(vertices, edges)
for v in test_graph.vs:
assert v['comments'] == vertices[v['name']]
for e in test_graph.es:
source = test_graph.vs[e.source]['name']
target = test_graph.vs[e.target]['name']
ed = tuple(sorted((source, target)))
assert edges[ed] == e['weight']
def create_graph(vertices, edges):
"""Return the graph object, given the edges and vertices collections.
:param vertices: collection of vertices, where each element is in the
format "username: number_of_comments"
:param edges: collection of edges, where each element is in the format
(username1, username2): weight, where the weight is the number
of articles in which the two users have commented together
:returns: the igraph object
The graph object is created only in the final step of the
function, due to the way igraph deals with the edges.
In fact, building the graph first and then adding the edges one by one
would be very inefficient, as the edges are re-indexed every time a new
edge is added.
"""
usernames = vertices.keys()
comments = vertices.values()
users_dic = {name: idx for (idx, name) in enumerate(usernames)}
n_users = len(usernames)
edges_list = [(users_dic[el1],
users_dic[el2]) for (el1, el2) in edges.keys()]
weights_list = edges.values()
vertex_attrs = {'name': usernames, 'comments': comments}
edge_attrs = {'weight': weights_list}
return ig.Graph(n=n_users, edges=edges_list, vertex_attrs=vertex_attrs,
edge_attrs=edge_attrs)
def save_degree_distribution(graph, image_folder):
"""Save the degree distribution of the input graph to file.
:param graph: the input igraph object
:param image_folder: the path to the folder where to save the images
"""
degree_dist = graph.degree_distribution()
x = [el[0] for el in degree_dist.bins()]
y = [el[2] for el in degree_dist.bins()]
title = "Network Degree Distribution"
xlabel = "Degree"
ylabel = "Number of nodes"
filename = graph['name'] + "_degree_distribution"
save_log_histogram(x, y, title, xlabel, ylabel, filename, image_folder)
def save_weights_distribution(graph, image_folder):
"""Saves the weigth distribution to file.
:param graph: the input igraph object
:param image_folder: the path to the folder where to save the images
"""
weights = graph.es['weight']
co = collections.Counter(weights)
x = [el for el in co]
y = [co[el] for el in co]
title = "Weights distribution"
xlabel = "Weight"
ylabel = "Number of edges"
filename = graph['name'] + "_weights_distribution"
save_log_histogram(x, y, title, xlabel, ylabel, filename, image_folder)
def save_log_histogram(x, y, title, xlabel, ylabel, filename, image_folder):
"""Create a loglog histogram from the input x and y and saves it to\
file. Each bin has an unitary size.
:param x: bin positions
:param y: count values
:param title: title of the histogram
:param xlabel: label of the x axis
:param ylabel: label of the y axis
:param filename: name of the saved file
"""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(x, y)
ax.set_xscale('log')
ax.set_yscale('log')
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.title(title)
plt.savefig(image_folder + filename + '.png')
plt.savefig(image_folder + filename + '.pdf')
def community_analysis(graph):
"""Analyze the communities of the input graph.
:param graph: the input igraph object
"""
print "\nCOMMUNITY ANALYSIS\n"
communities = graph.community_infomap(edge_weights='weight',
vertex_weights='comments',
trials=10)
print "Infomap ", communities.summary()
print "Sizes of communities: ", communities.sizes()
single_community_analysis(communities)
def single_community_analysis(communities):
"""Analyze the communities found. The analysis is done only if more\
than one community has been found.
:param graph: the input VertexClustering object representing the\
communities
"""
n_comm = len(communities.sizes())
if n_comm > 1:
for idx in range(n_comm):
print "\n COMMUNITY NUMBER ", idx, "\n"
general_analysis(communities.subgraph(idx))
def general_analysis(graph):
"""Print to screen some basic information regarding the input graph.
:param graph: the input igraph object
"""
comments = graph.vs['comments']
weights = graph.es['weight']
print "GENERAL ANALYSIS\n"
print "Number of users: ", graph.vcount()
print "Number of links: ", graph.ecount()
print "\nMax. number of comment per user: ", max(comments)
print "Min. number of comment per user: ", min(comments)
print "Average number of comments: ",
print float(sum(comments)) / len(comments)
print "\nMax. value of weight: ", max(weights)
print "Min. value of weight: ", min(weights)
print "Average weight: ", float(sum(weights)) / len(weights)
print "\nClustering coefficient: ", graph.transitivity_undirected()
components = graph.components()
print "\nNumber of connected components: ", len(components.sizes())
hist = graph.path_length_hist()
lengths, paths = [[el[i] for el in list(hist.bins())] for i in [0, 2]]
print "\nPath length distibution: "
for i in range(len(lengths)):
print paths[i], " paths with length ", lengths[i]
print "\nAverage path length: ",
splengths = sum([lengths[i] * paths[i] for i in range(len(lengths))])
print splengths / sum(paths)
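# --- Minimal usage sketch (illustrative; the toy vertices/edges mirror the
# --- ones used in test_create_graph above and are not real meneame data).
if __name__ == '__main__':
    toy_vertices = collections.Counter({'user1': 12, 'user2': 8, 'user3': 7})
    toy_edges = collections.Counter({('user1', 'user2'): 5, ('user2', 'user3'): 2})
    toy_graph = create_graph(toy_vertices, toy_edges)
    general_analysis(toy_graph)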
|
apache-2.0
|
hunse/vrep-python
|
dvs-gabor-nef.py
|
1
|
4917
|
"""Try representing the spiking data with Gabor RFs.
1. Give an ensemble a bunch of Gabor encoders
"""
import numpy as np
import matplotlib.pyplot as plt
import nengo
import vanhateren
plt.ion()
import dvs
import gabor
from hunse_tools.timing import tic, toc
def show_image(ax, image):
plt_img = ax.imshow(image, vmin=-1, vmax=1, cmap='gray', interpolation='none')
ax.set_xticks([])
ax.set_yticks([])
return plt_img
rng = np.random.RandomState(3)
dt = 0.001
# --- load and filter DVS spikes
filename = 'dvs.npz'
# filename = 'dvs-ball-1ms.npz'
events = dvs.load(filename, dt_round=False)
t0 = 0.1
spikes = dvs.make_video(events, t0=t0, dt_frame=dt, tau=0)
video = dvs.make_video(events, t0=t0, dt_frame=dt, tau=0.005)
if 0:
plt.ion()
plt.figure(1)
ax = plt.gca()
img = show_image(ax, video[0])
ax.invert_yaxis()
for frame in video:
img.set_data(frame)
plt.draw()
raw_input("Pause...")
if 0:
plt.figure(1)
ani = dvs.animate([video], [plt.gca()])
plt.show()
# --- generate a filterbank
n = 5000
# rf_shape = (9, 9)
rf_shape = (15, 15)
thetas = rng.uniform(0, np.pi, size=n)
# phases = rng.uniform(-0.5, 0.5, size=n)
freqs = rng.uniform(0.1, 2.0, size=n)
# freqs = rng.uniform(0.2, 0.7, size=n)
# data_set = gabors(thetas, phases=0, freqs=0.5, sigmas_y=10., shape=rf_shape)
bank = gabor.bank(thetas, freqs=freqs, sigmas_y=10., shape=rf_shape)
if 0:
# test Gabors
plt.figure(1)
plt.clf()
r, c = 5, 5
for i in range(min(len(bank), r * c)):
plt.subplot(r, c, i+1)
plt.imshow(bank[i].reshape(rf_shape), cmap='gray', interpolation='none')
plt.xticks([])
# pair = filter_pair((15, 15), 1.0, 0.5, 1.0)
# axs = [plt.subplot(1, 2, i+1) for i in range(2)]
# axs[0].imshow(pair.real)
# axs[1].imshow(pair.imag)
plt.show()
assert False
# --- make encoders
im_shape = (128, 128)
inds = gabor.inds(len(bank), im_shape, rf_shape, rng=rng)
encoders = gabor.matrix_from_inds(bank, inds, im_shape)
# --- build network and solve for decoders
im_dims = np.prod(im_shape)
n_eval_points = 5000
if 0:
# Van Hateren
patches = vanhateren.VanHateren().patches(n_eval_points, im_shape)
patches = vanhateren.preprocess.scale(patches)
patches.shape = (-1, im_dims)
eval_points = patches.reshape(-1, im_dims)
elif 0:
# More gabors
rf_shape = (32, 32)
thetas = rng.uniform(0, np.pi, size=n_eval_points)
# phases = rng.uniform(-0.5, 0.5, size=n)
freqs = rng.uniform(0.1, 2, size=n_eval_points)
eval_bank = gabor.bank(thetas, freqs=freqs, sigmas_y=10., shape=rf_shape)
inds = gabor.inds(len(eval_bank), im_shape, rf_shape, rng=rng)
eval_points = gabor.matrix_from_inds(eval_bank, inds, im_shape)
else:
# Low freq images
cutoff = 0.1
images = rng.normal(size=(n_eval_points,) + im_shape)
X = np.fft.fft2(images)
f0 = np.fft.fftfreq(X.shape[-2])
f1 = np.fft.fftfreq(X.shape[-1])
ff = np.sqrt(f0[:, None]**2 + f1[None, :]**2)
X[..., ff > cutoff] = 0
Y = np.fft.ifft2(X)
assert np.allclose(Y.imag, 0)
eval_points = Y.real.clip(-1, 1).reshape(-1, im_dims)
fig_uv = plt.figure(2)
plt.clf()
plt.subplot(211)
u_plot = plt.imshow(video[0], vmin=-1, vmax=1, cmap='gray')
plt.subplot(212)
v_plot = plt.imshow(video[0], vmin=-1, vmax=1, cmap='gray')
def video_input(t):
i = int(np.round(t / dt))
return video[i % len(video)].ravel()
def video_plot(t, y):
x = video_input(t).reshape(im_shape)
y = y.reshape(im_shape)
u_plot.set_data(x)
v_plot.set_data(y)
fig_uv.canvas.draw()
rmse_total = np.array(0.0)
def rmse_recorder(t, y):
x = video_input(t)
if t > 0:
rmse_total[...] += np.sqrt(((y - x)**2).mean())
net = nengo.Network(seed=9)
with net:
u = nengo.Node(video_input)
# a = nengo.Ensemble(n, im_dims, encoders=encoders)
a = nengo.Ensemble(n, im_dims, encoders=encoders, intercepts=nengo.dists.Choice([0]))
nengo.Connection(u, a)
# a = nengo.Ensemble(n, im_dims)
v = nengo.Node(size_in=im_dims, size_out=im_dims)
c = nengo.Connection(a, v, eval_points=eval_points, synapse=0.01)
w1 = nengo.Node(video_plot, size_in=im_dims)
w2 = nengo.Node(rmse_recorder, size_in=im_dims)
nengo.Connection(v, w1, synapse=None)
nengo.Connection(v, w2, synapse=None)
sim = nengo.Simulator(net)
# show reconstructed images
if 1:
from nengo.builder.connection import build_linear_system
plt.figure(3)
plt.clf()
_, A, y = build_linear_system(sim.model, c, None)
x = sim.data[c].decoders.T
r, c = 2, 5
axes = [[plt.subplot2grid((2*r, c), (2*i+k, j)) for k in range(2)]
for i in range(r) for j in range(c)]
for i in range(r * c):
show_image(axes[i][0], y[i].reshape(im_shape))
show_image(axes[i][1], np.dot(A[i], x).reshape(im_shape))
sim.run(1.)
print(rmse_total)
|
gpl-2.0
|
madjelan/scikit-learn
|
sklearn/linear_model/tests/test_theil_sen.py
|
234
|
9928
|
"""
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
    # Check for exactly the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
|
bsd-3-clause
|
etkirsch/scikit-learn
|
examples/ensemble/plot_forest_importances.py
|
241
|
1761
|
"""
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-tree variability.
As expected, the plot suggests that 3 features are informative, while the
remaining ones are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
|
bsd-3-clause
|
conversationai/wikidetox
|
experimental/conversation_go_awry/get_annotation_data/get_annotation_data.py
|
1
|
11082
|
"""
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import pandas as pd
import hashlib
import itertools
import csv
import re
import os
def clean(s):
ret = s.replace('\t', ' ')
ret = ret.replace('\n', ' ')
# while (len(ret) >= 2 and ret[0] == '=' and ret[-1] == '='):
# ret = ret[1:-1]
while (len(ret) >= 1 and (ret[0] == ':' or ret[0] == '*')):
ret = ret[1:]
sub_patterns = [('EXTERNAL_LINK: ', ''), \
('\[REPLYTO: .*?\]', ''), \
('\[MENTION: .*?\]', ''), \
('\[OUTDENT: .*?\]', ''), \
('WIKI_LINK: ', '')]
for p, r in sub_patterns:
ret = re.sub(p, r, ret)
return ret
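# A purely illustrative example of what clean() does (the input string below is
# hypothetical and only meant to exercise the substitutions above):
#   clean(':*[MENTION: Foo] WIKI_LINK: Bar\tbaz')  ->  ' Bar baz'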
def update(snapshot, action):
    Found = False
    status = None  # guard against UnboundLocalError if no branch below matches
if not('user_text' in action):
action['user_text'] = 'Anonymous'
if action['comment_type'] == 'COMMENT_REMOVAL':
for ind, act in enumerate(snapshot):
if 'parent_id' in action and action['parent_id'] in act['parent_ids']:
act['status'] = 'removed'
status = 'removed'
snapshot[ind] = act
Found = True
if action['comment_type'] == 'COMMENT_RESTORATION':
for ind, act in enumerate(snapshot):
if 'parent_id' in action and action['parent_id'] in act['parent_ids']:
act['status'] = 'restored'
act['content'] = clean(action['content'])
act['timestamp_in_sec'] = action['timestamp_in_sec']
status = 'restored'
snapshot[ind] = act
Found = True
if action['comment_type'] == 'COMMENT_MODIFICATION':
found = False
for i, act in enumerate(snapshot):
if 'parent_id' in action and action['parent_id'] in act['parent_ids']:
found = True
pids = act['parent_ids']
new_act = {}
new_act['content'] = clean(action['content'])
new_act['id'] = action['id']
new_act['indentation'] = action['indentation']
new_act['comment_type'] = action['comment_type']
new_act['toxicity_score'] = action['score']
if 'bot' in action['user_text'].lower():
new_act['user_text'] = act['user_text']
else:
new_act['user_text'] = action['user_text']
new_act['timestamp'] = action['timestamp']
new_act['timestamp_in_sec'] = action['timestamp_in_sec']
new_act['page_title'] = action['page_title']
new_act['parent_ids'] = pids
new_act['status'] = 'content changed'
status = 'content changed'
new_act['relative_replyTo'] = -1
new_act['absolute_replyTo'] = -1
new_act['parent_ids'][action['id']] = True
for ind, a in enumerate(snapshot):
if action['replyTo_id'] in a['parent_ids']:
new_act['relative_replyTo'] = ind
new_act['absolute_replyTo'] = a['id']
snapshot[i] = new_act
Found = True
if not(found):
act = {}
act['content'] = clean(action['content'])
act['id'] = action['id']
act['indentation'] = action['indentation']
act['comment_type'] = 'COMMENT_ADDING' #action['comment_type']
act['toxicity_score'] = action['score']
act['user_text'] = action['user_text']
act['timestamp'] = action['timestamp']
act['timestamp_in_sec'] = action['timestamp_in_sec']
act['absolute_replyTo'] = -1
act['page_title'] = action['page_title']
act['status'] = 'just added'
status = 'just added'
act['relative_replyTo'] = -1
for ind, a in enumerate(snapshot):
if 'replyTo_id' in action and a['id'] == action['replyTo_id']:
act['relative_replyTo'] = ind
act['absolute_replyTo'] = a['id']
act['parent_ids'] = {action['id'] : True}
snapshot.append(act)
Found = True
if action['comment_type'] == 'COMMENT_ADDING' or action['comment_type'] == 'SECTION_CREATION':
act = {}
act['content'] = clean(action['content'])
act['id'] = action['id']
act['indentation'] = action['indentation']
act['comment_type'] = action['comment_type']
act['toxicity_score'] = action['score']
act['user_text'] = action['user_text']
act['timestamp'] = action['timestamp']
act['timestamp_in_sec'] = action['timestamp_in_sec']
act['page_title'] = action['page_title']
act['absolute_replyTo'] = -1
act['status'] = 'just added'
status = 'just added'
act['relative_replyTo'] = -1
Found = True
for ind, a in enumerate(snapshot):
if 'replyTo_id' in action and a['id'] == action['replyTo_id']:
act['relative_replyTo'] = ind
act['absolute_replyTo'] = a['id']
act['parent_ids'] = {action['id'] : True}
snapshot.append(act)
if not(Found): print(action)
return snapshot, status
def generate_snapshots(conv):
snapshot = [] # list of (text, user_text, user_id, timestamp, status, replyto, relative_reply_to)
for action in conv:
snapshot,status = update(snapshot, action)
return snapshot
def reformat(act):
output_dict = {key: act[key] for key in ['id', 'comment_type', 'content', 'timestamp', 'status', 'page_title', 'user_text']}
output_dict['parent_id'] = parse_absolute_replyTo(act['absolute_replyTo'])
# output_dict['hashed_user_id'] = hashlib.sha1(act['user_text'].encode('utf-8')).hexdigest()
return output_dict
def parse_absolute_replyTo(value):
if value == -1:
return ''
else:
return value
def main(constraint, job):
maxl = None
res = []
max_len = 0
path = '/scratch/wiki_dumps/expr_with_matching/' + constraint + '/data'
# os.system('cat %s/develop.json %s/train.json %s/develop.json > %s/all.json'%(path, path, path, path))
cnt = 0
# with open('annotated.json') as w:
# annotated = json.load(w)
# with open('annotated_2.json') as w:
# accepted = json.load(w)
# if job == 2:
# accepted = []
# if job == 1:
# with open('%s_conversations_with_reasonable_length.json'%(constraint)) as w:
# accepted = json.load(w)
# if job == 3:
# annotated = annotated + accepted
with open('/scratch/wiki_dumps/expr_with_matching/%s/data/altered.json'%(constraint)) as f:
for i, line in enumerate(f):
conv_id, clss, conversation = json.loads(line)
# if job < 3:
# if conv_id in annotated:
# continue
# else:
# if not(conv_id in annotated):
# continue
# if job == 1:
# if not(conv_id in accepted):
# continue
actions = sorted(conversation['action_feature'], key=lambda k: (k['timestamp_in_sec'], k['id'].split('.')[1], k['id'].split('.')[2]))
# not including the last action
end_time = max([a['timestamp_in_sec'] for a in actions])
if job == 1:
actions = [a for a in actions if a['timestamp_in_sec'] < end_time]
snapshot = generate_snapshots(actions)
last_comment = None
for ind, act in enumerate(snapshot):
if 'relative_replyTo' in act and not(act['relative_replyTo'] == -1)\
and not(act['relative_replyTo'] == ind):
act['absolute_replyTo'] = snapshot[act['relative_replyTo']]['id']
if act['timestamp_in_sec'] == end_time:
father = ind
depth = -1
while father:
depth += 1
if 'relative_replyTo' in snapshot[father] \
and not(snapshot[father]['relative_replyTo'] == -1)\
and not(snapshot[father]['relative_replyTo'] == father):
father = snapshot[father]['relative_replyTo']
else:
break
if depth > 10:
depth = -1
break
if depth == -1:
continue
if last_comment == None or depth > last_comment['depth']:
last_comment = act
last_comment['depth'] = depth
snapshot[ind] = act
if job == 3:
if not(last_comment == None):
res.append({'id': conv_id, 'comment': last_comment})
continue
ret = {act['id']:reformat(act) for act in snapshot if not(act['status'] == 'removed')}
length = len(ret.keys())
# if job == 2:
# if length > 10:
# cnt += 1
# else:
# accepted.append(conv_id)
# res.append(json.dumps(ret))
# else:
res.append(json.dumps(ret))
max_len = max(max_len, length)
# if maxl and i > maxl:
# break
# if job == 2:
# with open('%s_conversations_with_reasonable_length.json'%constraint, 'w') as w:
# json.dump(accepted, w)
df = pd.DataFrame(res)
if job < 3:
df.columns = ['conversations']
else:
return df
#conversations_as_json_job1.csv
os.system('mkdir /scratch/wiki_dumps/expr_with_matching/%s/annotations'%(constraint))
df.to_csv('/scratch/wiki_dumps/expr_with_matching/%s/annotations/conversations_as_json_cleaned_job%d.csv'%(constraint, job), chunksize=5000, encoding = 'utf-8', index=False, quoting=csv.QUOTE_ALL)
if __name__ == '__main__':
constraints = ['delta2_no_users_attacker_in_conv'] # 'delta2_no_users',
df = []
for c in constraints:
main(c, 2)
main(c, 1)
# df.append(main(c, 3)) # Only the last comment
print(c)
# df = pd.concat(df)
# df.to_csv('/scratch/wiki_dumps/expr_with_matching/toxicity_in_context.csv', chunksize=5000, encoding = 'utf-8', index=False, quoting=csv.QUOTE_ALL)
|
apache-2.0
|
fyffyt/scikit-learn
|
sklearn/datasets/tests/test_lfw.py
|
230
|
7880
|
"""This test for the LFW require medium-size data dowloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
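# Sketch of how the public loader exercised above is typically called (the
# data_home path and min_faces_per_person value here are illustrative only):
#   lfw = fetch_lfw_people(data_home='/tmp/lfw', min_faces_per_person=70,
#                          download_if_missing=True)
#   # lfw.images has shape (n_samples, h, w); lfw.data holds the flattened
#   # pixels and lfw.target the person ids matching lfw.target_names.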
|
bsd-3-clause
|
jmuhlich/rasmodel
|
ras_gdp_binding.py
|
6
|
2813
|
from rasmodel.scenarios.default import model
import numpy as np
from matplotlib import pyplot as plt
from pysb.integrate import Solver
from pysb import *
from tbidbaxlipo.util import fitting
# Zero out all initial conditions
for ic in model.initial_conditions:
ic[1].value = 0
# In this first experiment, 0.5 uM (500 nM) of HRAS is used:
model.parameters['HRAS_0'].value = 500.
# We use this expression because we still get fluorescence even if GTP
# is hydrolyzed to GDP:
Expression('HRAS_mGXP_', model.observables['HRAS_mGTP_closed_'] +
model.observables['HRAS_mGDP_closed_'])
# We use the parameters calculated for experiments with mGTP at 5C
model.parameters['bind_HRASopen_GTP_kf'].value = 1e-2 # nM^-1 s^-1
model.parameters['bind_HRASopen_GTP_kr'].value = 1e-2 / (6.1e4 * 1e-9) # s^-1
model.parameters['equilibrate_HRASopenGTP_to_HRASclosedGTP_kf'].value = 4.5 #s^-1
model.parameters['equilibrate_HRASopenGTP_to_HRASclosedGTP_kr'].value = 0 #s^-1
plt.ion()
t = np.linspace(0, 10, 1000)
sol = Solver(model, t)
plt.figure()
k_list = []
# Perform titration
mgtp_concs = np.arange(1, 15) * 1000 # nM (1 - 15 uM)
for mgtp_conc in mgtp_concs:
# Titration of labeled GTP:
model.parameters['mGTP_0'].value = mgtp_conc
sol.run()
# Fit to an exponential function to extract the pseudo-first-order rates
k = fitting.Parameter(1.)
def expfunc(t):
return 500 * (1 - np.exp(-k()*t))
res = fitting.fit(expfunc, [k], sol.yexpr['HRAS_mGXP_'], t)
# Plot data and fits
plt.plot(t, sol.yexpr['HRAS_mGXP_'], color='b')
plt.plot(t, expfunc(t), color='r')
# Keep the fitted rate
k_list.append(k())
# Plot the scaling of rates with mGTP concentration
plt.figure()
plt.plot(mgtp_concs, k_list, marker='o')
plt.ylim(bottom=0)
# Figure 3:
# A constant amount of labeled GDP
model.parameters['mGDP_0'].value = 2.5 * 1000 # nM
model.parameters['mGTP_0'].value = 0
model.parameters['bind_HRASopen_GDP_kf'].value = 1e-2 # nM^-1 s^-1
model.parameters['bind_HRASopen_GDP_kr'].value = 1e-2 / (5.7e4 * 1e-9) # s^-1
model.parameters['equilibrate_HRASopenGDP_to_HRASclosedGDP_kf'].value = 3.2 #s^-1
model.parameters['equilibrate_HRASopenGDP_to_HRASclosedGDP_kr'].value = 5e-7 #s^-1
k_list = []
plt.figure()
gdp_concs = np.arange(0, 22) * 1000 # nM
for gdp_conc in gdp_concs:
# Titration of unlabeled GDP
model.parameters['GDP_0'].value = gdp_conc
sol.run()
k = fitting.Parameter(1.)
A = fitting.Parameter(100.)
def expfunc(t):
return A() * (1 - np.exp(-k()*t))
res = fitting.fit(expfunc, [A, k], sol.yexpr['HRAS_mGXP_'], t)
plt.plot(t, sol.yexpr['HRAS_mGXP_'], color='b')
plt.plot(t, expfunc(t), color='r')
k_list.append(k())
plt.figure()
plt.plot(gdp_concs, k_list, marker='o')
plt.ylim(bottom=0)
|
mit
|
rsignell-usgs/notebook
|
ROMS/ROMS Adriatic Velocity.py
|
2
|
3434
|
# coding: utf-8
# # ROMS layer velocity plot
# In[1]:
get_ipython().magic(u'matplotlib inline')
import matplotlib.pyplot as plt
import numpy as np
import netCDF4
# In[2]:
tidx = -1 # just get the final frame, for now.
scale = 0.03
isub = 3
url = 'http://geoport.whoi.edu/thredds/dodsC/examples/bora_feb.nc'
# In[3]:
def shrink(a,b):
"""Return array shrunk to fit a specified shape by triming or averaging.
a = shrink(array, shape)
array is an numpy ndarray, and shape is a tuple (e.g., from
array.shape). a is the input array shrunk such that its maximum
dimensions are given by shape. If shape has more dimensions than
array, the last dimensions of shape are fit.
as, bs = shrink(a, b)
If the second argument is also an array, both a and b are shrunk to
the dimensions of each other. The input arrays must have the same
number of dimensions, and the resulting arrays will have the same
shape.
Example
-------
>>> shrink(rand(10, 10), (5, 9, 18)).shape
(9, 10)
>>> map(shape, shrink(rand(10, 10, 10), rand(5, 9, 18)))
[(5, 9, 10), (5, 9, 10)]
"""
if isinstance(b, np.ndarray):
if not len(a.shape) == len(b.shape):
            raise Exception('input arrays must have the same number of dimensions')
a = shrink(a,b.shape)
b = shrink(b,a.shape)
return (a, b)
if isinstance(b, int):
b = (b,)
if len(a.shape) == 1: # 1D array is a special case
dim = b[-1]
while a.shape[0] > dim: # only shrink a
            if (a.shape[0] - dim) >= 2:  # trim off edges evenly
a = a[1:-1]
else: # or average adjacent cells
a = 0.5*(a[1:] + a[:-1])
else:
for dim_idx in range(-(len(a.shape)),0):
dim = b[dim_idx]
a = a.swapaxes(0,dim_idx) # put working dim first
while a.shape[0] > dim: # only shrink a
if (a.shape[0] - dim) >= 2: # trim off edges evenly
a = a[1:-1,:]
if (a.shape[0] - dim) == 1: # or average adjacent cells
a = 0.5*(a[1:,:] + a[:-1,:])
a = a.swapaxes(0,dim_idx) # swap working dim back
return a
# In[4]:
def rot2d(x, y, ang):
'''rotate vectors by geometric angle'''
xr = x*np.cos(ang) - y*np.sin(ang)
yr = x*np.sin(ang) + y*np.cos(ang)
return xr, yr
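# Quick sanity check for rot2d (values are approximate, shown for illustration):
#   rot2d(1.0, 0.0, np.pi / 2)  ->  (~0.0, ~1.0), i.e. a 90-degree rotation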
# In[5]:
nc = netCDF4.Dataset(url)
mask = nc.variables['mask_rho'][:]
lon_rho = nc.variables['lon_rho'][:]
lat_rho = nc.variables['lat_rho'][:]
anglev = nc.variables['angle'][:]
u = nc.variables['u'][tidx, -1, :, :]
v = nc.variables['v'][tidx, -1, :, :]
u = shrink(u, mask[1:-1, 1:-1].shape)
v = shrink(v, mask[1:-1, 1:-1].shape)
u, v = rot2d(u, v, anglev[1:-1, 1:-1])
# In[6]:
lon_c = lon_rho[1:-1, 1:-1]
lat_c = lat_rho[1:-1, 1:-1]
# In[7]:
legend_vel=1.0
f = plt.figure(figsize=(12,12))
plt.subplot(111,aspect=(1.0/np.cos(np.mean(lat_c)*np.pi/180.0)))
plt.pcolormesh(lon_c,lat_c,np.sqrt(u*u + v*v))
Q = plt.quiver( lon_c[::isub,::isub], lat_c[::isub,::isub], u[::isub,::isub], v[::isub,::isub],
scale=1.0/scale, pivot='middle', zorder=1e35, width=0.003)
legend_str='%3.1f m/s' % legend_vel
qk = plt.quiverkey(Q,0.92,0.08,legend_vel,legend_str,labelpos='W')
# In[ ]:
|
mit
|
CloverHealth/airflow
|
tests/hooks/test_hive_hook.py
|
2
|
16206
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import itertools
import os
import pandas as pd
import random
import mock
import unittest
from collections import OrderedDict
from hmsclient import HMSClient
from airflow.exceptions import AirflowException
from airflow.hooks.hive_hooks import HiveCliHook, HiveMetastoreHook, HiveServer2Hook
from airflow import DAG, configuration
from airflow.operators.hive_operator import HiveOperator
from airflow.utils import timezone
from airflow.utils.tests import assertEqualIgnoreMultipleSpaces
configuration.load_test_config()
DEFAULT_DATE = timezone.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
class HiveEnvironmentTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG('test_dag_id', default_args=args)
self.next_day = (DEFAULT_DATE +
datetime.timedelta(days=1)).isoformat()[:10]
self.database = 'airflow'
self.partition_by = 'ds'
self.table = 'static_babynames_partitioned'
self.hql = """
CREATE DATABASE IF NOT EXISTS {{ params.database }};
USE {{ params.database }};
DROP TABLE IF EXISTS {{ params.table }};
CREATE TABLE IF NOT EXISTS {{ params.table }} (
state string,
year string,
name string,
gender string,
num int)
PARTITIONED BY ({{ params.partition_by }} string);
ALTER TABLE {{ params.table }}
ADD PARTITION({{ params.partition_by }}='{{ ds }}');
"""
self.hook = HiveMetastoreHook()
t = HiveOperator(
task_id='HiveHook_' + str(random.randint(1, 10000)),
params={
'database': self.database,
'table': self.table,
'partition_by': self.partition_by
},
hive_cli_conn_id='beeline_default',
hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def tearDown(self):
hook = HiveMetastoreHook()
with hook.get_conn() as metastore:
metastore.drop_table(self.database, self.table, deleteData=True)
class TestHiveCliHook(unittest.TestCase):
def test_run_cli(self):
hook = HiveCliHook()
hook.run_cli("SHOW DATABASES")
@mock.patch('airflow.hooks.hive_hooks.HiveCliHook.run_cli')
def test_load_file(self, mock_run_cli):
filepath = "/path/to/input/file"
table = "output_table"
hook = HiveCliHook()
hook.load_file(filepath=filepath, table=table, create=False)
query = (
"LOAD DATA LOCAL INPATH '{filepath}' "
"OVERWRITE INTO TABLE {table} \n"
.format(filepath=filepath, table=table)
)
mock_run_cli.assert_called_with(query)
@mock.patch('airflow.hooks.hive_hooks.HiveCliHook.load_file')
@mock.patch('pandas.DataFrame.to_csv')
def test_load_df(self, mock_to_csv, mock_load_file):
df = pd.DataFrame({"c": ["foo", "bar", "baz"]})
table = "t"
delimiter = ","
encoding = "utf-8"
hook = HiveCliHook()
hook.load_df(df=df,
table=table,
delimiter=delimiter,
encoding=encoding)
mock_to_csv.assert_called_once()
kwargs = mock_to_csv.call_args[1]
self.assertEqual(kwargs["header"], False)
self.assertEqual(kwargs["index"], False)
self.assertEqual(kwargs["sep"], delimiter)
mock_load_file.assert_called_once()
kwargs = mock_load_file.call_args[1]
self.assertEqual(kwargs["delimiter"], delimiter)
self.assertEqual(kwargs["field_dict"], {"c": u"STRING"})
self.assertTrue(isinstance(kwargs["field_dict"], OrderedDict))
self.assertEqual(kwargs["table"], table)
@mock.patch('airflow.hooks.hive_hooks.HiveCliHook.load_file')
@mock.patch('pandas.DataFrame.to_csv')
def test_load_df_with_optional_parameters(self, mock_to_csv, mock_load_file):
hook = HiveCliHook()
b = (True, False)
for create, recreate in itertools.product(b, b):
mock_load_file.reset_mock()
hook.load_df(df=pd.DataFrame({"c": range(0, 10)}),
table="t",
create=create,
recreate=recreate)
mock_load_file.assert_called_once()
kwargs = mock_load_file.call_args[1]
self.assertEqual(kwargs["create"], create)
self.assertEqual(kwargs["recreate"], recreate)
@mock.patch('airflow.hooks.hive_hooks.HiveCliHook.run_cli')
def test_load_df_with_data_types(self, mock_run_cli):
d = OrderedDict()
d['b'] = [True]
d['i'] = [-1]
d['t'] = [1]
d['f'] = [0.0]
d['c'] = ['c']
d['M'] = [datetime.datetime(2018, 1, 1)]
d['O'] = [object()]
d['S'] = ['STRING'.encode('utf-8')]
d['U'] = ['STRING']
d['V'] = [None]
df = pd.DataFrame(d)
hook = HiveCliHook()
hook.load_df(df, 't')
query = """
CREATE TABLE IF NOT EXISTS t (
b BOOLEAN,
i BIGINT,
t BIGINT,
f DOUBLE,
c STRING,
M TIMESTAMP,
O STRING,
S STRING,
U STRING,
V STRING)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
STORED AS textfile
;
"""
assertEqualIgnoreMultipleSpaces(self, mock_run_cli.call_args_list[0][0][0], query)
class TestHiveMetastoreHook(HiveEnvironmentTest):
VALID_FILTER_MAP = {'key2': 'value2'}
def test_get_max_partition_from_empty_part_specs(self):
max_partition = \
HiveMetastoreHook._get_max_partition_from_part_specs([],
'key1',
self.VALID_FILTER_MAP)
self.assertIsNone(max_partition)
def test_get_max_partition_from_valid_part_specs_and_invalid_filter_map(self):
with self.assertRaises(AirflowException):
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
'key1',
{'key3': 'value5'})
def test_get_max_partition_from_valid_part_specs_and_invalid_partition_key(self):
with self.assertRaises(AirflowException):
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
'key3',
self.VALID_FILTER_MAP)
def test_get_max_partition_from_valid_part_specs_and_none_partition_key(self):
with self.assertRaises(AirflowException):
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
None,
self.VALID_FILTER_MAP)
def test_get_max_partition_from_valid_part_specs_and_none_filter_map(self):
max_partition = \
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
'key1',
None)
# No partition will be filtered out.
self.assertEqual(max_partition, b'value3')
def test_get_max_partition_from_valid_part_specs(self):
max_partition = \
HiveMetastoreHook._get_max_partition_from_part_specs(
[{'key1': 'value1', 'key2': 'value2'},
{'key1': 'value3', 'key2': 'value4'}],
'key1',
self.VALID_FILTER_MAP)
self.assertEqual(max_partition, b'value1')
def test_get_metastore_client(self):
self.assertIsInstance(self.hook.get_metastore_client(), HMSClient)
def test_get_conn(self):
self.assertIsInstance(self.hook.get_conn(), HMSClient)
def test_check_for_partition(self):
partition = "{p_by}='{date}'".format(date=DEFAULT_DATE_DS,
p_by=self.partition_by)
missing_partition = "{p_by}='{date}'".format(date=self.next_day,
p_by=self.partition_by)
self.assertTrue(
self.hook.check_for_partition(self.database, self.table,
partition)
)
self.assertFalse(
self.hook.check_for_partition(self.database, self.table,
missing_partition)
)
def test_check_for_named_partition(self):
partition = "{p_by}={date}".format(date=DEFAULT_DATE_DS,
p_by=self.partition_by)
missing_partition = "{p_by}={date}".format(date=self.next_day,
p_by=self.partition_by)
self.assertTrue(
self.hook.check_for_named_partition(self.database,
self.table,
partition)
)
self.assertFalse(
self.hook.check_for_named_partition(self.database,
self.table,
missing_partition)
)
def test_get_table(self):
table_info = self.hook.get_table(db=self.database,
table_name=self.table)
self.assertEqual(table_info.tableName, self.table)
columns = ['state', 'year', 'name', 'gender', 'num']
self.assertEqual([col.name for col in table_info.sd.cols], columns)
def test_get_tables(self):
tables = self.hook.get_tables(db=self.database,
pattern=self.table + "*")
self.assertIn(self.table, {table.tableName for table in tables})
def test_get_databases(self):
databases = self.hook.get_databases(pattern='*')
self.assertIn(self.database, databases)
def test_get_partitions(self):
partitions = self.hook.get_partitions(schema=self.database,
table_name=self.table)
self.assertEqual(len(partitions), 1)
self.assertEqual(partitions, [{self.partition_by: DEFAULT_DATE_DS}])
def test_max_partition(self):
filter_map = {self.partition_by: DEFAULT_DATE_DS}
partition = self.hook.max_partition(schema=self.database,
table_name=self.table,
field=self.partition_by,
filter_map=filter_map)
self.assertEqual(partition, DEFAULT_DATE_DS.encode('utf-8'))
def test_table_exists(self):
self.assertTrue(self.hook.table_exists(self.table, db=self.database))
self.assertFalse(
self.hook.table_exists(str(random.randint(1, 10000)))
)
class TestHiveServer2Hook(unittest.TestCase):
def _upload_dataframe(self):
df = pd.DataFrame({'a': [1, 2], 'b': [1, 2]})
self.local_path = '/tmp/TestHiveServer2Hook.csv'
df.to_csv(self.local_path, header=False, index=False)
def setUp(self):
configuration.load_test_config()
self._upload_dataframe()
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG('test_dag_id', default_args=args)
self.database = 'airflow'
self.table = 'hive_server_hook'
self.hql = """
CREATE DATABASE IF NOT EXISTS {{ params.database }};
USE {{ params.database }};
DROP TABLE IF EXISTS {{ params.table }};
CREATE TABLE IF NOT EXISTS {{ params.table }} (
a int,
b int)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ',';
LOAD DATA LOCAL INPATH '{{ params.csv_path }}'
OVERWRITE INTO TABLE {{ params.table }};
"""
self.columns = ['{}.a'.format(self.table),
'{}.b'.format(self.table)]
self.hook = HiveMetastoreHook()
t = HiveOperator(
task_id='HiveHook_' + str(random.randint(1, 10000)),
params={
'database': self.database,
'table': self.table,
'csv_path': self.local_path
},
hive_cli_conn_id='beeline_default',
hql=self.hql, dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
ignore_ti_state=True)
def tearDown(self):
hook = HiveMetastoreHook()
with hook.get_conn() as metastore:
metastore.drop_table(self.database, self.table, deleteData=True)
os.remove(self.local_path)
def test_get_conn(self):
hook = HiveServer2Hook()
hook.get_conn()
def test_get_records(self):
hook = HiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
results = hook.get_records(query, schema=self.database)
self.assertListEqual(results, [(1, 1), (2, 2)])
def test_get_pandas_df(self):
hook = HiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
df = hook.get_pandas_df(query, schema=self.database)
self.assertEqual(len(df), 2)
self.assertListEqual(df.columns.tolist(), self.columns)
self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2])
def test_get_results_header(self):
hook = HiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
results = hook.get_results(query, schema=self.database)
self.assertListEqual([col[0] for col in results['header']],
self.columns)
def test_get_results_data(self):
hook = HiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
results = hook.get_results(query, schema=self.database)
self.assertListEqual(results['data'], [(1, 1), (2, 2)])
def test_to_csv(self):
hook = HiveServer2Hook()
query = "SELECT * FROM {}".format(self.table)
csv_filepath = 'query_results.csv'
hook.to_csv(query, csv_filepath, schema=self.database,
delimiter=',', lineterminator='\n', output_header=True)
df = pd.read_csv(csv_filepath, sep=',')
self.assertListEqual(df.columns.tolist(), self.columns)
self.assertListEqual(df[self.columns[0]].values.tolist(), [1, 2])
self.assertEqual(len(df), 2)
def test_multi_statements(self):
sqls = [
"CREATE TABLE IF NOT EXISTS test_multi_statements (i INT)",
"SELECT * FROM {}".format(self.table),
"DROP TABLE test_multi_statements",
]
hook = HiveServer2Hook()
results = hook.get_records(sqls, schema=self.database)
self.assertListEqual(results, [(1, 1), (2, 2)])
|
apache-2.0
|
jswanljung/iris
|
docs/iris/src/userguide/regridding_plots/regridded_to_global_area_weighted.py
|
17
|
1646
|
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import iris
import iris.analysis
import iris.plot as iplt
import matplotlib.pyplot as plt
import matplotlib.colors
import numpy as np
global_air_temp = iris.load_cube(iris.sample_data_path('air_temp.pp'))
regional_ash = iris.load_cube(iris.sample_data_path('NAME_output.txt'))
regional_ash = regional_ash.collapsed('flight_level', iris.analysis.SUM)
# Mask values so low that they are anomalous.
regional_ash.data = np.ma.masked_less(regional_ash.data, 5e-6)
norm = matplotlib.colors.LogNorm(5e-6, 0.0175)
global_air_temp.coord('longitude').guess_bounds()
global_air_temp.coord('latitude').guess_bounds()
fig = plt.figure(figsize=(8, 4.5))
plt.subplot(2, 2, 1)
iplt.pcolormesh(regional_ash, norm=norm)
plt.title('Volcanic ash total\nconcentration not regridded',
size='medium')
for subplot_num, mdtol in zip([2, 3, 4], [0, 0.5, 1]):
plt.subplot(2, 2, subplot_num)
scheme = iris.analysis.AreaWeighted(mdtol=mdtol)
global_ash = regional_ash.regrid(global_air_temp, scheme)
iplt.pcolormesh(global_ash, norm=norm)
plt.title('Volcanic ash total concentration\n'
'regridded with AreaWeighted(mdtol={})'.format(mdtol),
size='medium')
plt.subplots_adjust(hspace=0, wspace=0.05,
left=0.001, right=0.999, bottom=0, top=0.955)
# Iterate over each of the figure's axes, adding coastlines, gridlines
# and setting the extent.
for ax in fig.axes:
ax.coastlines('50m')
ax.gridlines()
ax.set_extent([-80, 40, 31, 75])
plt.show()
|
lgpl-3.0
|
ghchinoy/tensorflow
|
tensorflow/examples/get_started/regression/imports85.py
|
41
|
6589
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A dataset loader for imports85.data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pass
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data"
# Order is important for the csv-readers, so we use an OrderedDict here.
defaults = collections.OrderedDict([
("symboling", [0]),
("normalized-losses", [0.0]),
("make", [""]),
("fuel-type", [""]),
("aspiration", [""]),
("num-of-doors", [""]),
("body-style", [""]),
("drive-wheels", [""]),
("engine-location", [""]),
("wheel-base", [0.0]),
("length", [0.0]),
("width", [0.0]),
("height", [0.0]),
("curb-weight", [0.0]),
("engine-type", [""]),
("num-of-cylinders", [""]),
("engine-size", [0.0]),
("fuel-system", [""]),
("bore", [0.0]),
("stroke", [0.0]),
("compression-ratio", [0.0]),
("horsepower", [0.0]),
("peak-rpm", [0.0]),
("city-mpg", [0.0]),
("highway-mpg", [0.0]),
("price", [0.0])
]) # pyformat: disable
types = collections.OrderedDict((key, type(value[0]))
for key, value in defaults.items())
def _get_imports85():
path = tf.contrib.keras.utils.get_file(URL.split("/")[-1], URL)
return path
def dataset(y_name="price", train_fraction=0.7):
"""Load the imports85 data as a (train,test) pair of `Dataset`.
Each dataset generates (features_dict, label) pairs.
Args:
y_name: The name of the column to use as the label.
train_fraction: A float, the fraction of data to use for training. The
remainder will be used for evaluation.
Returns:
A (train,test) pair of `Datasets`
"""
# Download and cache the data
path = _get_imports85()
# Define how the lines of the file should be parsed
def decode_line(line):
"""Convert a csv line into a (features_dict,label) pair."""
# Decode the line to a tuple of items based on the types of
# csv_header.values().
items = tf.decode_csv(line, list(defaults.values()))
# Convert the keys and items to a dict.
pairs = zip(defaults.keys(), items)
features_dict = dict(pairs)
# Remove the label from the features_dict
label = features_dict.pop(y_name)
return features_dict, label
def has_no_question_marks(line):
"""Returns True if the line of text has no question marks."""
# split the line into an array of characters
chars = tf.string_split(line[tf.newaxis], "").values
# for each character check if it is a question mark
is_question = tf.equal(chars, "?")
any_question = tf.reduce_any(is_question)
no_question = ~any_question
return no_question
def in_training_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# If you randomly split the dataset you won't get the same split in both
# sessions if you stop and restart training later. Also a simple
# random split won't work with a dataset that's too big to `.cache()` as
# we are doing here.
num_buckets = 1000000
bucket_id = tf.string_to_hash_bucket_fast(line, num_buckets)
# Use the hash bucket id as a random number that's deterministic per example
return bucket_id < int(train_fraction * num_buckets)
def in_test_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# Items not in the training set are in the test set.
# This line must use `~` instead of `not` because `not` only works on python
# booleans but we are dealing with symbolic tensors.
return ~in_training_set(line)
base_dataset = (
tf.data
# Get the lines from the file.
.TextLineDataset(path)
# drop lines with question marks.
.filter(has_no_question_marks))
train = (base_dataset
# Take only the training-set lines.
.filter(in_training_set)
# Decode each line into a (features_dict, label) pair.
.map(decode_line)
# Cache data so you only decode the file once.
.cache())
# Do the same for the test-set.
test = (base_dataset.filter(in_test_set).cache().map(decode_line))
return train, test
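# A minimal usage sketch for dataset(); the shuffle buffer and batch size below
# are arbitrary illustrative choices (TF 1.x tf.data API, as used above):
#   train, test = dataset()
#   features, label = (train.shuffle(1000).batch(32)
#                      .make_one_shot_iterator().get_next())
#   # `features` is a dict of column tensors keyed as in `defaults`;
#   # `label` is the "price" tensor popped off in decode_line.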
def raw_dataframe():
"""Load the imports85 data as a pd.DataFrame."""
# Download and cache the data
path = _get_imports85()
# Load it into a pandas dataframe
df = pd.read_csv(path, names=types.keys(), dtype=types, na_values="?")
return df
def load_data(y_name="price", train_fraction=0.7, seed=None):
"""Get the imports85 data set.
A description of the data is available at:
https://archive.ics.uci.edu/ml/datasets/automobile
The data itself can be found at:
https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
Args:
y_name: the column to return as the label.
train_fraction: the fraction of the dataset to use for training.
seed: The random seed to use when shuffling the data. `None` generates a
unique shuffle every run.
Returns:
a pair of pairs where the first pair is the training data, and the second
is the test data:
`(x_train, y_train), (x_test, y_test) = get_imports85_dataset(...)`
`x` contains a pandas DataFrame of features, while `y` contains the label
array.
"""
# Load the raw data columns.
data = raw_dataframe()
# Delete rows with unknowns
data = data.dropna()
# Shuffle the data
np.random.seed(seed)
# Split the data into train/test subsets.
x_train = data.sample(frac=train_fraction, random_state=seed)
x_test = data.drop(x_train.index)
# Extract the label from the features dataframe.
y_train = x_train.pop(y_name)
y_test = x_test.pop(y_name)
return (x_train, y_train), (x_test, y_test)
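# Illustrative call to load_data(); the seed value is arbitrary:
#   (x_train, y_train), (x_test, y_test) = load_data(y_name="price", seed=0)
#   # x_train / x_test are pandas DataFrames of features, and
#   # y_train / y_test are the corresponding "price" Series.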
|
apache-2.0
|
kensugino/jGEM
|
jgem/merge3.py
|
1
|
61403
|
"""
.. module:: merge3
:synopsis: merge assemblies from different cell types
jGEM version 3 merger
.. moduleauthor:: Ken Sugino <[email protected]>
"""
# system imports
import subprocess
import multiprocessing
import gzip
import os
import time
import shutil
from functools import reduce
from operator import iadd, iand
from collections import Counter
from itertools import repeat
import logging
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)
# 3rd party imports
import pandas as PD
import numpy as N
import matplotlib.pyplot as P
# LocalAssembler imports
from collections import Counter
from matplotlib.collections import BrokenBarHCollection
from functools import partial, reduce
from operator import iadd
import bisect
from scipy.optimize import nnls
# library imports
from jgem import utils as UT
from jgem import bigwig as BW
from jgem import bedtools as BT
from jgem import gtfgffbed as GGB
from jgem import taskqueue as TQ
from jgem import assembler3 as A3
import jgem.cy.bw as cybw
############# Merge Prep ######################################################
class PrepBWSJ(object):
def __init__(self, j2pres, genome, dstpre, libsizes=None, np=10):
self.j2pres = j2pres
self.libsizes = libsizes # scale = 1e6/libsize
self.genome = genome
self.dstpre = dstpre
self.np = np
def __call__(self):
# exdf => ex.p, ex.n, ex.u
# sjdf => sj.p, sj.n, sj.u
# paths => sjpath.bed
# divide into tasks (exdf,sjdf,paths) x chroms
self.server = server = TQ.Server(name='PrepBWSJ', np=self.np)
self.chroms = chroms = UT.chroms(self.genome)
csizes = UT.df2dict(UT.chromdf(self.genome), 'chr', 'size')
self.exstatus = exstatus = {}
self.sjstatus = sjstatus = {}
self.pastatus = pastatus = {}
self.sdstatus = sdstatus = {}
exdone=False
sjdone=False
padone=False
sddone=False
with server:
for chrom in chroms:
# exdf tasks
tname = 'prep_exwig_chr.{0}'.format(chrom)
args = (self.j2pres, self.libsizes, self.dstpre, chrom, csizes[chrom])
task = TQ.Task(tname, prep_exwig_chr, args)
server.add_task(task)
# exdf tasks
tname = 'prep_sjwig_chr.{0}'.format(chrom)
args = (self.j2pres, self.libsizes, self.dstpre, chrom, csizes[chrom])
task = TQ.Task(tname, prep_sjwig_chr, args)
server.add_task(task)
# exdf tasks
tname = 'prep_sjpath_chr.{0}'.format(chrom)
args = (self.j2pres, self.libsizes, self.dstpre, chrom)
task = TQ.Task(tname, prep_sjpath_chr, args)
server.add_task(task)
tname = 'prep_sjdf_chr.{0}'.format(chrom)
args = (self.j2pres, self.libsizes, self.dstpre, chrom)
task = TQ.Task(tname, prep_sjdf_chr, args)
server.add_task(task)
while server.check_error():
try:
name, rslt = server.get_result(timeout=5) # block until result come in
except TQ.Empty:
name, rslt = None, None
if name is not None:
if name.startswith('prep_exwig_chr.'):
chrom = name.split('.')[1]
exstatus[chrom] = rslt
if len(exstatus)==len(chroms): # all finished
print('$$$$$$$$ putting in prep_exbw $$$$$$$$$$$')
tname='prep_exbw'
args = (self.dstpre, chroms, self.genome)
task = TQ.Task(tname, prep_exbw, args)
server.add_task(task)
if name.startswith('prep_sjwig_chr.'):
chrom = name.split('.')[1]
sjstatus[chrom] = rslt
if len(sjstatus)==len(chroms): # all finished
print('$$$$$$$$ putting in prep_sjbw $$$$$$$$$$$')
tname='prep_sjbw'
args = (self.dstpre, chroms, self.genome)
task = TQ.Task(tname, prep_sjbw, args)
server.add_task(task)
if name.startswith('prep_sjpath_chr.'):
chrom = name.split('.')[1]
pastatus[chrom] = rslt
if len(pastatus)==len(chroms): # all finished
print('$$$$$$$$ putting in prep_sjpath $$$$$$$$$$$')
tname='prep_sjpath'
args = (self.dstpre, chroms)
task = TQ.Task(tname, prep_sjpath, args)
server.add_task(task)
if name.startswith('prep_sjdf_chr.'):
chrom = name.split('.')[1]
sdstatus[chrom] = rslt
if len(sdstatus)==len(chroms): # all finished
print('$$$$$$$$ putting in prep_sjdf $$$$$$$$$$$')
tname='prep_sjdf'
args = (self.dstpre, chroms)
task = TQ.Task(tname, prep_sjdf, args)
server.add_task(task)
if name=='prep_exbw':
print('$$$$$$$$ prep_exbw done $$$$$$$$$$$')
exdone=True
if name=='prep_sjbw':
print('$$$$$$$$ prep_sjbw done $$$$$$$$$$$')
sjdone=True
if name=='prep_sjpath':
print('$$$$$$$$ prep_sjpath done $$$$$$$$$$$')
padone=True
if name=='prep_sjdf':
print('$$$$$$$$ prep_sjdf done $$$$$$$$$$$')
sddone=True
if exdone&sjdone&padone&sddone:
break
print('Exit Loop')
print('Done')
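# Minimal usage sketch for PrepBWSJ; the prefixes, genome name and library
# sizes below are hypothetical placeholders, not values from this module:
#   prep = PrepBWSJ(j2pres=['sample1.pre', 'sample2.pre'], genome='mm10',
#                   dstpre='merged', libsizes=[2.3e7, 1.9e7], np=4)
#   prep()  # queues per-chromosome wig/sjpath/sjdf tasks, then the merge tasks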
def prep_exwig_chr(j2pres, libsizes, dstpre, chrom, csize):
ss = ['p','n','u']
s2s = {'p':['+'],'n':['-'],'u':['.+','.-','.']}
a = {s:N.zeros(csize) for s in ss}
wigpaths = {s:dstpre+'.ex.{0}.{1}.wig'.format(s,chrom) for s in ss}
if all([os.path.exists(dstpre+'.ex.{0}.bw'.format(s)) for s in ss]):
return wigpaths
if all([os.path.exists(dstpre+'.ex.{0}.wig'.format(s)) for s in ss]):
return wigpaths
if all([os.path.exists(wigpaths[s]) for s in ss]):
return wigpaths
if libsizes is None:
n = 1
scales = N.ones(len(j2pres))
else:
n = len(j2pres)
scales = [1e6/float(x) for x in libsizes]
for pre,scale in zip(j2pres, scales):
exdf = UT.read_pandas(pre+'.exdf.txt.gz',names=A3.EXDFCOLS)
exdf = exdf[exdf['chr']==chrom]
for s in ss:
exsub = exdf[exdf['strand'].isin(s2s[s])]
for st,ed,ecov in exsub[['st','ed','ecov']].values:
a[s][st:ed] += ecov*scale
sedf = UT.read_pandas(pre+'.sedf.txt.gz',names=A3.EXDFCOLS)
sedf = sedf[sedf['chr']==chrom]
for s in ss:
sesub = sedf[sedf['strand'].isin(s2s[s])]
for st,ed,ecov in sesub[['st','ed','ecov']].values:
a[s][st:ed] += ecov*scale
for s in ['p','n','u']:
if libsizes is not None:
a[s] /= float(n) # average
cybw.array2wiggle_chr64(a[s], chrom, wigpaths[s], 'w')
return wigpaths
def prep_sjwig_chr(j2pres, libsizes, dstpre, chrom, csize):
ss = ['p','n','u']
s2s = {'p':['+'],'n':['-'],'u':['.+','.-']}
a = {s:N.zeros(csize) for s in ss}
wigpaths = {s:dstpre+'.sj.{0}.{1}.wig'.format(s,chrom) for s in ss}
if all([os.path.exists(dstpre+'.sj.{0}.bw'.format(s)) for s in ss]):
return wigpaths
if all([os.path.exists(dstpre+'.sj.{0}.wig'.format(s)) for s in ss]):
return wigpaths
if all([os.path.exists(wigpaths[s]) for s in ss]):
return wigpaths
if libsizes is None:
n = 1
scales = N.ones(len(j2pres))
else:
n = len(j2pres)
scales = [1e6/float(x) for x in libsizes]
for pre,scale in zip(j2pres, scales):
sjdf = UT.read_pandas(pre+'.sjdf.txt.gz',names=A3.SJDFCOLS)
sjdf = sjdf[sjdf['chr']==chrom]
for s in ss:
sjsub = sjdf[sjdf['strand'].isin(s2s[s])]
for st,ed,tcnt in sjsub[['st','ed','tcnt']].values:
a[s][st:ed] += tcnt*scale
for s in ['p','n','u']:
if libsizes is not None:
a[s] /= float(n) # average
cybw.array2wiggle_chr64(a[s], chrom, wigpaths[s], 'w')
return wigpaths
def prep_sjpath_chr(j2pres, libsizes, dstpre, chrom):
pc2st = {}
pc2ed = {}
pc2tst = {}
pc2ted = {}
pc2strand = {}
pc2tcov = {}
# pc2tcov0 = {}
# chr,st,ed,name,sc1(tcov),strand,tst,ted,sc2(),#exons,estarts,esizes
# cols = ['st','ed','name','strand','tst','ted','tcov0','tcov']
path = dstpre+'.sjpath.{0}.bed.gz'.format(chrom)
path0 = dstpre+'.sjpath.bed.gz'
if os.path.exists(path0):
return path
if os.path.exists(path):
return path
cols = ['st','ed','name','strand','tst','ted','tcov']
if libsizes is None:
n = 1
scales = N.ones(len(j2pres))
else:
n = len(j2pres)
scales = [1e6/float(x) for x in libsizes]
for pre,scale in zip(j2pres, scales):
paths = UT.read_pandas(pre+'.paths.txt.gz', names=A3.PATHCOLS)
paths = paths[paths['chr']==chrom]
for st,ed,name,s,tst,ted,tcov in paths[cols].values:
            pc = ','.join(name.split(',')[1:-1]) # trim the 5' and 3' exons => intron chain
if pc=='':
continue # ignore no junction path
pc2st[pc] = min(st, pc2st.get(pc,st))
pc2ed[pc] = max(ed, pc2ed.get(pc,ed))
pc2tst[pc] = tst
pc2ted[pc] = ted
pc2strand[pc] = s
pc2tcov[pc] = pc2tcov.get(pc,0)+scale*tcov
#pc2tcov0[pc] = pc2tcov0.get(pc,0)+scale*tcov0
df = PD.DataFrame({'st':pc2st,'ed':pc2ed,'tst':pc2tst,'ted':pc2ted,
'strand':pc2strand,'tcov':pc2tcov})
df['chr'] = chrom
df.index.name = 'name'
df.reset_index(inplace=True)
# create bed12: parse name => #exons, esizes, estarts
df['pc'] = df['name'].copy()
idxp = df['strand'].isin(['+','.+'])
if libsizes is not None:
df['tcov'] = df['tcov']/float(n)
df.loc[idxp,'name'] = ['{0},{1},{2}'.format(s,p,e) for s,p,e in df[idxp][['st','pc','ed']].values]
df.loc[~idxp,'name'] = ['{2},{1},{0}'.format(s,p,e) for s,p,e in df[~idxp][['st','pc','ed']].values]
df = df.groupby('pc').first() # get rid of unstranded duplicates
cmax = 9+N.log2(N.mean(scales))
bed = A3.path2bed12(df, cmax)
# reset sc1 to tcov (from log2(tcov+2)*100)
bed['sc1'] = bed['tcov']
GGB.write_bed(bed, path, ncols=12)
return path
def prep_sjdf_chr(j2pres, libsizes, dstpre, chrom):
pc2st = {}
pc2ed = {}
pc2strand = {}
pc2tcnt = {}
pc2ucnt = {}
# chr,st,ed,name,sc1(tcov),strand,tst,ted,sc2(),#exons,estarts,esizes
# cols = ['st','ed','name','strand','tst','ted','tcov0','tcov']
path = dstpre+'.sjdf.{0}.txt.gz'.format(chrom)
path0 = dstpre+'.sjdf.txt.gz'
if os.path.exists(path0):
return path
if os.path.exists(path):
return path
cols = ['st','ed','name','strand','st','ed','tcnt','ucnt']
# cols = A3.SJDFCOLS
if libsizes is None:
n = 1
scales = N.ones(len(j2pres))
else:
n = len(j2pres)
scales = [1e6/float(x) for x in libsizes]
for pre,scale in zip(j2pres, scales):
paths = UT.read_pandas(pre+'.sjdf.txt.gz', names=A3.SJDFCOLS)
paths = paths[paths['chr']==chrom]
for st,ed,pc,s,st,ed,tcnt,ucnt in paths[cols].values:
pc2st[pc] = st
pc2ed[pc] = ed
pc2strand[pc] = s
pc2tcnt[pc] = pc2tcnt.get(pc,0)+scale*tcnt
pc2ucnt[pc] = pc2ucnt.get(pc,0)+scale*ucnt
df = PD.DataFrame({'st':pc2st,'ed':pc2ed,'st':pc2st,'ed':pc2ed,
'strand':pc2strand,'tcnt':pc2tcnt,'ucnt':pc2ucnt})
df['chr'] = chrom
df['kind'] = 'j'
if libsizes is not None:
df['tcnt'] = df['tcnt']/float(n)
df['ucnt'] = df['ucnt']/float(n)
df.index.name = 'name'
df.reset_index(inplace=True)
UT.write_pandas(df[A3.SJDFCOLS], path, '')
return path
def prep_exbw(dstpre, chroms, genome):
return _prep_bw(dstpre, chroms, genome, 'ex')
def prep_sjbw(dstpre, chroms, genome):
return _prep_bw(dstpre, chroms, genome, 'sj')
def _prep_bw(dstpre, chroms, genome, w):
# concatenate
ss = ['p','n','u']
files = []
bwpaths = {s: dstpre+'.{1}.{0}.bw'.format(s,w) for s in ss}
if all([os.path.exists(bwpaths[s]) for s in ss]):
return bwpaths
for s in ss:
dstwig = dstpre+'.{1}.{0}.wig'.format(s,w)
with open(dstwig, 'wb') as dst:
for c in chroms:
srcpath = dstpre+'.{2}.{0}.{1}.wig'.format(s,c,w)
with open(srcpath,'rb') as src:
shutil.copyfileobj(src,dst)
files.append(srcpath)
files.append(dstwig)
print('converting wig to bigwig {0}'.format(dstwig))
BT.wig2bw(dstwig, UT.chromsizes(genome), bwpaths[s])
# clean up
for f in files:
os.unlink(f)
return bwpaths
def prep_sjpath(dstpre, chroms):
dstpath = dstpre+'.sjpath.bed.gz'
if os.path.exists(dstpath):
return dstpath
files = []
with open(dstpath, 'wb') as dst:
for c in chroms:
srcpath = dstpre+'.sjpath.{0}.bed.gz'.format(c)
with open(srcpath,'rb') as src:
shutil.copyfileobj(src,dst)
files.append(srcpath)
# for f in files: # keep separate chr files
# os.unlink(f)
return dstpath
def prep_sjdf(dstpre, chroms):
dstpath = dstpre+'.sjdf.txt.gz'
if os.path.exists(dstpath):
return dstpath
files = []
with open(dstpath, 'wb') as dst:
for c in chroms:
srcpath = dstpre+'.sjdf.{0}.txt.gz'.format(c)
with open(srcpath,'rb') as src:
shutil.copyfileobj(src,dst)
files.append(srcpath)
# for f in files: # keep separate chr files
# os.unlink(f)
return dstpath
############# SJ Filter #######################################################
SJFILTERPARAMS = dict(
th_detected=1,
th_maxcnt=1,
th_maxoverhang=15,
th_minedgeexon=15,
th_sjratio=1e-3,
    filter_unstranded=False, # there is a substantial number of high-coverage unstranded junctions
)
class SJFilter(object):
def __init__(self, bwsjpre, statspath, genome, np=10, **kw):
self.bwsjpre = bwsjpre
self.statspath = statspath
self.genome = genome
self.np = np
self.params = SJFILTERPARAMS.copy()
self.params.update(kw)
def __call__(self):
chroms = UT.chroms(self.genome)
csizedic = UT.df2dict(UT.chromdf(self.genome), 'chr', 'size')
args = []
for c in chroms:
csize = csizedic[c]
args.append((self.bwsjpre, self.statspath, c, csize, self.params))
rslts = UT.process_mp(filter_sjpath, args, np=self.np, doreduce=False)
dstpath = self.bwsjpre+'.filtered.sjpath.bed.gz'
with open(dstpath,'wb') as dst:
for c in chroms:
srcpath = self.bwsjpre+'.filtered.sjpath.{0}.bed.gz'.format(c)
with open(srcpath, 'rb') as src:
shutil.copyfileobj(src, dst)
rslts = UT.process_mp(filter_sjdf, args, np=self.np, doreduce=False)
dstpath = self.bwsjpre+'.filtered.sjdf.txt.gz'
with open(dstpath,'wb') as dst:
for c in chroms:
srcpath = self.bwsjpre+'.filtered.sjdf.{0}.txt.gz'.format(c)
with open(srcpath, 'rb') as src:
shutil.copyfileobj(src, dst)
# make sj.bw
sjfiltered2bw(self.bwsjpre, self.genome, self.np)
for s in ['p','n','u']:
src = self.bwsjpre + '.ex.{0}.bw'.format(s)
dst = self.bwsjpre + '.filtered.ex.{0}.bw'.format(s)
cmd = ['ln','-s', src, dst]
subprocess.call(cmd)
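# Usage sketch (illustrative; the file prefixes are hypothetical and extra
# keyword arguments override SJFILTERPARAMS):
#   sjf = SJFilter(bwsjpre='work/merged', statspath='work/merged.sjstats.txt.gz',
#                  genome='mm10', np=8, th_maxcnt=2)
#   sjf()  # writes work/merged.filtered.sjpath.bed.gz, .filtered.sjdf.txt.gz
#          # and the filtered sj/ex bigwigs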
def locus2pc(l):
chrom,sted,strand = l.split(':')
st,ed = sted.split('-')
st = str(int(st)-1)
if strand in ['+','.']:
return '|'.join([st,ed])
return '|'.join([ed,st])
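# Worked example: locus strings are 'chr:st-ed:strand' with a 1-based start,
# and the returned pc uses a 0-based start, donor side first on the minus strand.
#   locus2pc('chr1:1000-2000:+')  -> '999|2000'
#   locus2pc('chr1:1000-2000:-')  -> '2000|999'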
def filter_sjpath(bwsjpre, statspath, chrom, csize, params):
# read in junction stats
stats = UT.read_pandas(statspath)
if 'chr' not in stats:
stats['chr'] = [x.split(':')[0] for x in stats['locus']]
if '#detected' in stats:
stats.rename(columns={'#detected':'detected'}, inplace=True)
stats = stats[stats['chr']==chrom].copy()
if 'pc' not in stats:
stats['pc'] = [locus2pc(x) for x in stats['locus']]
flds = ['detected','maxcnt','maxoverhang']
dics = {f: UT.df2dict(stats, 'pc', f) for f in flds}
# read sjpath
fpath_chr = bwsjpre+'.sjpath.{0}.bed.gz'.format(chrom)
dstpath = bwsjpre+'.filtered.sjpath.{0}.bed.gz'.format(chrom)
if os.path.exists(fpath_chr):
sj = GGB.read_bed(fpath_chr)
else:
fpath = bwsjpre+'.sjpath.bed.gz'
sj = GGB.read_bed(fpath)
sj = sj[sj['chr']==chrom].copy()
name0 = sj.iloc[0]['name']
if len(name0.split('|'))<len(name0.split(',')): # exons attached?
sj['name'] = [','.join(x.split(',')[1:-1]) for x in sj['name']]
# filter unstranded
if params['filter_unstranded']:
sj = sj[sj['strand'].isin(['+','-'])].copy()
# filter with stats
for f in flds:
sj[f] = [N.min([dics[f].get(x,0) for x in y.split(',')]) for y in sj['name']]
sj = sj[sj[f]>params['th_'+f]].copy() # filter
# edge exon size
sj['eflen'] = [int(x.split(',')[0]) for x in sj['esizes']]
sj['ellen'] = [int(x.split(',')[-2]) for x in sj['esizes']]
eth = params['th_minedgeexon']
sj = sj[(sj['eflen']>eth)&(sj['ellen']>eth)].copy()
# calculate sjratio, sjratio
if params['filter_unstranded']:
sjexbw = A3.SjExBigWigs(bwsjpre, mixunstranded=False)
else:
sjexbw = A3.SjExBigWigs(bwsjpre, mixunstranded=True)
with sjexbw:
sa = sjexbw.bws['sj']['a'].get(chrom,0,csize)
ea = sjexbw.bws['ex']['a'].get(chrom,0,csize)
a = sa+ea
# sj['sjratio'] = [x/N.mean(a[int(s):int(e)]) for x,s,e in sj[['sc1','tst','ted']].values]
sj['sjratio'] = [x/N.max(a[int(s):int(e)]) for x,s,e in sj[['sc1','tst','ted']].values]
sj = sj[sj['sjratio']>params['th_sjratio']]
GGB.write_bed(sj, dstpath, ncols=12)
def filter_sjdf(bwsjpre, statspath, chrom, csize, params):
# read in junction stats
stats = UT.read_pandas(statspath)
if 'chr' not in stats:
stats['chr'] = [x.split(':')[0] for x in stats['locus']]
if '#detected' in stats:
stats.rename(columns={'#detected':'detected'}, inplace=True)
stats = stats[stats['chr']==chrom].copy()
if 'pc' not in stats:
stats['pc'] = [locus2pc(x) for x in stats['locus']]
flds = ['detected','maxcnt','maxoverhang']
dics = {f: UT.df2dict(stats, 'pc', f) for f in flds}
# read sjdf
fpath_chr = bwsjpre+'.sjdf.{0}.txt.gz'.format(chrom)
dstpath = bwsjpre+'.filtered.sjdf.{0}.txt.gz'.format(chrom)
if os.path.exists(fpath_chr):
sj = UT.read_pandas(fpath_chr, names=A3.SJDFCOLS)
else:
fpath = bwsjpre+'.sjdf.txt.gz'
sj = UT.read_pandas(fpath, names=A3.SJDFCOLS)
sj = sj[sj['chr']==chrom].copy()
# filter unstranded
if params['filter_unstranded']:
sj = sj[sj['strand'].isin(['+','-'])].copy()
# filter with stats
for f in flds:
# sj[f] = [N.min([dics[f].get(x,0) for x in y.split(',')]) for y in sj['name']]
sj[f] = [dics[f].get(y,0) for y in sj['name']]
sj = sj[sj[f]>params['th_'+f]].copy() # filter
# edge exon size
# sj['eflen'] = [int(x.split(',')[0]) for x in sj['esizes']]
# sj['ellen'] = [int(x.split(',')[-2]) for x in sj['esizes']]
# eth = params['th_minedgeexon']
# sj = sj[(sj['eflen']>eth)&(sj['ellen']>eth)].copy()
# calculate sjratio, sjratio
if params['filter_unstranded']:
sjexbw = A3.SjExBigWigs(bwsjpre, mixunstranded=False)
else:
sjexbw = A3.SjExBigWigs(bwsjpre, mixunstranded=True)
with sjexbw:
sa = sjexbw.bws['sj']['a'].get(chrom,0,csize)
ea = sjexbw.bws['ex']['a'].get(chrom,0,csize)
a = sa+ea
# sj['sjratio'] = [x/N.mean(a[int(s):int(e)]) for x,s,e in sj[['tcnt','st','ed']].values]
sj['sjratio'] = [x/N.max(a[int(s):int(e)]) for x,s,e in sj[['tcnt','st','ed']].values]
sj = sj[sj['sjratio']>params['th_sjratio']]
UT.write_pandas(sj[A3.SJDFCOLS], dstpath, '')
def sjfiltered2wig(bwpre, chrom, chromsize):
a = {'+':N.zeros(chromsize, dtype=N.float64),
'-':N.zeros(chromsize, dtype=N.float64),
'.':N.zeros(chromsize, dtype=N.float64)}
path = bwpre+'.filtered.sjdf.{0}.txt.gz'.format(chrom)
sjchr = UT.read_pandas(path, names=A3.SJDFCOLS)
for st,ed,v,strand in sjchr[['st','ed','tcnt','strand']].values:
a[strand[0]][st:ed] += v
for strand in a:
wig = bwpre+'.filtered.sjdf.{0}.{1}.wig'.format(chrom, strand)
cybw.array2wiggle_chr64(a[strand], chrom, wig)
return path
def sjfiltered2bw(bwpre, genome, np=12):
chroms = UT.chroms(genome)
chromdf = UT.chromdf(genome).sort_values('size',ascending=False)
chroms = [x for x in chromdf['chr'] if x in chroms]
chromdic = UT.df2dict(chromdf, 'chr', 'size')
args = [(bwpre, c, chromdic[c]) for c in chroms]
rslts = UT.process_mp(sjfiltered2wig, args, np=np, doreduce=False)
S2N = {'+':'p','-':'n','.':'u'}
rmfiles = []
for strand in ['+','-','.']:
s = S2N[strand]
wigpath = bwpre+'.filtered.sj.{0}.wig'.format(s)
with open(wigpath, 'w') as dst:
for chrom in chroms:
f = bwpre+'.filtered.sjdf.{0}.{1}.wig'.format(chrom, strand)
with open(f,'r') as src:
shutil.copyfileobj(src, dst)
rmfiles.append(f)
bwpath = bwpre+'.filtered.sj.{0}.bw'.format(s)
BT.wig2bw(wigpath, UT.chromsizes(genome), bwpath)
rmfiles.append(wigpath)
for f in rmfiles:
os.unlink(f)
############# Cov Estimator ######################################################
class LocalEstimator(A3.LocalAssembler):
def __init__(self, modelpre, bwpre, chrom, st, ed, dstpre, tcovth, usegeom=False):
self.modelpre = modelpre
self.tcovth = tcovth
self.usegeom = usegeom
A3.LocalAssembler.__init__(self, bwpre, chrom, st, ed, dstpre)
bed12 = GGB.read_bed(modelpre+'.paths.withse.bed.gz')
assert(all(bed12['tst']<bed12['ted']))
idx = (bed12['chr']==chrom)&(bed12['tst']>=st)&(bed12['ted']<=ed)
self.paths = bed12[idx].copy()
eids = set()
sids = set()
for n in self.paths['name']:
eids.update(n.split('|'))
sids.update(n.split(',')[1:-1])
tgt1 = bwpre+'.filtered.{0}.bed.gz'.format(chrom)
tgt2 = bwpre+'.{0}.bed.gz'.format(chrom)
tgt3 = bwpre+'.sjpath.bed.gz'
if os.path.exists(tgt1):
sj = GGB.read_bed(tgt1)
elif os.path.exists(tgt2):
sj = GGB.read_bed(tgt2)
else:
sj = GGB.read_bed(tgt3)
idx0 = (sj['chr']==chrom)&(sj['tst']>=st)&(sj['ted']<=ed)
self.sjpaths0 = sj[idx0].copy()
# load exdf, sjdf
sjdf = UT.read_pandas(modelpre+'.sjdf.txt.gz', names=A3.SJDFCOLS)
sjdf['tst'] = sjdf['st'] # for sjpath compatibility
sjdf['ted'] = sjdf['ed']
sjdf['sc1'] = sjdf['ucnt']
sjdf['sc2'] = sjdf['tcnt']
sjdf = sjdf[(sjdf['chr']==chrom)&(sjdf['st']>=st)&(sjdf['ed']<=ed)]
sjdf = sjdf[sjdf['name'].isin(sids)]
self.sjdf = sjdf.groupby(['chr','st','ed','strand']).first().reset_index()
exdf = UT.read_pandas(modelpre+'.exdf.txt.gz', names=A3.EXDFCOLS)
exdf = exdf[(exdf['chr']==chrom)&(exdf['st']>=st)&(exdf['ed']<=ed)]
exdf = exdf[exdf['name'].isin(eids)]
if os.path.exists(modelpre+'.sedf.txt.gz'):
sedf = UT.read_pandas(modelpre+'.sedf.txt.gz', names=A3.EXDFCOLS)
sedf = sedf[(sedf['chr']==chrom)&(sedf['st']>=st)&(sedf['ed']<=ed)]
sedf = sedf[sedf['name'].isin(eids)]
exdf = PD.concat([exdf,sedf],ignore_index=True)
self.exdf = exdf.groupby(['chr','st','ed','strand','kind']).first().reset_index()
A3.set_ad_pos(self.sjdf, 'sj')
A3.set_ad_pos(self.exdf, 'ex')
# filled
self.filled = {}
sjs = self.sjdf
exs = self.exdf[self.exdf['kind']=='i'].copy()
exs['ost'] = exs['st']-self.st
exs['oed'] = exs['ed']-self.st
for s in ['+','-']:
sja = self.arrs['sj'][s]
sj = sjs[sjs['strand'].isin(A3.STRS[s])]
ex = exs[exs['strand'].isin(A3.STRS[s])]
self.filled[s] = A3.fill_gap(sja, sj, ex, s, self.st)
# fix_i53completematch(self.exdf, self.paths) # extend 5'3' exons completely matched internal exons
def process(self):
self.calculate_ecovs()
self.calculate_scovs()
self.estimate_abundance()
self.write()
return
def calculate_scovs(self):
sj = self.sjdf
sj0 = self.sjpaths0
sj0mat = sj0[['sc1','sc2','name']].values
tmp = [[(sc1,sc2) for sc1,sc2,p in sj0mat if y in p] for y in sj['name']]
sj['ucnt'] = [N.sum([x[0] for x in y]) for y in tmp]
sj['tcnt'] = [N.sum([x[1] for x in y]) for y in tmp]
self.sjdfi = sj.set_index('name')
def calculate_branchp(self, jids, eids):
sj0 = self.sjdfi
sj = sj0.ix[jids].reset_index()
ex0 = self.exdfi
ex = ex0.ix[eids].reset_index()
dsump = sj.groupby('dpos')['tcnt'].sum().astype(float)
tmp = dsump.ix[sj['dpos'].values]
jdp = sj['tcnt'].values/tmp.values
idx = N.array(tmp==0, dtype=bool)
jdp[idx] = 0.
j2p = dict(zip(sj['name'].values, jdp))
# exon groupby acceptor
asump = ex.groupby('apos')['ecov'].sum().astype(float)
tmp = asump.ix[ex['apos'].values]
eap = ex['ecov'].values/(tmp.values)
idx = N.array(tmp==0, dtype=bool)
eap[idx] = 0.
e2ap = dict(zip(ex['name'].values, eap))
dsump = ex.groupby('dpos')['ecov'].sum().astype(float)
tmp = dsump.ix[ex['dpos'].values]
edp = ex['ecov'].values/(tmp.values)
idx = N.array(tmp==0, dtype=bool)
edp[idx] = 0.
e2dp = dict(zip(ex['name'].values, edp))
return j2p, e2ap, e2dp
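    # Note on calculate_branchp: junctions sharing a donor position (dpos) are
    # normalized by their summed tcnt, so e.g. two junctions with tcnt 30 and 10
    # from the same donor get branch probabilities 0.75 and 0.25. Exons are
    # normalized the same way by ecov, grouped by acceptor (apos) and donor
    # (dpos) positions; groups whose total is zero get probability 0.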
def tcov_by_nnls(self, s, e, strand):
o = int(self.st)
p = self.paths
idx = (p['tst']>=s)&(p['ted']<=e)&(p['strand'].isin(A3.STRS[strand]))
ps = p[idx]
if len(ps)==0:
return None
pg = ps.groupby(['tst','ted']).first().reset_index()[['chr','tst','ted','strand','name']].sort_values(['tst','ted'])
pg['strand'] = strand
ne = len(pg)
exa = self.arrs['ex'][strand]
# sja = self.arrs['sj'][strand]
sja = self.filled[strand]
def cov0(s,e):
# return N.sum(sja[s-o:e-o]+exa[s-o:e-o])/(e-s)
return N.mean(sja[s-o:e-o])
# def cov1s(s):
# s0 = max(0, int(s)-o-10)
# s1 = max(s0+1,int(s)-o)
# return N.mean(exa[s0:s1])
# def cov1e(e):
# return N.mean(exa[int(e)-o:int(e)-o+10])
e_ed2cov = self.eed2cov[strand]
e_st2cov = self.est2cov[strand]
def cov1s(s):
return e_ed2cov.get(s,0)
def cov1e(e):
return e_st2cov.get(e,0)
def cov2s(s): # donor
# s0 = max(0, s-o-1)
return max(0, sja[int(s)-o]-sja[int(s)-o-1])
def cov2e(e): # acceptor
# e0 = max(0, e-o-1)
return max(0, sja[int(e)-o-1]-sja[int(e)-o])
# cov0
if ne>1:
pg.rename(columns={'tst':'st','ted':'ed'}, inplace=True)
pg['eid'] = N.arange(len(pg))
ci = UT.chopintervals(pg, idcol='eid')
ci['cov'] = [cov0(s,e) for s,e in ci[['st','ed']].values]
ci['name1'] = ci['name'].astype(str).apply(lambda x: [int(y) for y in x.split(',')])
nc = len(ci)
mat = N.zeros((nc,ne))
for i,n1 in enumerate(ci['name1'].values):# fill in rows
N.put(mat[i], N.array(n1), 1)
try:
ecov,err = nnls(mat, ci['cov'].values)
pg['tcov0a'] = ecov
            except Exception as e:
# too much iteration?
LOG.warning('!!!!!! Exception in NNLS (tcov_by_nnls) @{0}:{1}-{2}, setting to zero !!!!!!!!!'.format(self.chrom, s, e))
pg['tcov0a'] = 0
# raise e
pg.rename(columns={'st':'tst','ed':'ted'}, inplace=True)
else: # this includes single exons
s,e = pg.iloc[0][['tst','ted']]
pg['tcov0a'] = cov0(s,e)
# cov1, cov2
if ne>1:
sts = sorted(set(pg['tst'].values))
eds = sorted(set(pg['ted'].values))
nst,ned = len(sts),len(eds)
mat = N.array([(pg['tst']==x).values for x in sts]+[(pg['ted']==x).values for x in eds], dtype=float)
c = N.array([cov1s(x) for x in sts]+[cov1e(x) for x in eds])
# enforce flux conservation: scale up 5'
stsum = N.sum(c[:nst])
edsum = N.sum(c[nst:])
if stsum<1e-9 or edsum<1e-9:
pg['tcov0b'] = 0
else:
c0 = c.copy()
if strand in ['+','.+']:
c[:nst] = (edsum/stsum)*c[:nst]
else:
c[nst:] = (stsum/edsum)*c[nst:]
try:
ecov,err = nnls(mat, c)
                except Exception as e:
print('s:{0},e:{1},strand:{2}'.format(s,e,strand))
print('stsum:', stsum)
print('edsum:', edsum)
print('nnls error tcov0b', mat, c, c0)
print('sts:',sts)
print('eds:',eds)
print('pg:',pg)
pg['tcov0c'] = 0
raise e
pg['tcov0b'] = ecov
mat = N.array([(pg['tst']==x).values for x in sts]+[(pg['ted']==x).values for x in eds], dtype=float)
c = N.array([cov2s(x) for x in sts]+[cov2e(x) for x in eds])
# enforce flux conservation: scale up 5'
stsum = N.sum(c[:nst])
edsum = N.sum(c[nst:])
if stsum<1e-9 or edsum<1e-9:
pg['tcov0c'] = 0
else:
if strand in ['+','.+']:
c[:nst] = (edsum/stsum)*c[:nst]
else:
c[nst:] = (stsum/edsum)*c[nst:]
try:
ecov,err = nnls(mat, c)
                except Exception as e:
print('s:{0},e:{1},strand:{2}'.format(s,e,strand))
print('nnls error tcov0c', mat, c)
pg['tcov0c'] = 0
raise e
pg['tcov0c'] = ecov
else:
s,e = pg.iloc[0][['tst','ted']]
pg['tcov0b'] = (cov1s(s)+cov1e(e))/2.
pg['tcov0c'] = (cov2s(s)+cov2e(e))/2.
if not self.usegeom:
# pg['tcov0'] = pg[['tcov0a','tcov0b','tcov0c']].mean(axis=1)
# pg['tcov0'] = (2*pg['tcov0a']+pg['tcov0b']+pg['tcov0c'])/4. # weighted
pg['tcov0'] = pg[['tcov0a','tcov0b','tcov0c']].median(axis=1)
else:
pg['tcov0'] = N.power(pg['tcov0a']*pg['tcov0b']*pg['tcov0c'], 1/3.) # geometric mean
pg.loc[pg['tcov0']<0,'tcov0'] = 0 # shouldn't really happen
keys = [tuple(x) for x in p[idx][['tst','ted']].values]
for f in ['tcov0','tcov0a','tcov0b','tcov0c']:
p.loc[idx, f] = pg.set_index(['tst','ted']).ix[keys][f].values
return pg[['chr','tst','ted','strand','tcov0']]
def tcov_by_branchp(self, tst, ted, strand, tcov0):
p = self.paths
idx = (p['strand'].isin(A3.STRS[strand]))&(p['tst']==tst)&(p['ted']==ted)
if N.sum(idx)==0:
return
# if N.sum(idx)>1:
# calculate branchp within this group
jids = set()
eids = set()
for n in p[idx]['name']:
jids.update(n.split(',')[1:-1])
eids.update(n.split('|'))
j2p, e2ap, e2dp = self.calculate_branchp(jids, eids)
def _prob(y):
epath0 = y.split('|')
e5 = epath0[0] # use donor p
epath = epath0[1:] # use acceptor p
jpath = y.split(',')[1:-1]
return e2dp[e5]*N.prod([e2ap[x] for x in epath])*N.prod([j2p[x] for x in jpath])
p.loc[idx,'tcov'] = [tcov0*_prob(y) for y in p[idx]['name']]
# else:
# p.loc[idx,'tcov'] = tcov0
def estimate_abundance(self):
# 1) 5-3 group by NNLS
# 2) within 5-3 group by tree branch prob
paths = self.paths
idxme = paths['name'].str.contains('\|')
mepaths = paths[idxme].copy()
sepaths = paths[~idxme].copy()
self.paths = mepaths
for s in ['+','-']:
ps = mepaths[mepaths['strand'].isin(A3.STRS[s])]
if len(ps)==0:
continue
# for chrom,st,ed in UT.union_contiguous(ps[['chr','st','ed']],returndf=False):
poscols = ['chr','tst','ted']
for chrom,st,ed in UT.union_contiguous(ps[poscols],pos_cols=poscols,returndf=False):
pg = self.tcov_by_nnls(st,ed,s)
if pg is not None:
for chrom,tst,ted,strand,tcov0 in pg.values:
self.tcov_by_branchp(tst,ted,strand,tcov0)
e2c = UT.df2dict(self.exdf, 'name', 'ecov')
sepaths['tcov'] = [e2c[x] for x in sepaths['name']]
for f in ['tcov0','tcov0b']:
sepaths[f] = sepaths['tcov']
sepaths['tcov0a'] = 0.
sepaths['tcov0c'] = 0.
paths = PD.concat([mepaths, sepaths], ignore_index=True)
paths.sort_values(['chr','st','ed'],inplace=True)
self.paths = paths
def write(self):
pre = self.dstpre+'.{0}_{1}_{2}'.format(self.chrom,self.st,self.ed)
# 1) exon, junctions, allpaths => csv (no header <= to concatenate bundles)
ecols = A3.EXDFCOLS #['chr','st','ed','strand','name','kind','ecov']
UT.write_pandas(self.exdf[ecols], pre+'.covs.exdf.txt.gz', '')
scols = A3.SJDFCOLS #['chr','st','ed','strand','name','kind','tcnt' ]#,'donor','acceptor','dp','ap']
UT.write_pandas(self.sjdf[scols], pre+'.covs.sjdf.txt.gz', '')
pcols = A3.PATHCOLS #['chr','st','ed','name','strand','tst','ted','tcov0','tcov1','tcov']
UT.write_pandas(self.paths[pcols], pre+'.covs.paths.txt.gz', '')
# write colored bed12 for tcov > th
tgt = self.paths[self.paths['tcov']>=self.tcovth].copy()
self.bed12 = A3.path2bed12(tgt, cmax=9, covfld='tcov')
GGB.write_bed(self.bed12, pre+'.covs.paths.bed.gz',ncols=12)
def bundle_estimator(modelpre, bwpre, chrom, st, ed, dstpre, tcovth, usegeom):
bname = A3.bundle2bname((chrom,st,ed))
bsuf = '.{0}_{1}_{2}'.format(chrom,st,ed)
csuf = '.{0}'.format(chrom)
sufs = ['.covs.exdf.txt.gz',
'.covs.sjdf.txt.gz',
'.covs.paths.txt.gz',
'.covs.paths.bed.gz',
]
done = []
for x in sufs:
done.append(os.path.exists(dstpre+bsuf+x) | \
os.path.exists(dstpre+csuf+x) | \
os.path.exists(dstpre+x) )
if all(done):
        LOG.info('bundle {0} already done, skipping'.format(bname))
return bname
    LOG.info('processing bundle {0}'.format(bname))
la = LocalEstimator(modelpre, bwpre, chrom, st, ed, dstpre, tcovth, usegeom)
return la.process()
def concatenate_bundles(bundles, dstpre):
# concat results
sufs = ['covs.exdf.txt.gz',
'covs.sjdf.txt.gz',
'covs.paths.txt.gz',
'covs.paths.bed.gz',
]
files = []
for suf in sufs:
dstpath = '{0}.{1}'.format(dstpre, suf)
if not os.path.exists(dstpath):
with open(dstpath, 'wb') as dst:
for chrom, st, ed in bundles:
bname = A3.bundle2bname((chrom,st,ed))
srcpath = '{0}.{1}_{2}_{3}.{4}'.format(dstpre, chrom, st, ed, suf)
files.append(srcpath)
with open(srcpath, 'rb') as src:
shutil.copyfileobj(src, dst)
else:
files+=['{0}.{1}_{2}_{3}.{4}'.format(dstpre, chrom, st, ed, suf) for chrom,st,ed in bundles]
# cleanup
for f in files:
if os.path.exists(f):
os.unlink(f)
def estimatecovs(modelpre, bwpre, dstpre, genome, tcovth=1, usegeom=True, np=6):
bed = GGB.read_bed(modelpre+'.paths.withse.bed.gz')
chroms = bed['chr'].unique()
csizedic = UT.df2dict(UT.chromdf(genome), 'chr', 'size')
bundles = []
args = []
for chrom in chroms:
sub = bed[(bed['chr']==chrom)]
uc = UT.union_contiguous(sub[['chr','st','ed']], returndf=True)
# total about 30K=> make batch of ~1000
n = len(uc)
nb = int(N.ceil(n/1000.))
for i in range(nb):
sti = 1000*i
edi = min(1000*(i+1), len(uc)-1)
st = max(uc.iloc[sti]['st'] - 100, 0)
ed = min(uc.iloc[edi]['ed'] + 100, csizedic[chrom])
args.append([modelpre, bwpre, chrom, st, ed, dstpre, tcovth, usegeom])
bundles.append((chrom,st,ed))
rslts = UT.process_mp(bundle_estimator, args, np=np, doreduce=False)
concatenate_bundles(bundles, dstpre)
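# Usage sketch (illustrative; the model and bigwig prefixes below are hypothetical):
#   estimatecovs(modelpre='work/model', bwpre='work/sampleA',
#                dstpre='work/sampleA.cov', genome='mm10',
#                tcovth=1, usegeom=True, np=6)
#   # per-bundle LocalEstimator outputs are concatenated into
#   # work/sampleA.cov.covs.{exdf,sjdf,paths}.txt.gz and .covs.paths.bed.gz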
class CovEstimator(object):
def __init__(self, modelpre, bwpre, dstpre, genome, tcovth=1, usegeom=False, np=6):
self.modelpre = modelpre
self.bwpre = bwpre
self.dstpre = dstpre
self.genome = genome
self.tcovth = tcovth
self.usegeom = usegeom
self.np = np
def run(self):
self.server = server = TQ.Server(np=self.np)
print('reading paths.withse.bed.gz')
bed = GGB.read_bed(self.modelpre+'.paths.withse.bed.gz')
chroms = bed['chr'].unique()
csizedic = UT.df2dict(UT.chromdf(self.genome), 'chr', 'size')
self.bundlestatus = bundlestatus = {}
self.bundles = bundles = []
with server:
print('starting task server')
subid = 0
for chrom in chroms:
print('chrom {0}'.format(chrom))
sub = bed[(bed['chr']==chrom)]
uc = UT.union_contiguous(sub[['chr','st','ed']], returndf=True)
# total about 30K=> make batch of ~1000
n = len(uc)
nb = int(N.ceil(n/1000.))
print(chrom,nb)
for i in range(nb):
print('putting in bundle_estimator {0}.{1}'.format(chrom,subid))
sti = 1000*i
edi = min(1000*(i+1), len(uc)-1)
st = max(uc.iloc[sti]['st'] - 100, 0)
ed = min(uc.iloc[edi]['ed'] + 100, csizedic[chrom])
args = [self.modelpre, self.bwpre, chrom, st, ed, self.dstpre, self.tcovth, self.usegeom]
tname = 'bundle_estimator.{0}'.format(subid)
subid += 1
task = TQ.Task(tname, bundle_estimator, args)
server.add_task(task)
bundles.append((chrom,st,ed))
nb = len(bundles)
while server.check_error():
try:
name, rslt = server.get_result(timeout=5)
except TQ.Empty:
name, rslt = None, None
if name is not None:
if name.startswith('bundle_estimator.'):
subid = name.split('.')[-1]
bundlestatus[subid] = rslt
if len(bundlestatus)==nb:
print('$$$$$$$$ putting in concatenate_bundles $$$$$$$$$$$')
tname='concatenate_bundles'
args = (bundles, self.dstpre)
task = TQ.Task(tname, concatenate_bundles, args)
server.add_task(task)
if name=='concatenate_bundles':
print('$$$$$$$$ concatenate_bundles done $$$$$$$$$$$')
break
print('Exit Loop')
print('Done')
############# Cov Collector ######################################################
class CovCollector(object):
def __init__(self, covpres, dstpre, np=7):
self.covpres = covpres
self.modelpre = covpres[0]
self.dstpre = dstpre
self.np = np
def run(self):
self.server = server = TQ.Server(np=self.np)
self.exdf = ex = UT.read_pandas(self.modelpre+'.covs.exdf.txt.gz', names=A3.EXDFCOLS)
self.chroms = chroms = ex['chr'].unique()
self.exstatus = exstatus = {}
self.sjstatus = sjstatus = {}
self.pastatus = pastatus = {}
exdone=False
sjdone=False
padone=False
n = len(self.covpres)
nb = int(N.ceil(n/50.))
with server:
for subid in range(nb):
covpressub = self.covpres[50*subid:50*(subid+1)]
# ex
tname = 'collect_ecov_subset.{0}'.format(subid)
args = (self.modelpre, covpressub, self.dstpre, subid)
task = TQ.Task(tname, collect_ecov_subset, args)
server.add_task(task)
# sj
tname = 'collect_tcnt_subset.{0}'.format(subid)
args = (self.modelpre, covpressub, self.dstpre, subid)
task = TQ.Task(tname, collect_tcnt_subset, args)
server.add_task(task)
# path
tname = 'collect_tcovs_subset.{0}'.format(subid)
args = (self.modelpre, covpressub, self.dstpre, subid)
task = TQ.Task(tname, collect_tcovs_subset, args)
server.add_task(task)
while server.check_error():
try:
name, rslt = server.get_result(timeout=5)
except TQ.Empty:
name, rslt = None, None
if name is not None:
if name.startswith('collect_ecov_subset.'):
subid = name.split('.')[-1]
exstatus[subid] = rslt
if len(exstatus)==nb:
print('$$$$$$$$ putting in concatenate_ecov_subsets $$$$$$$$$$$')
for chrom in chroms:
tname='concatenate_ecov_subsets'
args = (self.modelpre, self.dstpre, range(nb), chrom)
task = TQ.Task(tname, concatenate_ecov_subsets, args)
server.add_task(task)
if name.startswith('collect_tcnt_subset.'):
subid = name.split('.')[-1]
sjstatus[subid] = rslt
if len(sjstatus)==nb:
print('$$$$$$$$ putting in concatenate_tcnt_subsets $$$$$$$$$$$')
for chrom in chroms:
tname='concatenate_tcnt_subsets'
args = (self.modelpre, self.dstpre, range(nb), chrom)
task = TQ.Task(tname, concatenate_tcnt_subsets, args)
server.add_task(task)
if name.startswith('collect_tcovs_subset.'):
subid = name.split('.')[-1]
pastatus[subid] = rslt
if len(pastatus)==nb:
print('$$$$$$$$ putting in concatenate_tcovs_subsets $$$$$$$$$$$')
for chrom in chroms:
tname='concatenate_tcovs_subsets'
args = (self.modelpre, self.dstpre, range(nb), chrom)
task = TQ.Task(tname, concatenate_tcovs_subsets, args)
server.add_task(task)
if name=='concatenate_ecov_subsets':
print('$$$$$$$$ concatenate_ecov_subsets done $$$$$$$$$$$')
exdone=True
if name=='concatenate_tcnt_subsets':
print('$$$$$$$$ concatenate_tcnt_subsets done $$$$$$$$$$$')
sjdone=True
if name=='concatenate_tcovs_subsets':
print('$$$$$$$$ concatenate_tcovs_subsets done $$$$$$$$$$$')
padone=True
if exdone&sjdone&padone:
break
print('Exit Loop')
print('Done')
def collect_ecov_subset(modelpre, covpressub, dstpre, subid):
return _collect_subset(modelpre, covpressub, dstpre, subid, 'ex')
def concatenate_ecov_subsets(modelpre, dstpre, subids, chrom):
return _concatenate_subsets(modelpre, dstpre, subids, 'ex', chrom)
def collect_tcnt_subset(modelpre, covpressub, dstpre, subid):
return _collect_subset(modelpre, covpressub, dstpre, subid, 'sj')
def concatenate_tcnt_subsets(modelpre, dstpre, subids, chrom):
return _concatenate_subsets(modelpre, dstpre, subids, 'sj', chrom)
def collect_tcovs_subset(modelpre, covpressub, dstpre, subid):
return _collect_subset(modelpre, covpressub, dstpre, subid, 'pa')
def concatenate_tcovs_subsets(modelpre, dstpre, subids, chrom):
return _concatenate_subsets(modelpre, dstpre, subids, 'pa', chrom)
def _collect_subset(modelpre, covpressub, dstpre, subid, which):
if which == 'ex':
suf = 'exdf'
flds = ['ecov']
fsuf = 'ecovs'
cols = A3.EXDFCOLS
elif which == 'sj':
suf = 'sjdf'
flds = ['tcnt']
fsuf = 'tcnts'
cols = A3.SJDFCOLS
else:
suf = 'paths'
flds = ['tcov0','tcov']
fsuf = 'tcovs'
cols = A3.PATHCOLS
ex0 = UT.read_pandas(modelpre+'.covs.{0}.txt.gz'.format(suf), names=cols)
chroms = ex0['chr'].unique()
# read in exdf sort, transpose and write(append) to dst
if all([os.path.exists(dstpre+'.{1}.{0}.txt.gz'.format(c,fsuf)) for c in chroms]):
return []
if all([os.path.exists(dstpre+'.{2}.{0}.{1}.txt.gz'.format(c,subid,fsuf)) for c in chroms]):
return []
ex0.sort_values(['chr','st','ed','strand'], inplace=True)
names = []
for pre in covpressub:
name = pre.split('/')[-1]
ex1 = UT.read_pandas(pre+'.covs.{0}.txt.gz'.format(suf), names=cols)
ex1.sort_values(['chr','st','ed','strand'], inplace=True)
for f in flds:
cname = '{0}.{1}'.format(name, f)
ex0[cname] = ex1[f].values
names.append(cname)
ex0.reset_index(inplace=True)
files = []
for chrom in ex0['chr'].unique():
ex0chr = ex0[ex0['chr']==chrom].sort_values(['st','ed','strand'])
dst = dstpre+'.{2}.{0}.{1}.txt.gz'.format(chrom,subid,fsuf)
UT.write_pandas(ex0chr[names].T, dst, 'i')
files.append(dst)
return files
def _concatenate_subsets(modelpre, dstpre, subids, which, chrom):
if which == 'ex':
suf = 'exdf'
fsuf = 'ecovs'
cols = A3.EXDFCOLS
elif which == 'sj':
suf = 'sjdf'
fsuf = 'tcnts'
cols = A3.SJDFCOLS
else:
suf = 'paths'
fsuf = 'tcovs'
cols = A3.PATHCOLS
ex0 = UT.read_pandas(modelpre+'.covs.{0}.txt.gz'.format(suf), names=cols)
chroms = ex0['chr'].unique()
files = []
dstpath0 = dstpre+'.{1}.{0}.tmp.txt.gz'.format(chrom,fsuf)
dstpath1 = dstpre+'.{1}.{0}.txt.gz'.format(chrom,fsuf)
if not os.path.exists(dstpath1):
with open(dstpath0, 'wb') as dst:
for subid in subids:
srcpath = dstpre+'.{2}.{0}.{1}.txt.gz'.format(chrom,subid,fsuf)
with open(srcpath, 'rb') as src:
shutil.copyfileobj(src,dst)
files.append(srcpath)
ex0chr = ex0[ex0['chr']==chrom].sort_values(['st','ed','strand'])
ex1chr = UT.read_pandas(dstpath0,names=ex0chr.index,index_col=[0]).T
df = PD.concat([ex0chr, ex1chr],axis=1)
UT.write_pandas(df, dstpath1, 'h')
files.append(dstpath0)
#os.unlink(dstpath0)
for f in files:
if os.path.exists(f):
os.unlink(f)
return dstpath1
############# SJ count Collector ####################################################
import os
from jgem import gtfgffbed as GGB
def collect_one(bwpre, which):
# because of unstranded data name (jid) cannot be trusted
# just use locus (chr:st-ed) (st<ed)
sjpaths = GGB.read_bed(bwpre+'.sjpath.bed.gz')
sjpaths['ucnt'] = sjpaths['sc1']
sjpaths['tcnt'] = sjpaths['sc2']
sjpaths['jids'] = sjpaths['name'].str.split(',')
sj = UT.flattendf(sjpaths, 'jids')
sj['sted'] = [[int(y) for y in x.split('|')] for x in sj['jids']]
#idxp = sj['strand'].isin(['+','.'])
sj['st'] = [min(x) for x in sj['sted']]
sj['ed'] = [max(x) for x in sj['sted']]
sj['locus'] = UT.calc_locus(sj)
l2u = UT.df2dict(sj, 'locus', which)
return l2u
def collect_sjcnts_worker(idf, subsi, acode, which, dstpath):
# which tcnt, ucnt
# idf ['locus']
cols = []
for sname, bwpre in subsi[['name','bwpre']].values:
        l2u = collect_one(bwpre, which)
idf[sname] = [l2u.get(x,0) for x in idf['locus']]
cols.append(sname)
UT.write_pandas(idf[cols], dstpath, 'ih') # don't want non-sample columns
return dstpath
def collect_sjcnts(dataset_code, si, assembly_code, modelpre, which, outdir, np=7):
"""
Args:
dataset_code: identifier to indicate dataset
si: dataset sampleinfo dataframe
            (required columns: name, sjbed_path=path to (converted) raw junction count file)
assembly_code: identifier for assembly
        modelpre: assembly model path prefix
which: ucnt, mcnt, jcnt=ucnt or mcnt (when ucnt=0)
outdir: output directory
"""
sj = UT.read_pandas(modelpre+'.sj.txt.gz')
#sj['st'] = sj['st-1'] # old format
sj['locus'] = UT.calc_locus(sj,'chr','st-1','ed')
#sj['uid'] = sj['chr']+':'+sj['name']
idf = sj[['_id', 'locus']].set_index('_id')
#idf = sj[['_id', 'uid']].copy()
dstpre = os.path.join(outdir, '{0}.{1}'.format(dataset_code, assembly_code))
batchsize = int(N.ceil(len(si)/float(np)))
args = []
files = []
si1 = si[['name','bwpre']]
for i in range(np):
subsi = si1.iloc[i*batchsize:(i+1)*batchsize].copy()
dstpath = dstpre+'.{0}.part{1}.txt.gz'.format(which, i)
files.append(dstpath)
args.append((idf, subsi, assembly_code, which, dstpath))
rslts = UT.process_mp(collect_sjcnts_worker, args, np=np, doreduce=False)
# concat part files
dfs = [UT.read_pandas(fpath, index_col=[0]) for fpath in files]
df = PD.concat(dfs, axis=1)
dstpath = dstpre+'.{0}s.txt.gz'.format(which)
UT.write_pandas(df, dstpath, 'ih')
for fpath in files:
os.unlink(fpath)
return df
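# Usage sketch (illustrative; 'si' is a sampleinfo dataframe with at least
# 'name' and 'bwpre' columns, and all paths below are hypothetical):
#   df = collect_sjcnts('dataset1', si, 'assembly1', 'work/model',
#                       which='ucnt', outdir='out', np=7)
#   # writes out/dataset1.assembly1.ucnts.txt.gz with one column per sample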
###################
def fix_i53completematch(exdf, paths):
# extend edge of 5'3' exons if they completely match to internal exons
idxp = exdf['strand'].isin(A3.STRS['+'])
idx5 = exdf['kind']=='5'
idx3 = exdf['kind']=='3'
idxi = exdf['kind']=='i'
ileft = (idxp&idx5)|(~idxp&idx3)
iright = (idxp&idx3)|(~idxp&idx5)
steds = set([(c,x,y) for c,x,y in exdf[idxi][['chr','st','ed']].values])
idxm = N.array([(c,x,y) in steds for c,x,y in exdf[['chr','st','ed']].values], dtype=bool)
imleft = ileft&idxm
imright = iright&idxm
while (N.sum(imleft)+N.sum(imright))>0:
# fix exdf st,ed
exdf.loc[imleft,'st'] = exdf[imleft]['st']-10
exdf.loc[imright, 'ed'] = exdf[imright]['ed']+10
# make old name => new name map
im5 = (imleft|imright)&idx5
im3 = (imleft|imright)&idx3
LOG.info('{0} 5exon fixed, {1} 3exon fixed'.format(N.sum(im5),N.sum(im3)))
tmp = exdf[im5][['chr','name','st','ed','strand']].values
n2n5 = dict([('{0}:{1}'.format(c,n),A3._pc(s,e,strand,',')) for c,n,s,e,strand in tmp])
tmp = exdf[im3][['chr','name','st','ed','strand']].values
n2n3 = dict([('{0}:{1}'.format(c,n),A3._pc(s,e,strand,',')) for c,n,s,e,strand in tmp])
# fix path name, st, ed
p5ids = ['{0}:{1}'.format(c,n.split('|')[0]) for c,n in paths[['chr','name']].values]
p3ids = ['{0}:{1}'.format(c,n.split('|')[-1]) for c,n in paths[['chr','name']].values]
p5idx = N.array([x in n2n5 for x in p5ids], dtype=bool)
p3idx = N.array([x in n2n3 for x in p3ids], dtype=bool)
def _fix5(c,n,n2n5):
tmp = n.split('|')
n5 = n2n5['{0}:{1}'.format(c,tmp[0])]
return '|'.join([n5]+tmp[1:])
def _fix3(c,n,n2n3):
tmp = n.split('|')
n3 = n2n3['{0}:{1}'.format(c,tmp[-1])]
return '|'.join(tmp[:-1]+[n3])
paths.loc[p5idx,'name'] = [_fix5(c,n,n2n5) for c,n in paths[p5idx][['chr','name']].values]
paths.loc[p3idx,'name'] = [_fix3(c,n,n2n3) for c,n in paths[p3idx][['chr','name']].values]
pidx = p5idx|p3idx
def _st(n):
tmp = n.split(',')
st0 = int(tmp[0])
ed0 = int(tmp[-1])
return min(st0,ed0)
def _ed(n):
tmp = n.split(',')
st0 = int(tmp[0])
ed0 = int(tmp[-1])
return max(st0,ed0)
paths.loc[pidx,'st'] = [_st(n) for n in paths[pidx]['name']]
paths.loc[pidx,'ed'] = [_ed(n) for n in paths[pidx]['name']]
# fix exdf name
exdf.loc[im5, 'name'] = [_fix5(c,n,n2n5) for c,n in exdf[im5][['chr','name']].values]
exdf.loc[im3, 'name'] = [_fix3(c,n,n2n3) for c,n in exdf[im3][['chr','name']].values]
idxm = N.array([(c,x,y) in steds for c,x,y in exdf[['chr','st','ed']].values], dtype=bool)
imleft = ileft&idxm
imright = iright&idxm
################### 5gr,53gr cov
def heads(paths, chrom, strand):
#if 'id' not in paths:
# paths['id']= paths['chr']+':'+paths['name'] #N.arange(len(paths))
p = paths[(paths['strand'].isin(A3.STRS[strand]))&(paths['chr']==chrom)]
if 'pc' in paths:
heads = [([int(y) for y in x.split('|')[0].split(',')], i) for i,x in p[['id','pc']].values]
else:
heads = [([int(y) for y in x.split('|')[0].split(',')], i) for i,x in p[['id','name']].values]
if strand in ['-','.-']:
heads = [(x[0][::-1],x[1]) for x in heads]
return heads
def headgroups(heads):
heads = sorted(heads)
def _gen():
cids = [heads[0][1]]
cst,ced = heads[0][0]
for (st,ed), nid in heads[1:]:
if st<ced: # overlap
# add to current group
cids.append(nid)
# expand
ced = max(ced, ed)
else: # yield current group and make new
yield ([cst,ced], cids)
cst,ced = st,ed
cids = [nid]
yield ([cst,ced],cids)
return [x for x in _gen()]
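# Worked example for headgroups: input is a list of ([st,ed], id) head exons;
# overlapping heads are merged into groups.
#   headgroups([([10,20],'a'), ([15,30],'b'), ([40,50],'c')])
#   -> [([10, 30], ['a', 'b']), ([40, 50], ['c'])]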
def find_all_5groups(paths):
hgs = {} # head groups
i2g = {} # id => group map
if 'pc' in paths:
idxme = paths['pc'].str.contains('\|')
paths['id'] = paths['chr']+':'+paths['pc']
else:
idxme = paths['name'].str.contains('\|')
paths['id'] = paths['chr']+':'+paths['name']
mepaths = paths[idxme]
sepaths = paths[~idxme]
for chrom in paths['chr'].unique():
hgs[chrom] = {}
for strand in ['+','-']:
h = heads(mepaths, chrom, strand)
hg = headgroups(h)
print('{0}:{1}:#hg={2}'.format(chrom,strand,len(hg)))
hgs[chrom][strand] = hg
for (st,ed),ids in hg:
g = '{0}:{1}-{2}:{3}'.format(chrom,st,ed,strand) # group id
for i in ids:
i2g[i] = g
for chrom,st,ed,i in sepaths[['chr','st','ed','id']].values:
i2g[i] = '{0}:{1}-{2}:s'.format(chrom,st,ed)
return i2g, hgs
# paths = GGB.read_bed(rdstpre+'.paths.withse.bed.gz')
def make_idmap(mdstpre):
ex = UT.read_pandas(mdstpre+'.ex.txt.gz')
paths = GGB.read_bed(mdstpre+'.paths.withse.bed.gz')
ex['id'] = ex['chr']+':'+ex['name']
i2gn = UT.df2dict(ex, 'id', 'gname')
paths['id'] = paths['chr']+':'+paths['name']
paths['id0'] = paths['chr']+':'+paths['name'].str.split('|').str[0]
#paths['gname'] = [i2gn[c+':'+x.split('|')[0]] for c,x in paths[['chr','name']].values]
paths['gname'] = [i2gn[x] for x in paths['id0']]
g2cnt = {}
tnames = []
for x in paths['gname']:
i = g2cnt.get(x,1)
tnames.append('{0}.{1}'.format(x,i))
g2cnt[x] = i+1
paths['tname'] = tnames
i2gn = UT.df2dict(paths, 'id', 'gname')
i2tn = UT.df2dict(paths, 'id', 'tname')
idf = PD.DataFrame({'gname':i2gn, 'tname':i2tn})
idf.index.name = 'id'
UT.write_pandas(idf, mdstpre+'.idmap.txt.gz', 'ih')
return idf
def read_paths(mdstpre):
jg = GGB.read_bed(mdstpre+'.paths.withse.bed.gz')
if os.path.exists(mdstpre+'.idmap.txt.gz'):
idf = UT.read_pandas(mdstpre+'.idmap.txt.gz')
idf = idf.set_index('id')
else:
idf = make_idmap(mdstpre)
jg['id'] = jg['chr']+':'+jg['name']
jg['pc'] = jg['name']
jg['ic'] = [','.join(x.split(',')[1:-1]) for x in jg['pc']] # intron chain
jg['gname'] = idf.ix[jg['id'].values]['gname'].values
jg['name'] = idf.ix[jg['id'].values]['tname'].values
return jg
# chromwise tcov => chromwise gcov5, gcov53 => concat
def calc_gcovs_chrom(covpre,chrom,i2g,tcov0cols):
tgt = covpre+'.tcovs.{0}.txt.gz'.format(chrom)
df = UT.read_pandas(tgt)
df['id'] = df['chr']+':'+df['name']
df1 = df.groupby('id').first()
tcov0_g53 = df1.groupby(['chr','tst','ted','strand'])[tcov0cols].first()
tcov0_g5 = df1.reset_index().groupby(['chr','tst','ted','strand']).first().\
set_index('id').groupby(i2g)[tcov0cols].sum()
g53dst = covpre+'.tcov53.{0}.txt.gz'.format(chrom)
g5dst = covpre+'.tcov5.{0}.txt.gz'.format(chrom)
UT.write_pandas(tcov0_g53.reset_index(), g53dst, '')
UT.write_pandas(tcov0_g5.reset_index(), g5dst,'')
return (g5dst, g53dst)
def calc_gcovs(covpre,modelpre,snames,acode,np=7,deletechromfiles=True):
paths = read_paths(modelpre) #+'.paths.withse.bed.gz')
i2g,hgs = find_all_5groups(paths)
tcov0cols = ['{0}.{1}.tcov0'.format(x,acode) for x in snames]
chroms = paths['chr'].unique()
args = [(covpre,c,i2g,tcov0cols) for c in chroms]
rslts = UT.process_mp2(calc_gcovs_chrom, args, np=np, doreduce=False)
# concatenate chrom files
g5dst0 = covpre+'.tcov5.txt.gz'
g53dst0 = covpre+'.tcov53.txt.gz'
files = []
with open(g5dst0,'wb') as dst:
for c in chroms:
g5dst = covpre+'.tcov5.{0}.txt.gz'.format(c)
with open(g5dst,'rb') as src:
shutil.copyfileobj(src,dst)
files.append(g5dst)
with open(g53dst0,'wb') as dst:
for c in chroms:
g53dst = covpre+'.tcov53.{0}.txt.gz'.format(c)
with open(g53dst,'rb') as src:
shutil.copyfileobj(src,dst)
files.append(g53dst)
# add header, tname columns
g53names = paths.groupby(['chr','tst','ted','strand'])['name'].apply(lambda x:','.join(x)).to_frame()
g5names = paths.set_index('id').groupby(i2g)['name'].apply(lambda x:','.join(x)).to_frame()
df = UT.read_pandas(g5dst0, names=['g5id']+tcov0cols)
df['tnames'] = g5names.ix[df['g5id'].values]['name'].values
UT.write_pandas(df, g5dst0)
df = UT.read_pandas(g53dst0, names=['chr','tst','ted','strand']+tcov0cols)
ids = [tuple(x) for x in df[['chr','tst','ted','strand']].values]
df['tnames'] = g53names.ix[ids]['name'].values
UT.write_pandas(df, g53dst0)
if deletechromfiles:
for f in files:
os.unlink(f)
|
mit
|
themurph/openshift-tools
|
openshift_tools/reporting/zabbixplot.py
|
13
|
13451
|
#!/bin/env python
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Jesse Kennedy <[email protected]>
"""Contains utilities for plotting data from Zabbix API"""
# Wishlist: plot multiple triggers
# Reason: Disable import checking for third party libraries
# Status: permanently disabled
# pylint: disable=import-error
from __future__ import print_function, division
import sys
import datetime
import re
from collections import defaultdict
from operator import itemgetter
from itertools import groupby
from matplotlib import pyplot as plt, dates as mdates, ticker
import numpy as np
# Reason: DTO
# Status: permanently disabled
# pylint: disable=too-few-public-methods
class ZabbixTimespan(object):
"""Data class for holding timespan-related objects"""
def __init__(self, days):
# TODO: Allow arbitrary end time
self.time_start = datetime.datetime.now() - datetime.timedelta(days=days)
self.time_end = datetime.datetime.now()
self.timestamp_start = int(self.time_start.strftime("%s"))
self.timestamp_end = int(self.time_end.strftime("%s"))
self.mtime_start = mdates.date2num(self.time_start)
self.mtime_end = mdates.date2num(self.time_end)
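# Example (illustrative): a 7-day window ending now.
#   ts = ZabbixTimespan(7)
#   ts.timestamp_start, ts.timestamp_end  # unix timestamps for the Zabbix API
#   ts.mtime_start, ts.mtime_end          # matplotlib date numbers for plotting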
class ZabbixPlot(object):
"""Contains utilities for plotting data from Zabbix API"""
# Reason: All of these parameters are needed
# Status: permanently disabled
# pylint: disable=too-many-arguments, too-many-instance-attributes
def __init__(self, zapi, group, trigger, timespan, limit, periods):
self.zapi = zapi
self.group = group
self.trigger = trigger
self.timespan = timespan
self.limit = limit
self.periods = periods + 1 # One more for the ending pseudo-period
self.events = None
self.num_hosts = None
def plot(self):
"""Create both plots and show the user"""
_, axes = plt.subplots(2, sharex=True, figsize=(16, 10))
self.plot_timeline(axes[0])
self.plot_aggregate(axes[1])
axes[0].set_title("Time in \"" + self.trigger + "\" problem state over " + str(self.num_hosts) +
" hosts from " + self.timespan.time_start.strftime("%a, %b %d %H:%M") +
" to " + self.timespan.time_end.strftime("%a, %b %d %H:%M"), size=16)
plt.tight_layout()
plt.show()
def get_event_data(self):
"""Retrieve event data from Zabbix API"""
# Warning: If there are variables in the description, they won't be filled in
triggers = self.zapi.trigger.get(group=self.group, search={"description": self.trigger}, limit=1)
if len(triggers) == 0:
raise Exception("No triggers found")
# Replace trigger search term with full description and {variables} replaced with *
self.trigger = re.sub(r"\{.*?\}", "*", triggers[0]["description"])
print(self.trigger)
# Find the rest of the triggers, searching by the first trigger found in order to prevent multiple triggers from
# being mixed together
triggers = self.zapi.trigger.get(group=self.group, search={"description": self.trigger},
searchWildcardsEnabled=True)
triggerids = [t["triggerid"] for t in triggers]
self.events = self.zapi.event.get(time_from=self.timespan.timestamp_start,
time_till=self.timespan.timestamp_end, objectids=triggerids,
limit=self.limit, selectHosts="extend", sortfield="clock", sortorder="DESC")
if len(self.events) >= self.limit:
print("Warning! Data has been truncated by the result limit. The graph will be inaccurate.",
file=sys.stderr)
def get_num_hosts_in_group(self, group_name):
"""Retrieve the number of hosts in a hostgroup from Zabbix API"""
self.num_hosts = int(self.zapi.hostgroup.get(filter={"name": group_name}, selectHosts="count")[0]["hosts"])
@staticmethod
def get_period(timestamp, periods):
"""Gets the period a timestamp is in (the one to the left)"""
for period in reversed(periods):
if timestamp >= period:
return period
raise Exception("Timestamp not found in periods array")
@staticmethod
def get_next_period(timestamp, periods):
"""Gets the period after a timestamp (the one to the right)"""
for period in periods:
if timestamp < period:
return period
raise Exception("Next timestamp not found in periods array")
@staticmethod
def _calculate_anomaly_downtime(event_clock, periods, downtime):
        # UNUSED: Inaccurate
"""Assume we missed the beginning downtime event and retroactively apply downtime to periods"""
current_period = periods[0]
downtime[current_period] = 0
while event_clock > current_period:
next_period = ZabbixPlot.get_next_period(current_period, periods)
if event_clock <= next_period:
# Downtime ends in this period
downtime[current_period] += event_clock - current_period
else:
# Downtime ends in another period
downtime[current_period] += next_period - current_period
current_period = next_period
return
@staticmethod
def _calculate_event_downtime(event_value, event_clock, host_id, down_since):
"""Calculate the amount of downtime or uptime caused by an event"""
if event_value != "0":
# Going down
if host_id not in down_since:
# Host was up
down_since[host_id] = event_clock
return 0
else:
# Host was already down
return 0
else: # event_value == "0":
# Coming up
if host_id in down_since:
# Host was down
# print("host", host_id, "was down since", down_since[host_id])
return event_clock - down_since.pop(host_id)
else:
# Host was already up (did we miss the beginning downtime event?)
return 0
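    # Worked example: a "1" (problem) event marks the host down, and the matching
    # "0" (ok) event returns the elapsed downtime.
    #   down_since = {}
    #   ZabbixPlot._calculate_event_downtime("1", 100, "nodeA", down_since)  # -> 0, nodeA marked down at 100
    #   ZabbixPlot._calculate_event_downtime("0", 160, "nodeA", down_since)  # -> 60, nodeA removed from down_since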
def _calculate_total_downtime(self, periods):
"""Calculates the total amount of downtime divided into periods (and number of nodes it found)"""
period_func = lambda event: ZabbixPlot.get_period(int(event["clock"]), periods)
host_func = lambda event: event["hosts"][0]["name"]
combo_func = lambda event: (event["clock"], event["hosts"][0]["name"])
sorted_events = sorted(self.events, key=combo_func, reverse=False)
# Pre-fill time periods for graphing no downtime during a period
downtime = {}
for period in periods:
downtime[period] = 0 # Yay
down_since = {}
for period_id, period_events in groupby(sorted_events, period_func):
# Loop through periods
current_period_end = ZabbixPlot.get_next_period(period_id, periods)
for host_id, host_events in groupby(period_events, host_func):
# Loop through hosts in period
for event in host_events:
# Loop through events from this host
deltatime = self._calculate_event_downtime(event["value"], int(event["clock"]), host_id, down_since)
if deltatime is None:
self._calculate_anomaly_downtime(int(event["clock"]), periods, downtime)
else:
# print("adding", deltatime, "to", datetime.datetime.fromtimestamp(period_id))
downtime[period_id] += deltatime
for host in down_since:
# Go through unclosed downtime, add rest of period to downtime, set since to end of this period
downtime[period_id] += current_period_end - down_since[host]
down_since[host] = current_period_end
return downtime
def plot_aggregate(self, axis):
"""Plots a bar chart from aggregated event occurences"""
if self.events is None:
self.get_event_data()
if self.num_hosts is None:
self.get_num_hosts_in_group(self.group)
# Set up periods
periods = np.linspace(self.timespan.timestamp_start, self.timespan.timestamp_end, num=self.periods,
endpoint=True)
period_seconds = (self.timespan.timestamp_end - self.timespan.timestamp_start) / (self.periods - 1)
downtime = self._calculate_total_downtime(periods)
# Make downtime relative and a percentage
downtime_list = sorted(downtime.items(), key=itemgetter(0))
rel_downtime = np.array([i[1] / self.num_hosts / period_seconds * 100 for i in downtime_list])
period2mdate = lambda p: mdates.date2num(datetime.datetime.fromtimestamp(p))
times = np.array([period2mdate(i[0]) for i in downtime_list])
# Create the plot
plt.sca(axis)
width = period_seconds / 60.0 / 60.0 / 24.0 # 1 unit = 1 day
plt.bar(times[:-1], rel_downtime[:-1], color="r", alpha=0.75, width=width)
axis.set_xticks(times)
axis.set_xlim(times[0], times[-1])
axis.xaxis.set_major_formatter(mdates.DateFormatter('%a, %b %d %H:%M'))
axis.xaxis.set_major_locator(ticker.FixedLocator(times))
plt.setp(axis.get_xticklabels(), rotation=15, ha="right")
axis.set_xlabel("Interval: " + str(datetime.timedelta(seconds=period_seconds)))
axis.yaxis.set_major_formatter(ticker.FuncFormatter(lambda x, _: str(x) + "%"))
axis.set_ylabel("Site-wide unavailability")
# plt.show()
def plot_timeline(self, axis):
"""Plots a timeline of events for hosts in a group"""
if self.events is None:
self.get_event_data()
host_func = lambda event: event["hosts"][0]["name"]
combo_func = lambda event: (event["hosts"][0]["name"], event["clock"])
sorted_events = sorted(self.events, key=combo_func, reverse=False)
plt.sca(axis)
height = 0.8 # Bar width
host_bars = defaultdict(list)
host_downtime = defaultdict(float)
hosts_seen = set()
for host_id, host_events in groupby(sorted_events, host_func):
# Loop through hosts
for event in host_events:
timestamp = mdates.date2num(datetime.datetime.fromtimestamp(int(event["clock"])))
if event["value"] != "0":
# Event is in error state; plot event
hosts_seen.add(host_id)
host_downtime[host_id] += self.timespan.mtime_end - timestamp
host_bars[host_id].append((self.timespan.mtime_end - timestamp, timestamp, "r", 1))
elif host_id in hosts_seen:
host_downtime[host_id] -= self.timespan.mtime_end - timestamp
host_bars[host_id].append((self.timespan.mtime_end - timestamp, timestamp, "0.95", 1))
i = 0
ordered_hosts = sorted(host_downtime.items(), key=lambda (k, v): v)
for host_tuple in ordered_hosts:
if host_tuple[1] > 0:
axis.barh(i, self.timespan.mtime_end - self.timespan.mtime_start, left=self.timespan.mtime_start,
height=height, color="0.95", linewidth=0, zorder=0)
for point in host_bars[host_tuple[0]]:
axis.barh(i, point[0], left=point[1], height=height, color=point[2], zorder=point[3], linewidth=0)
i += 1
# # Set x axis bounds
# axis.set_xlim(self.timespan.mtime_start, self.timespan.mtime_end)
# # Rotate x axis labels
# plt.setp(axis.get_xticklabels(), rotation=15, ha="right")
# # Turn top horizontal tick marks off
# #plt.tick_params(axis='x', which="both", top="off")
# # Turn vertical gridlines on
# #axis.xaxis.grid(color="0", linestyle="--", linewidth=1)
# # Format x axis tick marks
# axis.xaxis.set_major_locator(mdates.AutoDateLocator())
# axis.xaxis.set_major_formatter(mdates.DateFormatter('%x %X'))
# axis.xaxis.set_minor_locator(mdates.HourLocator(byhour=[0, 6, 12, 18]))
# Create a list of hostnames from list of tuples
keys = [host[0] for host in ordered_hosts if host[1] > 0]
# Set y axis tick labels (height/2 for centering)
axis.set_yticks(np.arange(len(keys)) + height/2)
axis.set_yticklabels(keys)
# Set y axis label font size and turn off right vertical tick marks
plt.tick_params(axis='y', labelsize=9, right="off")
# Set y axis label
axis.set_ylabel("Node")
# Set y axis bounds
axis.set_ylim(0, len(keys))
# plt.show()
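# Usage sketch (illustrative; 'zapi' is assumed to be an already-authenticated
# Zabbix API client, e.g. pyzabbix.ZabbixAPI, and the group/trigger names are
# hypothetical):
#   timespan = ZabbixTimespan(7)
#   zplot = ZabbixPlot(zapi, group='Production nodes', trigger='is unreachable',
#                      timespan=timespan, limit=5000, periods=14)
#   zplot.plot()  # per-host timeline on top, aggregated downtime bars below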
|
apache-2.0
|
tchakravarty/pmtk3
|
python/demos/linregDemo1.py
|
26
|
1104
|
#!/usr/bin/python2.4
import numpy
import scipy.stats
import matplotlib.pyplot as plt
def main():
# true parameters
w = 2
w0 = 3
sigma = 2
# make data
numpy.random.seed(1)
Ntrain = 20
xtrain = numpy.linspace(0,10,Ntrain)
ytrain = w*xtrain + w0 + numpy.random.random(Ntrain)*sigma
Ntest = 100
xtest = numpy.linspace(0,10,Ntest)
ytest = w*xtest + w0 + numpy.random.random(Ntest)*sigma
# from http://www2.warwick.ac.uk/fac/sci/moac/students/peter_cock/python/lin_reg/
# fit
west, w0est, r_value, p_value, std_err = scipy.stats.linregress(xtrain, ytrain)
# display
print "Param \t True \t Est"
print "w0 \t %5.3f \t %5.3f" % (w0, w0est)
print "w \t %5.3f \t %5.3f" % (w, west)
# plot
plt.close()
plt.plot(xtrain, ytrain, 'ro')
plt.hold(True)
#plt.plot(xtest, ytest, 'ka-')
ytestPred = west*xtest + w0est
#ndx = range(0, Ntest, 10)
#h = plt.plot(xtest[ndx], ytestPred[ndx], 'b*')
h = plt.plot(xtest, ytestPred, 'b-')
plt.setp(h, 'markersize', 12)
if __name__ == '__main__':
main()
|
mit
|
raghavrv/scikit-learn
|
sklearn/metrics/tests/test_ranking.py
|
11
|
43605
|
from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import ndcg_score
from sklearn.exceptions import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
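# Worked example for _auc: it counts correctly ranked positive/negative pairs.
#   y_true  = np.array([0, 0, 1, 1])
#   y_score = np.array([0.1, 0.4, 0.35, 0.8])
#   _auc(y_true, y_score)  # -> 0.75 (3 of the 4 pos/neg pairs ranked correctly)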
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`.
Note that this implementation fails on some edge cases.
For example, for constant predictions e.g. [0.5, 0.5, 0.5],
y_true = [1, 0, 0] returns an average precision of 0.33...
but y_true = [0, 0, 1] returns 1.0.
"""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def _average_precision_slow(y_true, y_score):
"""A second alternative implementation of average precision that closely
follows the Wikipedia article's definition (see References). This should
give identical results as `average_precision_score` for all inputs.
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
"""
precision, recall, threshold = precision_recall_curve(y_true, y_score)
precision = list(reversed(precision))
recall = list(reversed(recall))
average_precision = 0
for i in range(1, len(precision)):
average_precision += precision[i] * (recall[i] - recall[i - 1])
return average_precision
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
expected_auc = _auc(y_true, probas_pred)
for drop in [True, False]:
fpr, tpr, thresholds = roc_curve(y_true, probas_pred,
drop_intermediate=drop)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at 1,
    # even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no positive sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no negative sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_roc_curve_drop_intermediate():
# Test that drop_intermediate drops the correct thresholds
y_true = [0, 0, 0, 0, 1, 1]
y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds, [1., 0.7, 0.])
# Test dropping thresholds with repeating scores
y_true = [0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1]
y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9,
0.6, 0.7, 0.8, 0.9, 0.9, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds,
[1.0, 0.9, 0.7, 0.6, 0.])
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
# Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = _average_precision_slow(y_true, probas_pred)
assert_array_almost_equal(precision_recall_auc, 0.859, 3)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, decimal=3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
# Here we are doing a terrible prediction: we are always getting
# it wrong, hence the average_precision_score is the accuracy at
# chance: 50%
assert_almost_equal(auc_prc, 0.5)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .5)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.5)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.5)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.5)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.5)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.5)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.5)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.5)
def test_average_precision_constant_values():
# Check the average_precision_score of a constant predictor is
# the TPR
# Generate a dataset with 25% of positives
y_true = np.zeros(100, dtype=int)
y_true[::4] = 1
# And a constant score
y_score = np.ones(100)
# The precision is then the fraction of positives whatever the recall
# is, as there is only one threshold:
assert_equal(average_precision_score(y_true, y_score), .25)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
# This test was expanded (added scaled_down) in response to github
# issue #3864 (and others), where overly aggressive rounding was causing
# problems for users with very small y_score values
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled_up = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_scaled_down = roc_auc_score(y_true, 1e-6 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled_up)
assert_equal(roc_auc, roc_auc_scaled_down)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled_up = average_precision_score(y_true, 100 * probas_pred)
pr_auc_scaled_down = average_precision_score(y_true, 1e-6 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled_up)
assert_equal(pr_auc, pr_auc_scaled_down)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def test_ndcg_score():
# Check perfect ranking
y_true = [1, 0, 2]
y_score = [
[0.15, 0.55, 0.2],
[0.7, 0.2, 0.1],
[0.06, 0.04, 0.9]
]
perfect = ndcg_score(y_true, y_score)
assert_equal(perfect, 1.0)
# Check bad ranking with a small K
y_true = [0, 2, 1]
y_score = [
[0.15, 0.55, 0.2],
[0.7, 0.2, 0.1],
[0.06, 0.04, 0.9]
]
short_k = ndcg_score(y_true, y_score, k=1)
assert_equal(short_k, 0.0)
# Check a random scoring
y_true = [2, 1, 0]
y_score = [
[0.15, 0.55, 0.2],
[0.7, 0.2, 0.1],
[0.06, 0.04, 0.9]
]
average_ranking = ndcg_score(y_true, y_score, k=2)
assert_almost_equal(average_ranking, 0.63092975)
def check_lrap_error_raised(lrap_score):
# Raise a ValueError if the input is not in an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
# Check that label ranking average precision works for various cases.
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
# Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
# The best rank corresponds to 1. Ranks higher than 1 are worse.
# The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
# Ranks need to be corrected to take ties into account,
# e.g. two labels tied for rank 1 both get rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Let's count the number of relevant labels with a better rank
# (smaller rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
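# Illustrative check of _my_lrap, using a case asserted in check_lrap_toy
# above: _my_lrap([[1, 0, 1]], [[0.75, 0.5, 0.25]]) should equal (1 + 2/3) / 2.
# The relevant label scored 0.75 is ranked 1st with one relevant label at or
# above it (1/1); the relevant label scored 0.25 is ranked 3rd with two
# relevant labels at or above it (2/3); the two precisions are then averaged.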
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
# Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
# Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
# Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
|
bsd-3-clause
|
IDEALLab/design_embeddings_jmd_2016
|
util.py
|
1
|
17261
|
##########################################
# File: util.py #
# Copyright Richard Stebbing 2014. #
# Distributed under the MIT License. #
# (See accompany file LICENSE or copy at #
# http://opensource.org/licenses/MIT) #
##########################################
# Imports
import re
# raise_if_not_shape
def raise_if_not_shape(name, A, shape):
"""Raise a `ValueError` if the np.ndarray `A` does not have dimensions
`shape`."""
if A.shape != shape:
raise ValueError('{}.shape != {}'.format(name, shape))
# previous_float
PARSE_FLOAT_RE = re.compile(r'([+-]*)0x1\.([\da-f]{13})p(.*)')
def previous_float(x):
"""Return the next closest float (towards zero)."""
s, f, e = PARSE_FLOAT_RE.match(float(x).hex().lower()).groups()
f, e = int(f, 16), int(e)
if f > 0:
f -= 1
else:
f = int('f' * 13, 16)
e -= 1
return float.fromhex('{}0x1.{:013x}p{:d}'.format(s, f, e))
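# Illustrative check (not part of the original file): previous_float steps one
# unit in the last place towards zero, e.g. previous_float(1.0) == 1.0 - 2**-53,
# which is the same value numpy reports for np.nextafter(1.0, 0.0).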
##############################################################################
"""
Author(s): Wei Chen ([email protected])
"""
import os
import sys
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from sklearn.utils.graph import graph_shortest_path
from sklearn.neighbors import kneighbors_graph
from scipy.sparse.csgraph import connected_components
from sklearn.manifold import Isomap
from sklearn.preprocessing import scale
from sklearn.metrics import pairwise_distances
from sklearn.neighbors import NearestNeighbors
from scipy.stats import pearsonr
from sklearn.externals import joblib
import ConfigParser
def create_dir(path):
if os.path.isdir(path):
pass
else:
os.mkdir(path)
def reduce_dim(data_h, plot=False, save=False, c=None):
if plot:
# Scree plot
plt.rc("font", size=12)
pca = PCA()
pca.fit(data_h)
plt.plot(range(1,data_h.shape[1]+1), pca.explained_variance_ratio_)
plt.xlabel('Dimensionality')
plt.ylabel('Explained variance ratio')
plt.title('Scree Plot')
plt.show()
plt.close()
# Dimensionality reduction
pca = PCA(n_components=.995) # retain 99.5% of the variance
data_l = pca.fit_transform(data_h)
print 'Reduced dimensionality: %d' % data_l.shape[1]
if save:
save_model(pca, 'xpca', c)
return data_l, pca.inverse_transform
def sort_eigen(M):
''' Sort the eigenvalues and eigenvectors in DESCENDING order '''
w, v = np.linalg.eigh(M)
idx = w.argsort()[::-1]
w = w[idx]
v = v[:,idx]
return w, v
def find_gap(metrics, threshold=.99, method='difference', multiple=False, verbose=0):
''' Find the largest gap of any NONNEGATIVE metric (given in DESCENDING order).
The returned index is the position just before the gap.
threshold: needs to be specified only if method is 'percentage'
multiple: whether to find multiple gaps
'''
if method == 'percentage':
s = np.sum(metrics)
for i in range(len(metrics)):
if np.sum(metrics[:i+1])/s > threshold:
break
if verbose == 2:
plt.figure()
plt.plot(metrics, 'o-')
plt.title('metrics')
plt.show()
return i
else:
if method == 'difference':
m0 = np.array(metrics[:-1])
m1 = np.array(metrics[1:])
d = m0-m1
elif method == 'divide':
metrics = np.clip(metrics, np.finfo(float).eps, np.inf)
m0 = np.array(metrics[:-1])
m1 = np.array(metrics[1:])
d = m0/m1
else:
print 'No method called %s!' % method
sys.exit(0)
if multiple:
# dmin = np.min(d)
# dmax = np.max(d)
# t = dmin + (dmax-dmin)/10 # set a threshold
# n_gap = sum(d > t)
# idx = d.argsort()[::-1][:n_gap]
# arggap = idx
tol = 1e-4
arggap = []
if d[0] > tol:
arggap.append(0)
for i in range(len(d)-1):
if d[i+1] > d[i]:
arggap.append(i+1)
arggap = np.array(arggap)
else:
arggap = np.argmax(d)
if verbose == 2:
plt.figure()
plt.subplot(211)
plt.plot(metrics, 'o')
plt.title('metrics')
plt.subplot(212)
plt.plot(d, 'o')
# plt.plot([0, len(d)], [t, t], 'g--')
plt.title('gaps')
plt.show()
gap = d[arggap]
return arggap, gap
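# Illustrative example (not part of the original module): with the default
# 'difference' method, find_gap([10, 9, 8, 2, 1]) returns (2, 6): the largest
# drop (8 - 2 = 6) occurs after index 2, which is the index before the gap.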
def create_graph(X, n_neighbors, include_self=False, verbose=0):
kng = kneighbors_graph(X, n_neighbors, mode='distance', include_self=include_self)
nb_graph = graph_shortest_path(kng, directed=False)
if verbose:
# Visualize nearest neighbor graph
neigh = NearestNeighbors().fit(X)
nbrs = neigh.kneighbors(n_neighbors=n_neighbors, return_distance=False)
visualize_graph(X, nbrs)
return nb_graph
def get_geo_dist(X, K='auto', verbose=0):
m = X.shape[0]
if K == 'auto':
# Choose the smallest k that gives a fully connected graph
for k in range(2, m):
G = create_graph(X, k, verbose=verbose)
if connected_components(G, directed=False, return_labels=False) == 1:
break;
return G, k
else:
return create_graph(X, K, verbose=verbose)
def get_k_range(X, verbose=0):
N = X.shape[0]
# Select k_min
for k in range(1, N):
G = create_graph(X, k, include_self=False, verbose=verbose)
if connected_components(G,directed=False,return_labels=False) == 1:
break;
k_min = k
# Select k_max
for k in range(k_min, N):
kng = kneighbors_graph(X, k, include_self=False).toarray()
A = np.logical_or(kng, kng.T) # convert to undirected graph
P = np.sum(A)/2
if 2*P/float(N) > k+2:
break;
k_max = k-1#min(k_min+10, N)
if verbose == 2:
print 'k_range: [%d, %d]' % (k_min, k_max)
if k_max < k_min:
print 'No suitable neighborhood size!'
return k_min, k_max
def get_candidate(X, dim, k_min, k_max, verbose=0):
errs = []
k_candidates = []
for k in range(k_min, k_max+1):
isomap = Isomap(n_neighbors=k, n_components=dim).fit(X)
rec_err = isomap.reconstruction_error()
errs.append(rec_err)
i = k - k_min
if i > 1 and errs[i-1] < errs[i-2] and errs[i-1] < errs[i]:
k_candidates.append(k-1)
if len(k_candidates) == 0:
k_candidates.append(k)
if verbose == 2:
print 'k_candidates: ', k_candidates
plt.figure()
plt.rc("font", size=12)
plt.plot(range(k_min, k_max+1), errs, '-o')
plt.xlabel('Neighborhood size')
plt.ylabel('Reconstruction error')
plt.title('Select candidates of neighborhood size')
plt.show()
return k_candidates
def pick_k(X, dim, k_min=None, k_max=None, verbose=0):
''' Pick the optimal neighborhood size for the Isomap algorithm.
Reference:
Samko, O., Marshall, A. D., & Rosin, P. L. (2006). Selection of the optimal parameter
value for the Isomap algorithm. Pattern Recognition Letters, 27(9), 968-979.
'''
if k_min is None or k_max is None:
k_min, k_max = get_k_range(X, verbose=verbose)
ccs = []
k_candidates = range(k_min, k_max+1)#get_candidate(X, dim, k_min, k_max, verbose=verbose)
for k in k_candidates:
isomap = Isomap(n_neighbors=k, n_components=dim).fit(X)
F = isomap.fit_transform(X)
distF = pairwise_distances(F)
distX = create_graph(X, k, verbose=verbose)
cc = 1-pearsonr(distX.flatten(), distF.flatten())[0]**2
ccs.append(cc)
k_opt = k_candidates[np.argmin(ccs)]
if verbose == 2:
print 'k_opt: ', k_opt
plt.figure()
plt.rc("font", size=12)
plt.plot(k_candidates, ccs, '-o')
plt.xlabel('Neighborhood size')
plt.ylabel('Residual variance')
plt.title('Select optimal neighborhood size')
plt.show()
return k_opt
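# Hypothetical usage sketch (the data below is made up, not part of the module):
# X = np.random.RandomState(0).rand(100, 5)
# k_opt = pick_k(X, dim=2)
# pick_k keeps the candidate neighborhood size whose Isomap embedding best
# preserves the graph distances, i.e. the one with the smallest residual
# variance 1 - pearsonr(distX, distF)[0]**2.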
def estimate_dim(data, verbose=0):
''' Estimate intrinsic dimensionality of data
data: input data
Reference:
"Samko, O., Marshall, A. D., & Rosin, P. L. (2006). Selection of the optimal parameter
value for the Isomap algorithm. Pattern Recognition Letters, 27(9), 968-979."
'''
# Standardize by centering to the mean and scaling component-wise to unit variance
data = scale(data)
# The reconstruction error will decrease as n_components is increased until n_components == intr_dim
errs = []
found = False
k_min, k_max = get_k_range(data, verbose=verbose)
for dim in range(1, data.shape[1]+1):
k_opt = pick_k(data, dim, k_min, k_max, verbose=verbose)
isomap = Isomap(n_neighbors=k_opt, n_components=dim).fit(data)
err = isomap.reconstruction_error()
#print(err)
errs.append(err)
if dim > 2 and errs[dim-2]-errs[dim-1] < .5 * (errs[dim-3]-errs[dim-2]):
intr_dim = dim-1
found = True
break
if not found:
intr_dim = 1
# intr_dim = find_gap(errs, method='difference', verbose=verbose)[0] + 1
# intr_dim = find_gap(errs, method='percentage', threshold=.9, verbose=verbose) + 1
if verbose == 2:
plt.figure()
plt.rc("font", size=12)
plt.plot(range(1,dim+1), errs, '-o')
plt.xlabel('Dimensionality')
plt.ylabel('Reconstruction error')
plt.title('Select intrinsic dimension')
plt.show()
return intr_dim
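# Usage note (added for clarity): the loop above stops at the first
# dimensionality where the latest drop in Isomap reconstruction error is less
# than half of the previous drop, and reports the dimension just before that
# point as the intrinsic dimension; if no such elbow is found it falls back to 1.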
def get_singular_ratio(X_nbr, d):
x_mean = np.mean(X_nbr, axis=1).reshape(-1,1)
s = np.linalg.svd(X_nbr-x_mean, compute_uv=0)
r = (np.sum(s[d:]**2.)/np.sum(s[:d]**2.))**.5
return r
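# Descriptive note (added for clarity): get_singular_ratio computes
# r = sqrt(sum_{i > d} s_i**2 / sum_{i <= d} s_i**2) for the singular values
# s_i of the mean-centered neighborhood matrix; a small r means the
# neighborhood is well approximated by a d-dimensional affine subspace.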
def select_neighborhood(X, dims, k_range=None, get_full_ind=False, verbose=0):
''' Inspired by the Neighborhood Contraction and Neighborhood Expansion algorithms
The selected neighbors for each sample point should reflect the local geometric structure of the manifold
Reference:
"Zhang, Z., Wang, J., & Zha, H. (2012). Adaptive manifold learning. IEEE Transactions
on Pattern Analysis and Machine Intelligence, 34(2), 253-265."
'''
print 'Selecting neighborhood ... '
m = X.shape[0]
if type(dims) == int:
dims = [dims] * m
if k_range is None:
k_min, k_max = get_k_range(X)
else:
k_min, k_max = k_range
# G = get_geo_dist(X, verbose=verbose)[0] # geodesic distances
# ind = np.argsort(G)[:,:k_max+1]
neigh = NearestNeighbors().fit(X)
ind = neigh.kneighbors(n_neighbors=k_max, return_distance=False)
ind = np.concatenate((np.arange(m).reshape(-1,1), ind), axis=1)
nbrs = []
# Choose eta
k0 = k_max
r0s =[]
for j in range(m):
X_nbr0 = X[ind[j,:k0]].T
r0 = get_singular_ratio(X_nbr0, dims[j])
r0s.append(r0)
r0s.sort(reverse=True)
j0 = find_gap(r0s, method='divide')[0]
eta = (r0s[j0]+r0s[j0+1])/2
# eta = 0.02
if verbose:
print 'eta = %f' % eta
for i in range(m):
''' Neighborhood Contraction '''
rs = []
for k in range(k_max, k_min-1, -1):
X_nbr = X[ind[i,:k]].T
r = get_singular_ratio(X_nbr, dims[i])
rs.append(r)
if r < eta:
ki = k
break
if k == k_min:
ki = k_max-np.argmin(rs)
nbrs.append(ind[i,:ki])
''' Neighborhood Expansion '''
pca = PCA(n_components=dims[i]).fit(X[nbrs[i]])
nbr_out = ind[i, ki:] # neighbors of x_i outside the neighborhood set by Neighborhood Contraction
for j in nbr_out:
theta = pca.transform(X[j].reshape(1,-1))
err = np.linalg.norm(pca.inverse_transform(theta) - X[j]) # reconstruction error
if err < eta * np.linalg.norm(theta):
nbrs[i] = np.append(nbrs[i], [j])
# print ki, len(nbrs[i])
# print max([len(nbrs[i]) for i in range(m)])
if verbose:
# Visualize nearest neighbor graph
visualize_graph(X, nbrs)
# Visualize neighborhood selection
if X.shape[1] > 3:
pca = PCA(n_components=3)
F = pca.fit_transform(X)
else:
F = np.zeros((X.shape[0], 3))
F[:,:X.shape[1]] = X
fig3d = plt.figure()
ax3d = fig3d.add_subplot(111, projection = '3d')
# Create cubic bounding box to simulate equal aspect ratio
max_range = np.array([F[:,0].max()-F[:,0].min(), F[:,1].max()-F[:,1].min(), F[:,2].max()-F[:,2].min()]).max()
Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(F[:,0].max()+F[:,0].min())
Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(F[:,1].max()+F[:,1].min())
Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(F[:,2].max()+F[:,2].min())
ax3d.scatter(Xb, Yb, Zb, c='white', alpha=0)
# Plot point sets in 3D
plot_samples = [0, 1]
nbr_indices = []
for i in plot_samples:
nbr_indices = list(set(nbr_indices) | set(nbrs[i]))
F_ = np.delete(F, nbr_indices, axis=0)
ax3d.scatter(F_[:,0], F_[:,1], F_[:,2], c='white')
colors = ['b', 'g', 'y', 'r', 'c', 'm', 'y', 'k']
from itertools import cycle
colorcycler = cycle(colors)
for i in plot_samples:
color = next(colorcycler)
ax3d.scatter(F[nbrs[i][1:],0], F[nbrs[i][1:],1], F[nbrs[i][1:],2], marker='*', c=color, s=100)
ax3d.scatter(F[i,0], F[i,1], F[i,2], marker='x', c=color, s=100)
plt.show()
if get_full_ind:
return nbrs, ind
else:
return nbrs
def visualize_graph(X, nbrs):
# Reduce dimensionality
if X.shape[1] > 3:
pca = PCA(n_components=3)
F = pca.fit_transform(X)
else:
F = np.zeros((X.shape[0], 3))
F[:,:X.shape[1]] = X
m = F.shape[0]
fig3d = plt.figure()
ax3d = fig3d.add_subplot(111, projection = '3d')
# Create cubic bounding box to simulate equal aspect ratio
max_range = np.array([F[:,0].max()-F[:,0].min(), F[:,1].max()-F[:,1].min(), F[:,2].max()-F[:,2].min()]).max()
Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(F[:,0].max()+F[:,0].min())
Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(F[:,1].max()+F[:,1].min())
Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(F[:,2].max()+F[:,2].min())
ax3d.scatter(Xb, Yb, Zb, c='white', alpha=0)
# Plot point sets in 3D
ax3d.scatter(F[:,0], F[:,1], F[:,2], c='blue')
# Plot edges
# for i in range(m-1):
# for j in range(i+1, m):
# if j in nbrs[i]:
# line = np.vstack((F[i], F[j]))
# ax3d.plot(line[:,0], line[:,1], line[:,2], c='green')
for i in [3]:
for j in range(i+1, m):
if j in nbrs[i]:
line = np.vstack((F[i], F[j]))
ax3d.plot(line[:,0], line[:,1], line[:,2], c='green')
plt.show()
def get_fname(mname, c, directory='./trained_models/', extension='pkl'):
config = ConfigParser.ConfigParser()
config.read('config.ini')
source = config.get('Global', 'source')
noise_scale = config.getfloat('Global', 'noise_scale')
if source == 'sf':
alpha = config.getfloat('Superformula', 'nonlinearity')
beta = config.getint('Superformula', 'n_clusters')
sname = source + '-' + str(beta) + '-' + str(alpha)
elif source == 'glass' or source[:3] == 'sf-':
sname = source
if c is None:
fname = '%s/%s_%.4f_%s.%s' % (directory, sname, noise_scale, mname, extension)
else:
fname = '%s/%s_%.4f_%s_%d.%s' % (directory, sname, noise_scale, mname, c, extension)
return fname
def save_model(model, mname, c=None):
# Get the file name
fname = get_fname(mname, c)
# Save the model
joblib.dump(model, fname, compress=9)
print 'Model ' + mname + ' saved!'
def load_model(mname, c=None):
# Get the file name
fname = get_fname(mname, c)
# Load the model
model = joblib.load(fname)
return model
def save_array(array, dname, c=None):
# Get the file name
fname = get_fname(dname, c, extension='npy')
# Save the array
np.save(fname, array)
print 'Model ' + dname + ' saved!'
def load_array(dname, c=None):
# Get the file name
fname = get_fname(dname, c, extension='npy')
# Load the array
array = np.load(fname)
return array
|
mit
|
juanka1331/VAN-applied-to-Nifti-images
|
lib/data_loader/MRI_stack_NORAD.py
|
1
|
2116
|
import scipy.io as sio
import settings
import numpy as np
import nibabel as nib
from matplotlib import pyplot as plt
# Script for the grey matter (GM) stack
def get_parameters():
"""
Function created to avoid loading the full stack in memory.
:return: dict with keys 'voxel_index', 'imgsize' and 'total_size'
"""
f = sio.loadmat(settings.MRI_stack_path_GM)
imgsize = f['imgsize'].astype('uint32').tolist()[0]
total_voxels = np.array(imgsize).prod()
return {'voxel_index': f['nobck_idx'],
'imgsize':imgsize,
'total_size': total_voxels}
def get_gm_stack():
"""
This function returns a dictionary with the entries
'labels', 'voxel_index', 'stack', 'imgsize', 'n_patients' and 'total_size'
for the grey matter (GM) stack.
:return: dict
"""
f = sio.loadmat(settings.MRI_stack_path_GM)
imgsize = f['imgsize'].astype('uint32').tolist()[0]
total_voxels = np.array(imgsize).prod()
return {'labels': f['labels'],
'voxel_index': f['nobck_idx'],
'stack': f['stack_NORAD_GM'],
'imgsize':imgsize,
'n_patients': len(f['labels']),
'total_size': total_voxels}
def get_wm_stack():
"""
This function returns a dictionary with the entries
'labels', 'voxel_index', 'stack', 'imgsize', 'n_patients' and 'total_size'
for the white matter (WM) stack.
:return: dict
"""
f = sio.loadmat(settings.MRI_stack_path_WM)
imgsize = f['imgsize'].astype('uint32').tolist()[0]
total_voxels = np.array(imgsize).prod()
return {'labels': f['labels'],
'voxel_index': f['nobck_idx'],
'stack': f['stack_NORAD_WM'],
'imgsize':imgsize,
'n_patients': len(f['labels']),
'total_size': total_voxels}
def load_patients_labels():
dict_norad = get_gm_stack() # 'stack' 'voxel_index' 'labels'
return dict_norad['labels']
def test():
data = get_gm_stack()
sample = data['stack'][0,:]
template = np.zeros(data['imgsize'], dtype=float)
template = template.flatten()
template[data['voxel_index']] = sample
out = np.reshape(template, data['imgsize'])
plt.imshow(out[:, 77, :], cmap='gray')
plt.show(block=True)
img = nib.Nifti1Image(out, np.eye(4))
img.to_filename('test4d.nii.gz')
#test()
|
gpl-2.0
|
david-hoffman/dphutils
|
lpsvd.py
|
1
|
20843
|
"""
LPSVD was developed by Tufts and Kumaresan (Tufts, D.; Kumaresan, R. IEEE Transactions on Acoustics,
Speech and signal Processing 1982, 30, 671 – 675.) as a method of harmonic inversion, i.e. decomposing
a time signal into a linear combination of (decaying) sinusoids.
A great reference that is easy to read for the non-EECS user is:
Barkhuijsen, H.; De Beer, R.; Bovée, W. M. M.; Van Ormondt, D. J. Magn. Reson. (1969) 1985, 61, 465–481.
This particular implementation was adapted, in part, from matNMR by Jacco van Beek
http://matnmr.sourceforge.net/
and Complex Exponential Analysis by Greg Reynolds
http://www.mathworks.com/matlabcentral/fileexchange/12439-complex-exponential-analysis/
Author: David Hoffman ([email protected])
Date: Aug, 2015
"""
import numpy as np
import pandas as pd
from scipy.linalg import hankel, svd, pinv, inv
# K_SPEED_OF_LIGHT = 2.99792458e-5 #(cm/fs)
def LPSVD(signal, M=None, lfactor=1 / 2, removebias=True):
"""
A function that performs the linear prediction-singular value decomposition
of a signal that is assumed to be a linear combination of damped sinusoids
Parameters
----------
signal : ndarray
The signal to be analyzed
M : int
Model order, if None, it will be estimated
lfactor : float
How to size the Hankel matrix, Tufts and Kumaresan suggest 1/3-1/2
Default number of prediction coefficients is half the number of points
in the input wave
removebias : bool
If true bias will be removed from the singular values of A
"""
if lfactor > 3 / 4:
print("You attempted to use an lfactor greater than 3/4, it has been set to 3/4")
lfactor = 3 / 4
# length of signal
N = len(signal)
# Sizing of the Hankel matrix, i.e. the backward prediction matrix
L = int(np.floor(N * lfactor))
# Shift the signal forward by 1
rollsig = np.roll(signal, -1)
# Generate the Hankel matrix
A = hankel(rollsig[: N - L], signal[L:])
# Take the conjugate of the Hankel Matrix to form the prediction matrix
A = np.conj(A)
# Set up the data vector, the vector to be "predicted"
h = signal[: N - L]
h = np.conj(h) # Take the conjugate
U, S, VT = svd(A) # Perform an SVD on the Hankel Matrix
# We can estimate the model order if the user hasn't selected one
if M is None:
M = estimate_model_order(S, N, L) + 8
print("Estimated model order: {}".format(M))
if M > len(S):
M = len(S)
print("M too large, set to max = ".format(M))
# remove bias if needed
if removebias:
# Here we subtract the arithmetic mean of the singular values determined to be
# noise from the rest of the singular values as described in Barkhuijsen
S -= S[M:].mean()
S = 1 / S[:M] # invert S and truncate
# Redimension the matrices to speed up the matrix multiplication step
VT = VT[:M, :] # Make VT the "right" size
U = U[:, :M] # Make U the "right" size
# Now we can generate the LP coefficients
lp_coefs = -1 * np.conj(VT.T).dot(np.diag(S)).dot(np.conj(U.T)).dot(h)
# Error check: are there any NaNs or INFs in lp_coefs?
if not np.isfinite(lp_coefs).all():
raise ValueError(
"There has been an error generating the prediction-error filter polynomial"
)
# Need to add 1 to the beginning of lp_coefs before taking roots
lp_coefs = np.insert(lp_coefs, 0, 1)
# We can now find the roots of lp_coefs (treating them as the coefficients of a polynomial)
# Note that NumPy defines polynomial coefficients with the largest power first
# so we have to reverse the coefficients before finding the roots.
myroots = np.roots(lp_coefs[::-1])
# Keep only the roots that lie within the unit circle on the complex plane, as directed by Kumaresan
# Actually it seems the correct thing to do is to remove roots with positive damping constants
usedroots = np.array([np.conj(np.log(root)) for root in myroots if np.abs(root) <= 1])
# Error checking: see if we removed all roots!
if len(usedroots) == 0:
raise ValueError("There has been an error finding the real poles")
# sort by freqs
usedroots = usedroots[np.imag(usedroots).argsort()]
# Let's make a DataFrame with dimension labels to store all our parameters
LPSVD_coefs = pd.DataFrame(columns=["amps", "freqs", "damps", "phase"])
# We can directly convert our poles into estimated damping factors and frequencies
LPSVD_coefs.damps = np.real(usedroots)
LPSVD_coefs.freqs = np.imag(usedroots) / (2 * np.pi)
# But we need to do a little more work to get the predicted amplitudes and phases
# Here we generate our basis matrix
basis = np.array([np.exp(np.arange(len(signal)) * root) for root in usedroots])
# Take the inverse
pinvBasis = pinv(basis)
# And apply it to our signal to recover our predicted amplitudes
# Amps here are complex meaning it has amplitude and phase information
cAmps = pinvBasis.T.dot(signal)
LPSVD_coefs.amps = np.abs(cAmps)
LPSVD_coefs.phase = np.angle(cAmps)
# Calculate the errors
calc_LPSVD_error(LPSVD_coefs, signal)
return LPSVD_coefs # , Errors
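# Hypothetical round-trip sketch (synthetic signal, not part of the module):
# t = np.arange(256)
# sig = np.exp(-t / 100.0) * np.cos(2 * np.pi * 0.05 * t)
# coefs = LPSVD(sig) # DataFrame of amps/freqs/damps/phase plus error columns
# recon = reconstruct_signal(coefs, sig) # time-domain model of the signal
# Frequencies come back in cycles per sample (roughly +/-0.05 here, one entry
# per complex exponential) because the algorithm never sees the sampling step.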
def estimate_model_order(s, N, L):
"""
Adapted from Complex Exponential Analysis by Greg Reynolds
http://www.mathworks.com/matlabcentral/fileexchange/12439-complex-exponential-analysis/
Use the MDL method as in Lin (1997) to compute the model
order for the signal. You must pass the vector of
singular values, i.e. the result of svd(T) and
N and L. This method is best explained by Scharf (1992).
Parameters
----------
s : ndarray
singular values from SVD decomposition
N : int
L : int
Returns
-------
M : float
Estimated model order
"""
MDL = np.zeros(L)
for i in range(L):
MDL[i] = -N * np.log(s[i:L]).sum()
MDL[i] += N * (L - i) * np.log(s[i:L].sum() / (L - i))
MDL[i] += i * (2 * L - i) * np.log(N) / 2
return MDL.argmin()
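# The loop above restated as a formula (added for clarity): for each candidate
# order i,
# MDL(i) = -N * sum_{k=i}^{L-1} log(s_k)
#          + N * (L - i) * log( sum_{k=i}^{L-1} s_k / (L - i) )
#          + i * (2*L - i) * log(N) / 2
# and the estimated model order is the i that minimizes MDL.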
def calc_LPSVD_error(LPSVD_coefs, data):
"""
A function that estimates the errors on the LPSVD parameters using the Cramer-Rao
lower bound (http://en.wikipedia.org/wiki/Cram%C3%A9r%E2%80%93Rao_bound).
This implementation is based on the work of Barkhuijsen et al (http://dx.doi.org/10.1016/0022-2364(86)90446-4)
Parameters
----------
LPSVD_coefs : DataFrame
Coefficients calculated from the LPSVD algorithm, we will add errors to this DataFrame
data : ndarray
The data from which the LPSVD coefficients were calculated
"""
# ***The first thing to do is to calculate the RMS of the residuals***
# We reconstruct the model from the parameters
recon = reconstruct_signal(LPSVD_coefs, data)
p = np.arange(len(data))
res = data - recon
# Calculate the RMS
RMS = np.sqrt((res ** 2).mean())
# Next we need to generate the Fisher matrix
size = len(LPSVD_coefs) * 4
FisherMat = np.zeros((size, size))
# We'll reuse res for the intermediate calculations
# This implementation is based on the work of Barkhuijsen et al (http://dx.doi.org/10.1016/0022-2364(86)90446-4)
for i, rowi in LPSVD_coefs.iterrows():
ampi = rowi.amps
freqi = rowi.freqs
dampi = rowi.damps
phasei = rowi.phase
for j, rowj in LPSVD_coefs.iterrows():
ampj = rowj.amps
freqj = rowj.freqs
dampj = rowj.damps
phasej = rowj.phase
res = np.exp(
p * complex(dampi + dampj, 2 * np.pi * (freqi - freqj))
+ complex(0, 1) * (phasei - phasej)
)
chi0 = np.real(res).sum()
zeta0 = np.imag(res).sum()
res = p * np.exp(
p * complex(dampi + dampj, 2 * np.pi * (freqi - freqj))
+ complex(0, 1) * (phasei - phasej)
)
chi1 = np.real(res).sum()
zeta1 = np.imag(res).sum()
res = p ** 2 * np.exp(
p * complex(dampi + dampj, 2 * np.pi * (freqi - freqj))
+ complex(0, 1) * (phasei - phasej)
)
chi2 = np.real(res).sum()
zeta2 = np.imag(res).sum()
# First Row
FisherMat[4 * i + 0][4 * j + 0] = ampi * ampj * chi2
FisherMat[4 * i + 0][4 * j + 1] = -ampi * zeta1
FisherMat[4 * i + 0][4 * j + 2] = ampi * ampj * zeta2
FisherMat[4 * i + 0][4 * j + 3] = ampi * ampj * chi1
# Second Row
FisherMat[4 * i + 1][4 * j + 0] = ampj * zeta1
FisherMat[4 * i + 1][4 * j + 1] = chi0
FisherMat[4 * i + 1][4 * j + 2] = -ampj * chi1
FisherMat[4 * i + 1][4 * j + 3] = ampj * zeta0
# Third Row
FisherMat[4 * i + 2][4 * j + 0] = -ampi * ampj * zeta2
FisherMat[4 * i + 2][4 * j + 1] = -ampi * chi1
FisherMat[4 * i + 2][4 * j + 2] = ampi * ampj * chi2
FisherMat[4 * i + 2][4 * j + 3] = -ampi * ampj * zeta1
# Fourth Row
FisherMat[4 * i + 3][4 * j + 0] = ampi * ampj * chi1
FisherMat[4 * i + 3][4 * j + 1] = -ampi * zeta0
FisherMat[4 * i + 3][4 * j + 2] = ampi * ampj * zeta1
FisherMat[4 * i + 3][4 * j + 3] = ampi * ampj * chi0
FisherMat = inv(FisherMat) # Replace the Fisher matrix with its inverse
FisherMat *= 2 * RMS ** 2
LPSVD_coefs.insert(4, "amps_error", np.nan)
LPSVD_coefs.insert(5, "freqs_error", np.nan)
LPSVD_coefs.insert(6, "damps_error", np.nan)
LPSVD_coefs.insert(7, "phase_error", np.nan)
# Fill up the Error wave with the errors.
for i in range(len(LPSVD_coefs)):
LPSVD_coefs.amps_error.loc[i] = np.sqrt((FisherMat[1 + i * 4][1 + i * 4]))
LPSVD_coefs.freqs_error.loc[i] = np.sqrt((FisherMat[0 + i * 4][0 + i * 4]))
LPSVD_coefs.damps_error.loc[i] = np.sqrt((FisherMat[2 + i * 4][2 + i * 4]))
LPSVD_coefs.phase_error.loc[i] = np.sqrt((FisherMat[3 + i * 4][3 + i * 4]))
return LPSVD_coefs
def reconstruct_signal(LPSVD_coefs, signal, ampcutoff=0, freqcutoff=0, dampcutoff=0):
"""
A function that reconstructs the original signal in the time domain
from the LPSVD algorithm's coefficients, which are passed as LPSVD_coefs
http://mathworld.wolfram.com/FourierTransformLorentzianFunction.html
Parameters
----------
LPSVD_coefs : DataFrame
Coefficients returned by the LPSVD algorithm
signal : ndarray
The original signal; only its length and dtype are used here
ampcutoff, freqcutoff, dampcutoff : float
Cutoffs on the amplitude, frequency and damping of the components
(only ampcutoff and dampcutoff are applied in the loop below)
"""
# Initialize time domain signal
time_domain = np.zeros_like(signal, dtype=complex)
p = np.arange(len(signal))
for i, row in LPSVD_coefs.iterrows():
damp = -row.damps / np.pi
if row.amps ** 2 > ampcutoff and damp >= dampcutoff:
# Keep in mind that LPSVD_coefs were constructed agnostic to the actual sampling
# frequency so we will reconstruct it in the same way
amp = row.amps
damp = row.damps
phase = row.phase
freq = row.freqs
time_domain += amp * np.exp(
p * complex(damp, 2 * np.pi * freq) + complex(0, 1) * phase
)
if signal.dtype != complex:
time_domain = np.real(time_domain)
return time_domain
# Function/S Cadzow(signal, M, iters,[lfactor,q])
# #Remove noise using the Cadzow composite property mapping method.
# #See Cadzow, J. A. IEEE Transactions on Acoustics, Speech and signal Processing 1988, 36, 49 –62.
# #Adapted from from Complex Exponential Analysis by Greg Reynolds
# #http://www.mathworks.com/matlabcentral/fileexchange/12439-complex-exponential-analysis/
#
# Wave signal #The signal to be filtered
# Variable M #The expected number of signals (2 times the number of damped sinusoids
# Variable iters #Number of iterations to be performed
#
# Variable lfactor #User selectable factorization of the Hankel Matrix
# Variable q #Verbose or not
#
# if(ParamIsDefault(lfactor))
# lfactor = 1/2
#
#
# if(ParamIsDefault(q))
# q=0
# Else
# q=1
#
#
# #We want this function to be data folder aware
# #We'll do all our calculations in a specific data folder and then kill that folder at the end
# String savDF= GetDataFolder(1) # Save current DF for restore.
# if( DataFolderExists("root:Cadzow_Data") )
# SetDataFolder root:Cadzow_Data
# else
# NewDataFolder/O/S root:Cadzow_Data # Our stuff goes in here.
#
#
# #Timing
# Variable timerRef=startMSTimer
#
# Variable N = len(signal);
# Variable L = floor(N*lfactor);
#
# # T is the prediction matrix before filtering.
# Wave/C T = $Hankel(signal, N-L, L+1)
# T = conj(T)
#
# if(M>(N-L))
# M = N-L
# print "M too large M set to: " + num2str(M)
#
#
# Variable i = 0
# Variable tol = 0
# Variable r = 0
#
# print "Beginning Cadzow filtration, press ESC to abort, press CMD to check status."
#
# for(i=0;i<iters;i+=1)
#
# # decompose T
# #MatrixSVD Matrix
# MatrixSVD T
#
# WAVE/C S = W_W
# WAVE/C U = M_U
# WAVE/C VT = M_VT
#
# # check current rank
# tol = L*5e-16
# Duplicate/O S, S2
# S2 = (s>tol)
# r = sum(S2)
#
# if(q || (GetKeyState(0) & 1))
# printf "Cadzow iteration %d (rank is %d, target is %d).\r", i, r,M
#
#
# if(r <= M)
# Printf "Successful completion: "
# break
# elif( r > M )
# #Filter the hankel matrix
# S = S*(p < M)
# Redimension/N=(-1,M) U
# Redimension/N=(M,-1) VT
# MatrixOp/C/O T = U x DiagRC(S,M,M) x VT
# # average to restore Hankel structure
# Wave recon_signal = $unHankelAvg(T)
# WAVE/C T = $Hankel(recon_signal,N-L,L+1)
#
# if (GetKeyState(0) & 32) # Is Escape key pressed now?
# Printf "User abort: "
# Break
#
# EndFor
#
# # need to extract data from matrix Tr
# T = conj(T)
#
# #Move the results to the original data folder
# Duplicate/O $unHankelAvg(T), $(savDF+nameOfWave(signal)+"_cad")
# WAVE nSignal = $(savDF+nameOfWave(signal)+"_cad")
# SetDataFolder savDF # Restore current DF.
#
# #Clean up
# KillDataFolder root:Cadzow_Data
#
# #Scale the new signal appropriately
# CopyScales/P signal, nSignal
#
# #if the original signal was real, make the new signal real as well
# if((WaveType(signal) & 2^0) == 0)
# Redimension/R nSignal
#
#
# #Finish up the timing
# Variable microseconds = stopMSTimer(timerRef)
# Variable minutes = floor(microseconds/(60e6))
# Variable seconds = microseconds/(1e6)-minutes*60
#
# if(!q)
# printf "Final rank is %d, target is %d, ", r,M
#
#
# Printf "%d iterations took ", i
# if(minutes > 1)
# Printf "%g minutes and ",minutes
# elif(minutes > 0)
# Printf "1 minute and "
#
# Printf "%g seconds, for %g sec/iter.\r",seconds,microseconds/(1e6)/i
#
# return GetWavesDataFolder($(nameOfWave(signal)+"_cad"),2)
# End
#
# STATIC Function/S unHankelAvg(Hankel)
# #A function that takes a Hankel matrix and returns the original signal
# #that it was formed from by averaging along the anti-diagonals
# Wave/C Hankel #The matrix to be inverted
#
# Variable numRows = DimSize(Hankel,0)
# Variable numCols = DimSize(Hankel,1)
#
# #Make the signal to be returned, make sure to set to zero!
# Make/C/D/O/N=(numRows+numCols-1) mySignal=0
#
# variable i=0,j=0
# Duplicate/C/O mySignal myNorm #Make the normalizing wave
# for(i=0;i<numRows;i+=1)
# for(j=0;j<numCols;j+=1)
# #Build up the signal and the norm
# mySignal[i+j]+=Hankel[i][j]
# myNorm[i+j] += complex(1,0)
# EndFor
# EndFor
# mySignal=mySignal/myNorm
# return GetWavesDataFolder(mySignal,2)
# End
#
#
# Function OptimizeLPSVDCoefs(data,LPSVD_coefs,[ampcutoff,freqcutoff,dampcutoff,holdfreqphase])
# Wave data #The original data
# Wave LPSVD_coefs #Parameters to optimize
# Variable ampcutoff, freqcutoff,dampcutoff #Cutoff parameters to remove spurious values
# Variable holdfreqphase #hold the phases and frequencies constant during the fit
#
# if(ParamIsDefault(ampcutoff))
# ampcutoff=0
#
#
# if(ParamIsDefault(freqcutoff))
# freqcutoff=0
#
#
# if(ParamIsDefault(dampcutoff))
# dampcutoff=0
#
#
# if(ParamIsDefault(holdfreqphase))
# holdfreqphase=0
#
#
# #Make a copy of the LPSVD_coefs, we'll use this wave later
# #to repack to optimized variables
# Duplicate/O LPSVD_coefs $("opt"+NameOfWave(LPSVD_coefs))
# WAVE newLPSVD_coefs = $("opt"+NameOfWave(LPSVD_coefs))
#
# #Make a copy of data and remove the scaling from the copy.
# Duplicate/O data $("fit_"+nameofwave(data))
# WAVE newData = $("fit_"+nameofwave(data))
# SetScale/P x,0,1,"", newData
#
# Variable numComponents = DimSize(LPSVD_coefs,0)
# variable i = 0
# String removedComponents = ""
# for(i=numComponents;i>0;i-=1)
# if((newLPSVD_coefs[i-1][%amps])^2<ampcutoff || (-LPSVD_coefs[i-1][%damps]/K_SPEED_OF_LIGHT/dimdelta(data,0)/np.pi) < dampcutoff || abs(newLPSVD_coefs[i-1][%freqs])<freqcutoff)
# removedComponents += num2istr(abs(newLPSVD_coefs[i-1][%freqs])/K_SPEED_OF_LIGHT/DimDelta(data,0)) +", "
# DeletePoints (i-1),1, newLPSVD_coefs
# numComponents-=1
#
# EndFor
#
# if(strlen(removedComponents))
# print "The following frequency components were removed: " + removedComponents
#
#
# #unpack LPSVD_coefs into a regular coefficient wave
# #Make use of the fact that only half of the coefficients are necessary
# #Also, set any frequency below some tolerance to zero and hold it there
# Variable numCoefs = ceil(numComponents/2)
# Make/D/O/N=(numCoefs*4) myCoefs
# String HoldStr = ""
# for(i=0;i<numCoefs;i+=1)
# myCoefs[4*i] = 2*LPSVD_coefs[i][%amps]
# myCoefs[4*i+1] = LPSVD_coefs[i][%damps]
# if(abs(LPSVD_coefs[i][%freqs])<1e-14)
# myCoefs[4*i+2] = 0
# myCoefs[4*i+3] = 0
# Else
# myCoefs[4*i+2] = LPSVD_coefs[i][%freqs]
# myCoefs[4*i+3] = LPSVD_coefs[i][%phase]
#
# if(holdfreqphase)
# HoldStr+="0011"
# Else
# HoldStr+="0000"
#
# EndFor
#
# #if there are an odd number of components the middle one is zero frequency
# if(numCoefs-floor(DimSize(LPSVD_coefs,0)/2))
# myCoefs[4*(numCoefs-1)] /= 2
#
# Variable V_FitNumIters
# Variable V_FitMaxIters=200
# #do the optimization (we're using funcfit, so we're minimizing the chi^2)
# FuncFit/H=holdstr/ODR=2/N/W=2/Q decayingSinusoids, myCoefs, newData
#
# print "Number of interations: "+num2str(V_FitNumIters)
# #Well use the newData wave to hold the fit, why not?
# newData = decayingSinusoids(myCoefs,p)
#
# #return the scaling
# CopyScales/P data newData
#
# #Repack
# for(i=0;i<numCoefs;i+=1)
# newLPSVD_coefs[i][%amps] = myCoefs[4*i]/2
# newLPSVD_coefs[i][%damps] = myCoefs[4*i+1]
# newLPSVD_coefs[i][%freqs] = myCoefs[4*i+2]
# newLPSVD_coefs[i][%phase] = myCoefs[4*i+3]
#
# newLPSVD_coefs[2*numCoefs-i-1][%amps] = myCoefs[4*i]/2
# newLPSVD_coefs[2*numCoefs-i-1][%damps] = myCoefs[4*i+1]
# newLPSVD_coefs[2*numCoefs-i-1][%freqs] = -myCoefs[4*i+2]
# newLPSVD_coefs[2*numCoefs-i-1][%phase] = -myCoefs[4*i+3]
# EndFor
# End
#
# Function decayingSinusoids(w,t)
# #w[i] = amp
# #w[i+1] = damp
# #w[i+2] = freq
# #w[i+3] = phase
# Wave w
# Variable t
#
# Variable val=0
# Variable i=0
# Variable npts = len(w)
# for(i=0;i<npts;i+=4)
# val += w[i]*exp(t*w[i+1])*Cos(2*np.pi*w[i+2]*t+w[i+3])
# EndFor
#
# return val
# End
|
apache-2.0
|
ahwillia/tensortools
|
examples/shift_cpd.py
|
1
|
4661
|
"""
This script generates Fig. 4 in Williams (2020). bioRxiv. 2020.03.02.974014
"""
from tensortools.cpwarp import ShiftedCP, fit_shifted_cp
from scipy.ndimage import gaussian_filter1d
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(1234)
# Parameters defining synthetic dataset.
MAX_SHIFT = 0.15
N_TIMEBINS = 120
N_NEURONS = 60
N_TRIALS = 200
BACKGROUND_RATE = .02
# Create three low-dimensional factor matrices; rank-2 data tensor.
#
# - neural_factors : 2 x neurons (rank x dim, as constructed below)
# - trial_factors : 2 x trials
# - temporal_factors : 2 x timebins
#
# See Williams et al. (2018) for the motivating application.
neural_factors = np.row_stack([
np.repeat([0, 1], N_NEURONS // 2),
np.repeat([1, 0], N_NEURONS // 2)
])
trial_factors = np.row_stack([
np.linspace(0, 1, N_TRIALS),
np.linspace(0, 1, N_TRIALS)[::-1],
])
temporal_factors = np.zeros((2, N_TIMEBINS))
temporal_factors[0][[40, 80]] = 1
temporal_factors[1][[40, 80]] = 1
temporal_factors = gaussian_filter1d(temporal_factors, 1.5, axis=1)
# Sample random shifts on each trial for each component.
rand_shifts = np.random.uniform(
low=N_TIMEBINS * -MAX_SHIFT,
high=N_TIMEBINS * MAX_SHIFT,
size=(2, trial_factors.shape[1])
)
# Create `ShiftedCP` model representing ground truth.
_m = ShiftedCP(
trial_factors,
neural_factors,
temporal_factors,
u_s=rand_shifts # shifts for axis=0
)
# Create (trials x neurons x timebins) tensor of firing rates.
spike_prob = _m.predict() + BACKGROUND_RATE
# Sample spikes from a Poisson distribution
data = np.random.poisson(spike_prob).astype(float)
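# Added sanity check: axes are (trials, neurons, timebins), as stated above.
assert data.shape == (N_TRIALS, N_NEURONS, N_TIMEBINS)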
# Shuffle the neuron indices.
shuff = np.random.permutation(data.shape[1])
data_shuff = data[:, shuff, :]
# === PLOT SPIKES === #
fig1, axes = plt.subplots(
2, 5, sharey=True, sharex=True, figsize=(6, 3.5)
)
for k, axc in zip([0, 49, 99, 149, 199], axes.T):
y, x = np.where(data_shuff[k])
axc[0].set_title("trial\n{}/{}".format(k + 1, data.shape[0]))
axc[0].scatter(x, y, s=5, lw=0, color="k")
y, x = np.where(data[k])
axc[1].scatter(x, y, s=5, lw=0, c=["b" if yi > 30 else "r" for yi in y])
axes[-1, -1].set_xlim([0, temporal_factors.shape[1]])
axes[-1, -1].set_ylim([-2, neural_factors.shape[1] + 2])
axes[0, 0].set_ylabel("unsorted\nneurons")
axes[1, 0].set_ylabel("sorted\nby model")
for ax in axes.ravel():
ax.set_xticks([])
ax.set_yticks([])
fig1.tight_layout()
fig1.subplots_adjust(wspace=.1, hspace=.1)
# === FIT TENSOR DECOMPOSITIONS === #
from tensortools.cpwarp import fit_shifted_cp
# Rank-2 classic tensor decomposition.
model1 = fit_shifted_cp(
data, 2, boundary="wrap",
n_restarts=5,
max_shift_axis0=None,
max_shift_axis1=1e-10, # essentially zero shift
max_iter=100,
u_nonneg=True,
v_nonneg=True,
)
# Rank-2 decomposition with per-trial shifts.
model2 = fit_shifted_cp(
data, 2, boundary="wrap",
n_restarts=5,
max_shift_axis0=MAX_SHIFT,
max_shift_axis1=None,
max_iter=100,
u_nonneg=True,
v_nonneg=True,
)
# Re-order the model factors to align with the ground truth.
if model2.factors[0][:,-1][0] < model2.factors[0][:,-1][1]:
model2.permute([1, 0])
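# Optional comparison (sketch; assumes the fitted models expose the same
# `.predict()` method used for the ground-truth ShiftedCP above):
#
#     err1 = np.linalg.norm(data - model1.predict()) / np.linalg.norm(data)
#     err2 = np.linalg.norm(data - model2.predict()) / np.linalg.norm(data)
#     print(f"relative error: vanilla={err1:.3f}, shifted={err2:.3f}")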
# === PLOT RESULTS === #
def plot_result(m, colors=("b", "r")):
fig, axes = plt.subplots(2, 3, figsize=(6, 4))
def normalize(f):
return f / np.linalg.norm(f)
axes[0, 0].plot(normalize(m.factors[0][0]), color=colors[0])
axes[1, 0].plot(normalize(m.factors[0][1]), color=colors[1])
axes[0, 1].plot(normalize(m.factors[1][0]), color=colors[0])
axes[1, 1].plot(normalize(m.factors[1][1]), color=colors[1])
axes[0, 2].plot(normalize(m.factors[2][0]), color=colors[0])
axes[1, 2].plot(normalize(m.factors[2][1]), color=colors[1])
for ax in axes[0]:
ax.set_xticklabels([])
for ax in axes.ravel():
ax.set_yticklabels([])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
axes[0, 0].set_ylabel("#1", labelpad=-1)
axes[1, 0].set_ylabel("#2", labelpad=-1)
# axes[0, 0].set_title("trial\nfactors")
# axes[0, 1].set_title("neuron\nfactors")
# axes[0, 2].set_title("temporal\nfactors")
axes[1, 0].set_xlabel("trials", labelpad=0)
axes[1, 1].set_xlabel("neurons", labelpad=0)
axes[1, 2].set_xlabel("time (a.u.)", labelpad=0)
return fig
fig2 = plot_result(_m)
fig2.suptitle("ground truth")
fig3 = plot_result(model1, colors=("k", "k"))
fig3.suptitle("vanilla tensor decomp.")
fig4 = plot_result(model2)
fig4.suptitle("shifted tensor decomp.")
for fig in fig2, fig3, fig4:
fig.tight_layout()
fig.subplots_adjust(top=.9)
plt.show()
|
mit
|
DougBurke/astropy
|
astropy/conftest.py
|
1
|
1127
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains pytest configuration settings that are astropy-specific
(i.e. those that would not necessarily be shared by affiliated packages
making use of astropy's test runner).
"""
from importlib.util import find_spec
from astropy.tests.plugins.display import PYTEST_HEADER_MODULES
from astropy.tests.helper import enable_deprecations_as_exceptions
if find_spec('asdf') is not None:
from asdf import __version__ as asdf_version
if asdf_version >= '2.0.0':
pytest_plugins = ['asdf.tests.schema_tester']
PYTEST_HEADER_MODULES['Asdf'] = 'asdf'
enable_deprecations_as_exceptions(
include_astropy_deprecations=False,
# This is a workaround for the OpenSSL deprecation warning that comes from
# the `requests` module. It only appears when both asdf and sphinx are
# installed. This can be removed once pyopenssl 1.7.20+ is released.
modules_to_ignore_on_import=['requests'])
try:
import matplotlib
except ImportError:
pass
else:
matplotlib.use('Agg')
PYTEST_HEADER_MODULES['Cython'] = 'cython'
|
bsd-3-clause
|
nmartensen/pandas
|
pandas/tests/io/test_clipboard.py
|
13
|
4988
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy.random import randint
from textwrap import dedent
import pytest
import pandas as pd
from pandas import DataFrame
from pandas import read_clipboard
from pandas import get_option
from pandas.util import testing as tm
from pandas.util.testing import makeCustomDataframe as mkdf
from pandas.io.clipboard.exceptions import PyperclipException
from pandas.io.clipboard import clipboard_set
try:
DataFrame({'A': [1, 2]}).to_clipboard()
_DEPS_INSTALLED = 1
except PyperclipException:
_DEPS_INSTALLED = 0
@pytest.mark.single
@pytest.mark.skipif(not _DEPS_INSTALLED,
reason="clipboard primitives not installed")
class TestClipboard(object):
@classmethod
def setup_class(cls):
cls.data = {}
cls.data['string'] = mkdf(5, 3, c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['int'] = mkdf(5, 3, data_gen_f=lambda *args: randint(2),
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['float'] = mkdf(5, 3,
data_gen_f=lambda r, c: float(r) + 0.01,
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['mixed'] = DataFrame({'a': np.arange(1.0, 6.0) + 0.01,
'b': np.arange(1, 6),
'c': list('abcde')})
# Test columns exceeding "max_colwidth" (GH8305)
_cw = get_option('display.max_colwidth') + 1
cls.data['colwidth'] = mkdf(5, 3, data_gen_f=lambda *args: 'x' * _cw,
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
# Test GH-5346
max_rows = get_option('display.max_rows')
cls.data['longdf'] = mkdf(max_rows + 1, 3,
data_gen_f=lambda *args: randint(2),
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
# Test for non-ascii text: GH9263
cls.data['nonascii'] = pd.DataFrame({'en': 'in English'.split(),
'es': 'en español'.split()})
# unicode round trip test for GH 13747, GH 12529
cls.data['utf8'] = pd.DataFrame({'a': ['µasd', 'Ωœ∑´'],
'b': ['øπ∆˚¬', 'œ∑´®']})
cls.data_types = list(cls.data.keys())
@classmethod
def teardown_class(cls):
del cls.data_types, cls.data
def check_round_trip_frame(self, data_type, excel=None, sep=None,
encoding=None):
data = self.data[data_type]
data.to_clipboard(excel=excel, sep=sep, encoding=encoding)
if sep is not None:
result = read_clipboard(sep=sep, index_col=0, encoding=encoding)
else:
result = read_clipboard(encoding=encoding)
tm.assert_frame_equal(data, result, check_dtype=False)
def test_round_trip_frame_sep(self):
for dt in self.data_types:
self.check_round_trip_frame(dt, sep=',')
self.check_round_trip_frame(dt, sep=r'\s+')
self.check_round_trip_frame(dt, sep='|')
def test_round_trip_frame_string(self):
for dt in self.data_types:
self.check_round_trip_frame(dt, excel=False)
def test_round_trip_frame(self):
for dt in self.data_types:
self.check_round_trip_frame(dt)
def test_read_clipboard_infer_excel(self):
        text = dedent("""
            John James\tCharlie Mingus
            1\t2
            4\tHarry Carney
            """.strip())
clipboard_set(text)
df = pd.read_clipboard()
# excel data is parsed correctly
assert df.iloc[1][1] == 'Harry Carney'
# having diff tab counts doesn't trigger it
text = dedent("""
a\t b
1 2
3 4
""".strip())
clipboard_set(text)
res = pd.read_clipboard()
text = dedent("""
a b
1 2
3 4
""".strip())
clipboard_set(text)
exp = pd.read_clipboard()
tm.assert_frame_equal(res, exp)
def test_invalid_encoding(self):
# test case for testing invalid encoding
data = self.data['string']
with pytest.raises(ValueError):
data.to_clipboard(encoding='ascii')
with pytest.raises(NotImplementedError):
pd.read_clipboard(encoding='ascii')
def test_round_trip_valid_encodings(self):
for enc in ['UTF-8', 'utf-8', 'utf8']:
for dt in self.data_types:
self.check_round_trip_frame(dt, encoding=enc)
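# Manual round-trip sketch outside the test class (requires a working system
# clipboard; values shown are illustrative):
#
#     df = DataFrame({'a': [1, 2, 3]})
#     df.to_clipboard(excel=True, sep='\t')
#     read_clipboard(sep='\t', index_col=0)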
|
bsd-3-clause
|
joshloyal/scikit-learn
|
examples/model_selection/plot_roc_crossval.py
|
28
|
3697
|
"""
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.model_selection.cross_val_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
i = 0
for train, test in cv.split(X, y):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
    # Compute ROC curve and area under the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
tprs.append(interp(mean_fpr, fpr, tpr))
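    # Anchor the interpolated curve at the origin so the mean ROC starts at (0, 0).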
tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
plt.plot(fpr, tpr, lw=1, alpha=0.3,
label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))
i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Luck', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
plt.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
|
RPGOne/Skynet
|
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/decomposition/tests/test_nmf.py
|
33
|
6189
|
import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
"""Test NNDSVD behaviour on negative input"""
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
"""Test that NNDSVD does not return negative values"""
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
        W, H = nmf._initialize_nmf(data, 10, variant=var, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
"""Test NNDSVD error
Test that _initialize_nmf error is less than the standard deviation of the
entries in the matrix.
"""
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
"""Test NNDSVD variants correctness
Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
the basic version has zeros.
"""
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
"""Test model fit behaviour on negative input"""
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
"""Test that the decomposition does not contain negative values"""
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
"""Test that the fit is not too far away"""
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
"""Test that NLS solver doesn't return negative values"""
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
"""Test that the NLS results should be close"""
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
"""Test that NMF.transform returns close values
(transform uses scipy.optimize.nnls for now)
"""
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
"""Smoke test for the case of more components than features."""
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
"""Test sparseness
Test that sparsity constraints actually increase sparseness in the
part where they are applied.
"""
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
"""Test that sparse matrices are accepted as input"""
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
"""Test that transform works on sparse data. Issue #2124"""
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF()
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
|
bsd-3-clause
|
sergpolly/GlycoMadness
|
stage4_eval_bad_combine_out.py
|
1
|
16402
|
import os
import sys
from Bio import Seq
from Bio import SeqIO
from Bio import SeqRecord
import pandas as pd
import numpy as np
import ms_module as ms
import re
############################
from StringIO import StringIO
#
import argparse
#
#
# HOW TO LAUNCH THIS THING ...
# %run stage3_gsites_catalog.py --prefix ../raw_data/New_files_to_analyze/011216\ glycocapture\ 90-90 -m pept_prot_map.csv -g pulled_proteins.gb -s specs.xls
#
# do some arguments parsing to make the script looks civilized ...
parser = argparse.ArgumentParser()
parser.add_argument("-e","--exp_num",
help="specify number of an experiment",required=True)
parser.add_argument("-b","--bad_map",
help="specify file name of bad peptide protein map file (with/without path)",required=True)
parser.add_argument("-o","--good_final_out",
help="specify file name of the FINAL output with GOOD data (with/without path)",required=True)
# parser.add_argument("-m","--pept_prot_map",
# help="specify file name of peptide summary with unique of fetchids of matching proteins (with/without path)",required=True)
parser.add_argument("-g","--genbank",
help="specify file name of genbank records with pulled proteins (with/without path)",required=True)
parser.add_argument("-s","--spec_summary", help="speicfy spectrum file name (with/without path)",required=True)
parser.add_argument("--prefix", help="specify common part of the path for peptide and spectrum files")
parser.add_argument("--separator", help="speicfy separator type in the input data",default='tab')
args = parser.parse_args()
#
###############################################
if args.prefix is not None:
bad_map_fname = os.path.join( args.prefix, args.bad_map )
good_fname = os.path.join( args.prefix, args.good_final_out )
# pep_map_fname = os.path.join( args.prefix, args.pept_prot_map )
spec_fname = os.path.join( args.prefix, args.spec_summary )
gb_fname = os.path.join( args.prefix, args.genbank )
else:
bad_map_fname = args.bad_map
good_fname = args.good_final_out
# pep_map_fname = args.pept_prot_map
spec_fname = args.spec_summary
gb_fname = args.genbank
# get the common path for later use ...
common_path = os.path.commonprefix([bad_map_fname,
good_fname,
# pep_map_fname,
spec_fname,
gb_fname])
common_path = os.path.dirname(common_path)
#
# Reading genbank mindfully next ...
gbrecs = ms.genebank_fix_n_read(gb_fname,key_func_type='id')
######################################
# assign some module internal stuff ...
ms.gbrecs = gbrecs
#
# separator type choice is needed only for the ORIGINAL input files ...
if args.separator == "tab":
separator = '\t'
elif args.separator == "comma":
separator = ','
else:
separator = '\t'
#################
bad_info = pd.read_csv(bad_map_fname)
good_info = pd.read_csv(good_fname)
# pep_info = pd.read_csv(pep_map_fname)
spec_info = pd.read_csv(spec_fname,sep=separator)
# fix their peptide sequence thing right away ...
spec_info['pept'] = spec_info['Peptide sequence'].str.upper()
# pep_info['fetchid'] = pep_info['fetchid'].apply(int)
# bad_info['fetchid'] = bad_info['fetchid']#.apply(int) # WHY?????
if args.exp_num=='1':
bad_info['enzyme'] = 'T'
# #
# bad_info.fetchid.apply(lambda x: str(int(x)) if pd.notnull(x) else None)
# #
# fasta = SeqIO.to_dict(SeqIO.parse(fasta_fname,"fasta"),key_function=lambda _: _.id.split('|')[1])
# 1-BASED NOTATION FOR PROTEINS INDEXING ENFORCED ...
# pep_df = pd.read_csv(uniq_pept_fname)
###############################################
# connection between peptide info and spectrum info to be established ...
##########################################################################
# unroll that spec table to have 1 deamid per row ...
spec_info_unrolled = ms.unroll_by_mfunc(spec_info,'Variable modifications identified by spectrum',ms.extract_deamids,'deamid_info')
if spec_info_unrolled['Protein identification probability'].dtype != 'float':
spec_info_unrolled['prot_ident_probab'] = spec_info_unrolled['Protein identification probability'].str.strip('%').apply(float)
else:
spec_info_unrolled['prot_ident_probab'] = spec_info_unrolled['Protein identification probability']
####################################################################################################################
if spec_info_unrolled['Peptide identification probability'].dtype != 'float':
spec_info_unrolled['pept_ident_probab'] = spec_info_unrolled['Peptide identification probability'].str.strip('%').apply(float)
else:
spec_info_unrolled['pept_ident_probab'] = spec_info_unrolled['Peptide identification probability']
##########################################################
# so far the following merge seems to be 100% sufficient for the desired final output ...
# we could add on extra features if needed ...
spec_n_pep = spec_info_unrolled[['pept',
'deamid_info',
'prot_ident_probab',
'pept_ident_probab']].merge(bad_info,how='right',on='pept',suffixes=('','_x'))
# Now, extract those gsites ...
# dg_func = lambda x: pd.Series( ms.deamid_to_gsite(x['deamid_info'], x['start_fetched'], str(gbrecs[str(int(x['fetchid']))].seq)) )
def dg_func(row):
deamid_info = row['deamid_info']
start_fetched = row['start_fetched']
fetchacc = row['fetchacc']
if pd.notnull(start_fetched) and pd.notnull(fetchacc):
seq = gbrecs[fetchacc].seq
return pd.Series( ms.deamid_to_gsite(deamid_info, start_fetched, str(seq)) )
else:
return pd.Series({ 'gsite':"", 'gsite_seq':"", 'gstart':"" })
# and add them back to the main table ...
gs_res = spec_n_pep[['deamid_info','start_fetched','fetchacc']].apply( dg_func, axis=1 )
spec_n_pep = spec_n_pep.merge(gs_res,left_index=True,right_index=True)
print
print "Now we'd need to add theoretical glycosilation sites as a separate column ..."
print "full protein sequence and its length is added as well ..."
# this analysis must be done, once for each 'fetchid', and then merged back to the main table ...
get_theor_sites_fid = lambda facc: ms.get_theor_sites(str(gbrecs[facc].seq)) if pd.notnull(facc) else None
get_theor_sites_number_fid = lambda facc: ms.get_theor_sites_number(str(gbrecs[facc].seq)) if pd.notnull(facc) else None
theor_sites_info = lambda facc: pd.Series(
{'fetchacc':facc,
'gsites_predicted':get_theor_sites_fid(facc),
'gsites_predicted_number':get_theor_sites_number_fid(facc),
'prot_seq':str(gbrecs[facc].seq),
'prot_len':len(str(gbrecs[facc].seq))} ) if pd.notnull(facc) else None
###################################################
predicted_gsite_info = spec_n_pep['fetchacc'].dropna().drop_duplicates().apply(theor_sites_info)
# add back to the main table ...
spec_n_pep = spec_n_pep.merge(predicted_gsite_info,on='fetchacc',how='right')
print "done ..."
print "numbering appears to be 1-based and overall correct!"
print
# print " 'gsites_predicted' column uses 1-based numbering. Enforced and checked."
# SOME FINAL PREPARATIONS TO COMPLY WITH THE REQUESTED FORMAT ...
# extract gsite AAs as separate columns ...
spec_n_pep['gsite_AA1'] = spec_n_pep['gsite_seq'].str.get(0)
spec_n_pep['gsite_AA2'] = spec_n_pep['gsite_seq'].str.get(1)
spec_n_pep['gsite_AA3'] = spec_n_pep['gsite_seq'].str.get(2)
# locus protein_name uid Protein_ID_PERCENT peptides best_peptide Peptide_probability protease Expt_NUMBER prev_aa next_aa pept_start pept_stop Location match g_site gsite_start gsites_AA1_N gsites_AA2_XbutP gsites_AA3_TS Best Mascot Ion score Best Mascot Identity score Best Mascot Delta Ion score Prot_seq signal signal_loc tm_span protein length
requested_cols = ['gsite',
'pept',
'enzyme',
'start_fetched',
'prot_name',
'fetchid',
'fetchacc',
'uid_fetched',
'GN_fetched',
'pept_ident_probab',
'gsites_predicted',
'gsites_predicted_number',
'gsite_seq',
'gstart',
'gsite_AA1',
'gsite_AA2',
'gsite_AA3',
'signal',
'signal_loc',
'tm_span']
requested_cols = ['locus',
'prot_name',
'uid_fetched',
'prot_ident_probab',
# 'peptides', # THIS IS NEW STUFF ...
'pept',
'fetchid',
'fetchacc',
# 'best_pept', # THIS IS NEW STUFF ...
'pept_ident_probab', # BEWARE, pept ID % of the BEST PEPTIDE ...
'enzyme',
# 'experiment_num', # THIS IS NEW STUFF ...
###########################
'prev_aa',
'next_aa',
'prev_aa_fetched',
'next_aa_fetched',
'pept_start',
'pept_stop',
'start_fetched',
'stop_fetched',
###########################
'gsite_seq',
'gstart',
'gsite_AA1',
'gsite_AA2',
'gsite_AA3',
'Best Mascot Ion Score',
'Best Mascot Identity Score',
'Best Mascot Delta Ion Score',
'prot_seq', # PROTEIN SEQUENCE TO BE ADDED ...
'signal',
'signal_loc',
'tm_span',
'prot_len', # PROTEIN LENGTH TO BE ADDED ...
'SCORE',
'crit_pept_in']
# ###################################################
# # TO BE CONTINUED ...
THE_MOST_FINAL_DF = spec_n_pep[requested_cols].drop_duplicates().reset_index(drop=True)
# choose peptide with highest Pept_ident_probab
# Let's collapse (gsite,pept,fetchid) using the highest pept_ident_probab ...
THE_MOST_FINAL_DF_max_prob = THE_MOST_FINAL_DF.loc[THE_MOST_FINAL_DF.groupby(['gsite_seq','gstart','pept','fetchid','fetchacc','enzyme'],sort=False)['pept_ident_probab'].idxmax() ].reset_index(drop=True)
##################################################
# TO BE CONTINUED ....
#################################################
# BEST PEPTIDE CHOSING IS WORKING, FINISH ALL PEPTIDES AND OTHER STUFF ...
def choose_best_pept(df,dp_margin=1.0):
# pandas 0.18.1 broke something here: "TypeError: Series.name must be a hashable type" ...
# we're fixing it, introducing one more layer of control here ...
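    # Selection rule applied below ("Reid's rule"): keep the shortest peptide
    # unless its identification probability trails the most probable peptide's
    # by more than `dp_margin`, in which case return the most probable one.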
assert 'pept' in df.columns
assert 'pept_ident_probab' in df.columns
# this should work on any df with the 2 specified columns ...
if df.shape[0]>1:
shortes_idx = df['pept'].str.len().idxmin()
max_prob_idx = df['pept_ident_probab'].idxmax()
if df['pept_ident_probab'][shortes_idx] < (df['pept_ident_probab'][max_prob_idx]-dp_margin):
# return df['pept'][max_prob_idx]
# print "returning max_prob_idx",max_prob_idx
return max_prob_idx
else:
# return df['pept'][shortes_idx]
# print "returning shortes_idx",shortes_idx
return shortes_idx
else:
# print "returning one thing", df['pept_ident_probab'].idxmax()
return df['pept_ident_probab'].idxmax()
def unstack_pept(series):
return ','.join(series)
# fff=THE_MOST_FINAL_DF_max_prob.groupby(['gsite_seq','gstart','locus','enzyme'],sort=False).size()
# fff[fff>1]
# choosing BEST PEPTIDE containing given gsite
# according to the Reid's rule of best peptide ...
THE_MOST_FINAL_DF_uniq_pept = \
THE_MOST_FINAL_DF_max_prob.loc[
THE_MOST_FINAL_DF_max_prob.groupby(['gsite_seq',
'gstart',
'locus',
'enzyme'],sort=False).apply(choose_best_pept)
].reset_index(drop=True)
# given the best peptides are chosen, unstacking multiple peptides corresponding to the same gsite
# and forming a column 'peptides' ...
THE_MOST_FINAL_DF_uniq_pept['peptides'] = \
THE_MOST_FINAL_DF_max_prob.groupby(['gsite_seq',
'gstart',
'locus',
'enzyme'],sort=False)['pept'].apply(unstack_pept).reset_index(drop=True)
###############################
# RIGHT BEFORE FINAL RENAMINGS, ASSIGNMENTS AND OTHER STUFF, ADD MISSING DATA ENTRIES HERE ...
###############################
# THE_MOST_FINAL_DF_uniq_pept = THE_MOST_FINAL_DF_uniq_pept.append(THE_MOST_FINAL_DF[THE_MOST_FINAL_DF['SCORE']<100])
THE_MOST_FINAL_DF_uniq_pept = THE_MOST_FINAL_DF_uniq_pept.append(bad_info[bad_info['fetchid'].isnull()])
###############################
###############################
# rename pept to best_pept AND enzyme to protease ...
THE_MOST_FINAL_DF_uniq_pept = THE_MOST_FINAL_DF_uniq_pept.rename(columns={'pept':'best_pept','enzyme':'protease',})
# add experiment number, something new ...
THE_MOST_FINAL_DF_uniq_pept['exp_num'] = int(args.exp_num)
# location match instead of fetched/Scaffold-based results ...
THE_MOST_FINAL_DF_uniq_pept['loc_match'] = THE_MOST_FINAL_DF_uniq_pept['pept_start'] == THE_MOST_FINAL_DF_uniq_pept['start_fetched']
THE_MOST_FINAL_DF_uniq_pept['loc_match'] = THE_MOST_FINAL_DF_uniq_pept['loc_match'].map({True:'Y',False:'N'})
requested_cols = ['locus',
'prot_name',
'uid_fetched',
'prot_ident_probab',
'peptides', # THIS IS NEW STUFF ...
# 'pept',
# 'fetchid',
'fetchacc',
'best_pept', # THIS IS NEW STUFF ...
'pept_ident_probab', # BEWARE, pept ID % of the BEST PEPTIDE ...
'protease',
'exp_num', # THIS IS NEW STUFF ...
###########################
'prev_aa',
'next_aa',
# 'prev_aa_fetched',
# 'next_aa_fetched',
'pept_start',
'pept_stop',
'loc_match',
# 'start_fetched',
# 'stop_fetched',
###########################
'gsite_seq',
'gstart',
'gsite_AA1',
'gsite_AA2',
'gsite_AA3',
'Best Mascot Ion Score',
'Best Mascot Identity Score',
'Best Mascot Delta Ion Score',
'prot_seq', # PROTEIN SEQUENCE TO BE ADDED ...
'signal',
'signal_loc',
'tm_span',
'prot_len', # PROTEIN LENGTH TO BE ADDED ...
'SCORE',
'crit_pept_in']
with open(os.path.join(common_path,'FINAL_combined_output.csv'),'w') as fp:
# output formatted BAD stuff first ...
THE_MOST_FINAL_DF_uniq_pept[requested_cols].to_csv(fp,index=False)
#
fp.write("\n\nBAD and ambigous entries are over. Entries that worked out to follow:\n\n")
#
good_info.to_csv(fp,index=False)
# THE_MOST_FINAL_DF_uniq.to_csv(os.path.join(common_path,'FINAL_gsite_anthology.csv'),index=False)
# # DESIREd COLUMNS ...
# # ############################################
# # # columns that needs to be delivered ... #
# # ############################################
# # # A gsites, 1 per line
# # # B pept, 1 per line
# # # B1 enzyme, G or T, derive from 'Biological sample category', like this: {'TrypsinSample1':'T','GluC_Sample2':'G'}
# # # C peptide_start, 1 per line accordingly
# # # D all_uids, REPLACE WITH col:H
# # # E prot_seq, try to get those from NCBI, not from UniProt ...
# # # F protein, ??? sequence, name or what???
# # # G uid_max, UID for major form instead or something like that ...
# # # H prot_name, parsed out human-readable name from 'Protein name'
# # # H1 gene_name, parsed out GN=xxx from 'Protein name'
# # # I uniq_peptide_count, discard that column ...
# # # J pept_probability, output number not the string - this would be the criteria
# # # K gsites_predicted, OK
# # # L gsites_predicted_number, OK
# # # M gsite_start, beware of 0 or 1 type of indexing ...
# # # N,O,P - gsites AAs in separate columns
# # # M1, NOP combined, gsite sequence basically!
# # # Q signal, from GeneBank record on the protein, simply Y,N on whether there is a 'Signal' in gb.
# # # R signal_location, location of the signal from Q
# # # S tm_span, Y,N just for the fact of having TM span as a protein feature.
# # THINGS TO BE DONE:
# # 1) MERGE PEP_INFO WITH SPEC_INFO, SO THAT PEPT-PROT RELATIONSHIP WOULD BE SET UP ...
# # probably getting read of many-many columns in the spec_info along the way ...
# # 2) MODIFY AND TEST THE 'deamid_to_gsite' FUNCTION (PAY ATTENTION TO 1-BASED AND 0-BASED NUMBERING OF AAs)
# # 3) USE 'deamid_to_gsite' TO FINALLY EXTRACT GSITES ...
# # 4) ANALYSE(?) GSITES: GROUPBY UNIQUE GSITES (GSITE[seq],GSITE_START,PROT_IDENTIFICATOR) TO SEE HOW MANY PEPTS YIELD THE SAME GSITES ...
# # 5) SELECT A SINGLE PEPT PER GSITE??????? USING REID'S CRITERIA, OR LEAVE ALL GSITE-PEPTS PAIRS ???????????
# #######################################
# # 6) COMPLY WITH THE #columns that needs to be delivered...# FOR THE VERY VERY FINAL OUTPUT ...
# #######################################
|
mit
|
dr-nate/msmbuilder
|
msmbuilder/cluster/agglomerative.py
|
3
|
11303
|
# Author: Robert McGibbon <[email protected]>
# Contributors: Brooke Husic <[email protected]>
# Copyright (c) 2016, Stanford University
# All rights reserved.
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, print_function, division
import numpy as np
import six
import scipy.spatial.distance
import warnings
from msmbuilder import libdistance
from scipy.cluster.hierarchy import fcluster
from sklearn.utils import check_random_state
from sklearn.base import ClusterMixin, TransformerMixin
from . import MultiSequenceClusterMixin
from ..base import BaseEstimator
from fastcluster import linkage
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
__all__ = ['LandmarkAgglomerative']
def ward_pooling_function(x, cluster_cardinality, intra_cluster_sum):
normalization_factor = cluster_cardinality*(cluster_cardinality+1)/2
squared_sums = (x**2).sum(axis=1)
result_vector = ((cluster_cardinality * squared_sums -
intra_cluster_sum) / normalization_factor)
return result_vector
POOLING_FUNCTIONS = {
'average': lambda x, ignore1, ignore2: np.mean(x, axis=1),
'complete': lambda x, ignore1, ignore2: np.max(x, axis=1),
'single': lambda x, ignore1, ignore2: np.min(x, axis=1),
'ward': ward_pooling_function,
}
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def pdist(X, metric='euclidean'):
if isinstance(metric, six.string_types):
return libdistance.pdist(X, metric)
n = len(X)
d = np.empty((n, n))
for i in range(n):
d[i, :] = metric(X, X, i)
return scipy.spatial.distance.squareform(d, checks=False)
def cdist(XA, XB, metric='euclidean'):
if isinstance(metric, six.string_types):
return libdistance.cdist(XA, XB, metric)
nA, nB = len(XA), len(XB)
d = np.empty((nA, nB))
for i in range(nA):
d[i, :] = metric(XB, XA, i)
return d
#-----------------------------------------------------------------------------
# Main Code
#-----------------------------------------------------------------------------
class _LandmarkAgglomerative(ClusterMixin, TransformerMixin):
"""Landmark-based agglomerative hierarchical clustering
Landmark-based agglomerative clustering is a simple scalable version of
"standard" hierarchical clustering which doesn't require computing the full
matrix of pairwise distances between all data points. The idea is
basically to subsample only ``n_landmarks`` "landmark"
data points, cluster them, and then assign labels to the remaining data
points based on their distances to (and the labels of) the landmarks.
Parameters
----------
n_clusters : int
The number of clusters to find.
n_landmarks : int, optional
Memory-saving approximation. Instead of actually clustering every
point, we instead select n_landmark points either randomly or by
striding the data matrix (see ``landmark_strategy``). Then we cluster
the only the landmarks, and then assign the remaining dataset based
on distances to the landmarks. Note that n_landmarks=None is equivalent
to using every point in the dataset as a landmark.
linkage : {'single', 'complete', 'average', 'ward'}, default='average'
Which linkage criterion to use. The linkage criterion determines which
distance to use between sets of observation. The algorithm will merge
the pairs of cluster that minimize this criterion.
- average uses the average of the distances of each observation of
the two sets.
- complete or maximum linkage uses the maximum distances between
all observations of the two sets.
- single uses the minimum distance between all observations of the
two sets.
- ward linkage minimizes the within-cluster variance
        The linkage also affects the predict() method and the use of landmarks.
After computing the distance from each new data point to the landmarks,
the new data point will be assigned to the cluster that minimizes the
linkage function between the new data point and each of the landmarks.
(i.e with ``single``, new data points will be assigned the label of
the closest landmark, with ``average``, it will be assigned the label
of the landmark s.t. the mean distance from the test point to all the
landmarks with that label is minimized, etc.)
metric : string or callable, default= "euclidean"
Metric used to compute the distance between samples.
landmark_strategy : {'stride', 'random'}, default='stride'
Method for determining landmark points. Only matters when n_landmarks
is not None. "stride" takes landmarks every n-th data point in X, and
random selects them uniformly at random.
random_state : integer or numpy.RandomState, optional
The generator used to select random landmarks. Only used if
landmark_strategy=='random'. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
max_landmarks : int, optional, default=None
Useful for hyperparameter searching. If n_clusters exceeds n_landmarks,
max_landmarks will be used. Otherwise, n_landmarks will be used. If
None, no cutoff is enforced on n_landmarks, which may result in memory
issues.
ward_predictor : {'single', 'complete', 'average', 'ward'}, default='ward'
Which criterion to use when predicting cluster assignments after
fitting with ward linkage.
References
----------
.. [1] Mullner, D. "Modern hierarchical, agglomerative clustering
algorithms." arXiv:1109.2378 (2011).
Attributes
----------
landmark_labels_
landmarks_
"""
def __init__(self, n_clusters, n_landmarks=None, linkage='average',
metric='euclidean', landmark_strategy='stride',
random_state=None, max_landmarks=None, ward_predictor='ward'):
self.n_clusters = n_clusters
self.n_landmarks = n_landmarks
self.metric = metric
self.landmark_strategy = landmark_strategy
self.random_state = random_state
self.linkage = linkage
self.max_landmarks = max_landmarks
self.ward_predictor = ward_predictor
self.landmark_labels_ = None
self.landmarks_ = None
def fit(self, X, y=None):
"""
Compute agglomerative clustering.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Returns
-------
self
"""
if self.max_landmarks is not None:
if self.n_clusters > self.n_landmarks:
self.n_landmarks = self.max_landmarks
if self.n_landmarks is None:
distances = pdist(X, self.metric)
tree = linkage(distances, method=self.linkage)
self.landmark_labels_ = fcluster(tree, criterion='maxclust',
t=self.n_clusters) - 1
self.cardinality_ = np.bincount(self.landmark_labels_)
self.squared_distances_within_cluster_ = np.zeros(self.n_clusters)
n = len(X)
for k in range(len(distances)):
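                # Map the condensed pdist() index k back to the square-form
                # pair (i, j) without materializing the full distance matrix.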
i = int(n - 2 - np.floor(np.sqrt(-8*k + 4*n*(n-1)-7)/2.0 - 0.5))
j = int(k + i + 1 - n*(n-1)/2 + (n-i)*((n-i)-1)/2)
if self.landmark_labels_[i] == self.landmark_labels_[j]:
self.squared_distances_within_cluster_[
self.landmark_labels_[i]] += distances[k] ** 2
self.landmarks_ = X
else:
if self.landmark_strategy == 'random':
land_indices = check_random_state(self.random_state).randint(
len(X), size=self.n_landmarks)
else:
land_indices = np.arange(len(X))[::(len(X) //
self.n_landmarks)][:self.n_landmarks]
distances = pdist(X[land_indices], self.metric)
tree = linkage(distances, method=self.linkage)
self.landmark_labels_ = fcluster(tree, criterion='maxclust',
t=self.n_clusters) - 1
self.cardinality_ = np.bincount(self.landmark_labels_)
self.squared_distances_within_cluster_ = np.zeros(self.n_clusters)
n = len(X[land_indices])
for k in range(len(distances)):
i = int(n - 2 - np.floor(np.sqrt(-8*k + 4*n*(n-1)-7)/2.0 - 0.5))
j = int(k + i + 1 - n*(n-1)/2 + (n-i)*((n-i)-1)/2)
if self.landmark_labels_[i] == self.landmark_labels_[j]:
self.squared_distances_within_cluster_[
self.landmark_labels_[i]] += distances[k] ** 2
self.landmarks_ = X[land_indices]
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
dists = cdist(X, self.landmarks_, self.metric)
pfunc_name = self.ward_predictor if self.linkage == 'ward' else self.linkage
try:
pooling_func = POOLING_FUNCTIONS[pfunc_name]
except KeyError:
raise ValueError("linkage {} is not supported".format(pfunc_name))
pooled_distances = np.empty(len(X))
pooled_distances.fill(np.infty)
labels = np.zeros(len(X), dtype=int)
for i in range(self.n_clusters):
if np.any(self.landmark_labels_ == i):
d = pooling_func(dists[:, self.landmark_labels_ == i],
self.cardinality_[i],
self.squared_distances_within_cluster_[i])
if np.any(d < 0):
warnings.warn("Distance shouldn't be negative.")
mask = (d < pooled_distances)
pooled_distances[mask] = d[mask]
labels[mask] = i
else:
print("No data points were assigned to cluster {}".format(i))
return labels
def fit_predict(self, X):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
self.fit(X)
return self.predict(X)
class LandmarkAgglomerative(MultiSequenceClusterMixin, _LandmarkAgglomerative,
BaseEstimator):
__doc__ = _LandmarkAgglomerative.__doc__
_allow_trajectory = True
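# Usage sketch (hypothetical data; the import path is assumed from the package
# layout above, not verified here):
#
#     from msmbuilder.cluster import LandmarkAgglomerative
#     cluster = LandmarkAgglomerative(n_clusters=4, n_landmarks=500, linkage='ward')
#     labels = cluster.fit_predict(trajectories)  # list of (n_frames, n_features) arrays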
|
lgpl-2.1
|
welch/seasonal
|
examples/hw.py
|
1
|
7312
|
#
# holt-winters forecasting
#
# H-W is a structural timeseries model with level, trend, and seasonal
# components that are estimated by exponential smoothing as data
# arrives (https://en.wikipedia.org/wiki/Exponential_smoothing).
# Only the additive seasonality model is implemented.
#
from sys import stderr
from copy import deepcopy
from collections import namedtuple
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
import seasonal
HWState = namedtuple('HWState', 't level trend seasons')
HWParams = namedtuple('HWParams', 'alpha beta gamma')
def estimate_state(data):
"""estimate initial state for Holt Winters
HWState estimates are for t=-1, the step before y[0].
Parameters
----------
data : ndarray
observations
"""
seasons, trended = seasonal.fit_seasons(data)
if seasons is None:
seasons = np.zeros(1)
trend = trended[1] - trended[0]
level = trended[0] - trend
return HWState(-1, level, trend, seasons)
def forecast(state, steps=1):
"""return a single or multi-step forecast from the current state
Parameters
----------
state : HWState
current model state
steps : int
number of steps out to forecast
"""
season = state.seasons[(state.t + steps) % len(state.seasons)]
return state.level + state.trend * steps + season
def advance(y, params, state):
"""incorporate the next observation into the state estimate.
This returns updated state, using Hyndman's error correction form of H-W [1]
It mutates state's seasonal array.
Parameters
----------
y : float
observed value at time state.t + 1
params : HWParams
alpha, beta, gamma params for HW
state : HWState
current HW state
Returns
-------
state, err : HWState, float
state: updated state
one-step forecast error for y
References
----------
.. [1] https://www.otexts.org/fpp/7/5, Holt-Winters additive method
"""
seasons = state.seasons
e = y - forecast(state)
level = state.level + state.trend + params.alpha * e
trend = state.trend + params.alpha * params.beta * e
seasons[(state.t + 1) % len(state.seasons)] += params.gamma * e
# in a proper implementation, we would enforce seasons being 0-mean.
return HWState(state.t+1, level, trend, seasons), e
def estimate_params(data, state, alpha0=0.3, beta0=0.1, gamma0=0.1):
"""Estimate Holt Winters parameters from data
Parameters
----------
data : ndarray
observations
state : HWState
initial state for HW (one step prior to first data value)
alpha0, beta0, gamma0 : float, float, float
initial guess for HW parameters
Returns
-------
params : HWParams
Notes
-----
    This is not a demo about estimating Holt Winters parameters, and
this is not a great way to go about it, because it does not
produce good out-of-sample error. In this demo, we unrealistically
train the HW parameters over all the data, not just the training
prefix used for the initial seasonal state estimate.
"""
def _forecast_error(x0, state, data):
"""bfgs HW parameter error callback."""
E = 0
state = deepcopy(state)
params = HWParams(*x0)
for y in data:
state, e = advance(y, params, state)
E += e * e
return E / len(data)
alpha, beta, gamma = fmin_l_bfgs_b(
_forecast_error, x0=[alpha0, beta0, gamma0], bounds=[[0, 1]] * 3,
args=(state, data), approx_grad=True)[0]
return HWParams(alpha, beta, gamma)
def hw(data, split=None, params=None):
"""fit a HW model and return the 1-step forecast and smoothed series.
Parameters
----------
data : array of float
observations
split : number
initialize using the leading split*100% of the data (if split <=1.0)
or N=split points (if split > 1)
Returns
-------
forecast, smoothed : ndarray, ndarray
"""
if split is None:
splitidx = len(data)
elif split > 1.0:
splitidx = int(split)
else:
splitidx = int(split * len(data))
state = estimate_state(data[:splitidx])
print "||seasons|| = {:.3f}".format(np.sqrt(np.sum(state.seasons ** 2)))
if params is None:
params = estimate_params(data, state)
print "estimated alpha={:.3f}, beta={:.3f}, gamma={:.3f}".format(*params)
level = np.empty(len(data))
fcast = np.empty(len(data))
for y in data:
yhat = forecast(state)
state, _ = advance(y, params, state)
level[state.t], fcast[state.t] = state.level, yhat
print "RMSE = ", np.sqrt(np.sum((fcast - data) ** 2) / len(data))
print "final ||seasons|| = {:.3f}".format(np.sqrt(np.sum(state.seasons ** 2)))
return fcast, level
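# Example sketch (synthetic series, not part of the original demo):
#
#     t = np.arange(400)
#     y = 10 + 0.02 * t + np.sin(2 * np.pi * t / 25) + 0.2 * np.random.randn(400)
#     fcast, level = hw(y, split=0.25)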
def main():
import os.path
from optparse import OptionParser
from textwrap import dedent
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
from seasonal.application import read_csv
parser = OptionParser(
usage="usage: %prog [options] csv-files...",
description="Holt-Winters demo using the seasonal package"
)
parser.add_option("--column", default=-1,
help="csv column to use (name or 0-based index, default rightmost)")
parser.add_option("--split", type="float", default=None,
help=("split data at the split*100% or int(split) " +
"point for initialization"))
parser.add_option("--params",
help=("comma-separated list of alpha, beta, gamma. " +
"default is to estimate these from ALL the data"))
parser.add_option("--demo", action="store_true",
help="demonstrate with some air passenger data")
(options, args) = parser.parse_args()
if options.demo:
args = [os.path.join(os.path.dirname(seasonal.__file__),
"data/airPassengers.csv")]
if options.split is None:
options.split = 0.20
if not args:
parser.print_help()
exit(-1)
if not plt:
stderr.write(
"Error: matplotlib must be installed\n")
exit(-1)
if options.params is not None:
try:
params = [float(p) for p in options.params.split(',')]
options.params = HWParams(*params)
except Exception:
stderr.write("\nError: --params wants alpha,beta,gamma\n")
parser.print_help()
exit(-1)
for csvpath in args:
index, data, column = read_csv(csvpath, column=options.column)
fcast, smoothed = hw(data, options.split, params=options.params)
plt.figure(1)
plt.subplot(211)
plt.title("Holt Winters for "+os.path.basename(csvpath))
plt.plot(index, data, label=column)
plt.plot(index, fcast, label="forecast")
plt.plot(index, smoothed, label="smoothed")
leg = plt.legend(loc='upper left')
leg.get_frame().set_alpha(0.5)
plt.subplot(212)
plt.title("Forecast Error")
plt.plot(index, fcast - data)
plt.show()
if __name__ == "__main__":
main()
|
mit
|
meduz/scikit-learn
|
examples/model_selection/plot_roc.py
|
102
|
5056
|
"""
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
lw = 2
plt.plot(fpr[2], tpr[2], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
##############################################################################
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
plt.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
|
hrjn/scikit-learn
|
benchmarks/bench_lof.py
|
49
|
3548
|
"""
============================
LocalOutlierFactor benchmark
============================
A test of LocalOutlierFactor on classical anomaly detection datasets.
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import LocalOutlierFactor
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle as sh
print(__doc__)
np.random.seed(2)
# datasets available: ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
datasets = ['shuttle']
novelty_detection = True # if False, training set polluted by outliers
for dataset_name in datasets:
# loading and vectorization
print('loading data')
if dataset_name in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dataset_name, shuffle=True,
percent10=False)
X = dataset.data
y = dataset.target
if dataset_name == 'shuttle':
dataset = fetch_mldata('shuttle')
X = dataset.data
y = dataset.target
X, y = sh(X, y)
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
if dataset_name == 'forestcover':
dataset = fetch_covtype(shuffle=True)
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print('vectorizing data')
if dataset_name == 'SF':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != 'normal.').astype(int)
if dataset_name == 'SA':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
lb.fit(X[:, 2])
x2 = lb.transform(X[:, 2])
lb.fit(X[:, 3])
x3 = lb.transform(X[:, 3])
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != 'normal.').astype(int)
if dataset_name == 'http' or dataset_name == 'smtp':
y = (y != 'normal.').astype(int)
n_samples, n_features = np.shape(X)
n_samples_train = n_samples // 2
n_samples_test = n_samples - n_samples_train
X = X.astype(float)
X_train = X[:n_samples_train, :]
X_test = X[n_samples_train:, :]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
if novelty_detection:
X_train = X_train[y_train == 0]
y_train = y_train[y_train == 0]
print('LocalOutlierFactor processing...')
model = LocalOutlierFactor(n_neighbors=20)
tstart = time()
model.fit(X_train)
fit_time = time() - tstart
tstart = time()
scoring = -model.decision_function(X_test) # the lower, the more normal
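    # roc_curve below therefore receives scores that are larger for samples the
    # model regards as more anomalous, matching y_test == 1 for outliers.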
predict_time = time() - tstart
fpr, tpr, thresholds = roc_curve(y_test, scoring)
AUC = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1,
label=('ROC for %s (area = %0.3f, train-time: %0.2fs,'
'test-time: %0.2fs)' % (dataset_name, AUC, fit_time,
predict_time)))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
|
loli/sklearn-ensembletrees
|
benchmarks/bench_plot_neighbors.py
|
10
|
6499
|
"""
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    X = get_data(N, D, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure('scikit-learn nearest neighbors benchmark results',
figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
|
bsd-3-clause
|
bobquest33/peach
|
tutorial/optimization/quasi-newton-optimization.py
|
6
|
3006
|
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: quasi-newton-optimization.py
# Optimization of two-variable functions by quasi-newton methods
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import peach as p
# This is a simplified version of the Rosenbrock function, to demonstrate
# how the optimizers work.
def f(xy):
x, y = xy
return (1.-x)**2. + (y-x*x)**2.
# Gradient of Rosenbrock function
def df(xy):
x, y = xy
return array( [ -2.*(1.-x) - 4.*x*(y - x*x), 2.*(y - x*x) ])
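# Quick numerical sanity check of the analytic gradient above, using central
# differences at an arbitrary point (the step 1e-6 and the point (0.3, 0.7)
# are illustrative choices, not part of the optimization below).
_h = 1e-6
_p = array([ 0.3, 0.7 ])
_num_df = array([ (f(_p + _h*array([ 1., 0. ])) - f(_p - _h*array([ 1., 0. ]))) / (2.*_h),
                  (f(_p + _h*array([ 0., 1. ])) - f(_p - _h*array([ 0., 1. ]))) / (2.*_h) ])
# _num_df and df(_p) should agree to within roughly 1e-6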
# We will allow no more than 200 iterations. For the simplified Rosenbrock
# function, no more than that will be needed.
iMax = 200
# The first estimate of the minimum is given by the DFP method. Notice that the
# creation of the optimizer is virtually the same as every other. We could, in
# this and in the other optimizers, omit the derivative function and let Peach
# estimate it for us.
dfp = p.DFP(f, (0.1, 0.2), df=df)
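# As noted above, the derivative argument may be omitted, in which case Peach
# estimates it numerically; that call would simply read:
#
#   dfp = p.DFP(f, (0.1, 0.2))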
xd = [ 0.1 ]
yd = [ 0.2 ]
i = 0
while i < iMax:
x, e = dfp.step()
xd.append(x[0])
yd.append(x[1])
i = i + 1
xd = array(xd)
yd = array(yd)
# We now try the BFGS optimizer.
bfgs = p.BFGS(f, (0.1, 0.2), df=df)
xb = [ 0.1 ]
yb = [ 0.2 ]
i = 0
while i < iMax:
x, e = bfgs.step()
xb.append(x[0])
yb.append(x[1])
i = i + 1
xb = array(xb)
yb = array(yb)
# Last but not least, the SR1 optimizer
sr1 = p.SR1(f, (0.1, 0.2), df=df)
xs = [ 0.1 ]
ys = [ 0.2 ]
i = 0
while i < iMax:
x, e = sr1.step()
xs.append(x[0])
ys.append(x[1])
i = i + 1
xs = array(xs)
ys = array(ys)
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the trajectories followed by the three optimizers over the contour
# lines of the objective function. The plot is saved in the file
# ``quasi-newton-optimization.png``.
# These commands are used to create the functions that will plot the contour
# lines of the Rosenbrock function.
x = linspace(0., 2., 250)
y = linspace(0., 2., 250)
x, y = meshgrid(x, y)
z = (1-x)**2 + (y-x*x)**2
levels = exp(linspace(0., 2., 10)) - 0.9
try:
from matplotlib import *
from matplotlib.pylab import *
figure(1).set_size_inches(6, 6)
a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
a1.hold(True)
a1.grid(True)
a1.plot(xd, yd)
a1.plot(xb, yb)
a1.plot(xs, ys)
a1.contour(x, y, z, levels, colors='k', linewidths=0.75)
a1.legend([ 'DFP', 'BFGS', 'SR1' ])
a1.set_xlim([ 0., 2. ])
a1.set_xticks([ 0., 0.5, 1., 1.5, 2. ])
a1.set_ylim([ 0., 2. ])
a1.set_yticks([ 0.5, 1., 1.5, 2. ])
savefig("quasi-newton-optimization.png")
except ImportError:
print "DFP Optimizer: ", (xd[-1], yd[-1])
print "BFGS Optimizer: ", (xb[-1], yb[-1])
print "SR1 Optimizer: ", (xs[-1], ys[-1])
|
lgpl-2.1
|
ishank08/scikit-learn
|
sklearn/datasets/__init__.py
|
61
|
3734
|
"""
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_breast_cancer
from .base import load_boston
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_sample_images
from .base import load_sample_image
from .base import load_wine
from .base import get_data_home
from .base import clear_data_home
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'fetch_kddcup99',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_breast_cancer',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'load_wine',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
|
bsd-3-clause
|
phdowling/scikit-learn
|
examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
|
218
|
3893
|
"""
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
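# Note on the two steps above: the fitted ridge has one coefficient per Ward
# cluster (the selected ward__n_clusters), and inverse_transform assigns each
# cluster's coefficient to every pixel belonging to that cluster, giving a
# vector of length size ** 2 that reshapes to the (size, size) image below.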
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
|
bsd-3-clause
|
radiasoft/radtrack
|
tests/ProcessSRWOutput_test.py
|
1
|
3153
|
# -*- coding: utf-8 -*-
#C:\d from old\RadiaBeam\RadSoft\radtrack\tests>py.test ProcessSRWOutput_test.py
u"""PyTest for :mod:`radiasoft.AnalyticCalc`
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from io import open
import os
import numpy as np
from matplotlib.pylab import figure, plot, grid, plt
import pytest
from pykern.pkdebug import pkdc, pkdp
from pykern import pkunit
from pykern import pkyaml
from radtrack.srw import AnalyticCalc
def str2num(s):
return float(s) if "." in s else int(s)
def maxelements(seq):
''' Return list of position(s) of largest element '''
max_indices = []
if seq:
max_val = seq[0]
for i,val in ((i,val) for i,val in enumerate(seq) if val >= max_val):
if val == max_val:
max_indices.append(i)
else:
max_val = val
max_indices = [i]
return max_indices
def FindingArrayMaxima(seq, deltI):
pkdc('size:',np.shape(seq)[0])
if seq:
j=0
maxV=[]
maxI=[]
maxIj=0
for i in xrange(0, np.shape(seq)[0]-deltI-1):
pkdc(i, seq[i])
if seq[i+1]>seq[i]:
maxVj=seq[i+1]
maxIj=i+1
pkdc(maxIj)
if (maxIj>0) & (i>maxIj+deltI):
j=j+1
maxV.append(maxVj)
maxI.append(maxIj)
maxIj=1000000
else:
print('Input array is empty')
    pkdc('Maximum # and Intensity of UR harmonics: ', maxI, maxV)
return (maxV, maxI)
def Path_Length(x,y):
Path_Len=0
for i in xrange(0, np.shape(x)[0]-1):
Path_Len=Path_Len+ np.sqrt((y[i+1]-y[i])**2*1E-6 + (x[i+1]-x[i])**2)
return (Path_Len)
def test_1():
d = pkunit.data_dir()
## Testing actual SRW calculations
##Reading SRW data SPECTRUM
IFileName="Spectrum.txt"
f=open(str(d.join(IFileName)),"r")#,1000)
e_p=[]
I_rad=[]
for line in f.readlines():
words = line.split()
e_p.append(words[0])
I_rad.append(words[1])
I_radf=map(float,I_rad)
maxI=max(I_radf)
pkdc(I_radf)
print('Spectral Amplitude, ph/s/mrad2',maxI)
pkdc(I_radf.index(max(I_radf)))
maxIn=maxelements(I_radf)
(maxV, maxI)=FindingArrayMaxima(I_radf,5)
print(maxI, maxV)
f.close()
##Reading SRW data TRAJECTORY
IFileName="Trajectory.txt"
f=open(str(d.join(IFileName)),"r")#,10000)
z_dist=[]
x_traj=[]
for line in f.readlines():
words = line.split()
z_dist.append(words[0])
x_traj.append(words[1])
x_trajectory=map(float, x_traj)
z_distance=map(float, z_dist)
minX=min(x_trajectory)
maxX=max(x_trajectory)
minZ=min(z_distance)
maxZ=max(z_distance)
    print('Length of ID, m', maxZ - minZ)
print('Oscillation Amplitude, mm',(maxX-minX)/2)
L_trajectory=Path_Length(z_distance, x_trajectory)
print('Length of Trajectory, m', L_trajectory)
f.close()
##Plotting
plot(e_p,I_rad)
j=0
for i in maxI:
plt.scatter(e_p[i], maxV[j], color='red')
j=j+1
# title(TitleP)
# xlabel(Xlab)
# ylabel(Ylab)
grid()
plt.show(block=False)
plot(z_dist,x_trajectory,'.b',linestyle="-")
(maxVt, maxIt)=FindingArrayMaxima(map(float,x_trajectory),20)
pkdc(maxIt, maxVt)
j=0
for i in maxIt:
plt.scatter(z_dist[i], maxVt[j], color='red')
j=j+1
grid()
plt.show(block=False)
|
apache-2.0
|
fredhusser/scikit-learn
|
sklearn/setup.py
|
225
|
2856
|
import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs needs cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
tmhm/scikit-learn
|
examples/svm/plot_svm_scale_c.py
|
223
|
5375
|
"""
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>` to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function depends on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different numbers of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
|
bsd-3-clause
|
thunderhoser/GewitterGefahr
|
gewittergefahr/plotting/radar_plotting.py
|
1
|
35709
|
"""Plotting methods for radar data."""
import copy
import numpy
from matplotlib import pyplot
import matplotlib.colors
from gewittergefahr.gg_utils import grids
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import input_examples
from gewittergefahr.plotting import plotting_utils
SHEAR_VORT_DIV_NAMES = [
radar_utils.VORTICITY_NAME, radar_utils.DIVERGENCE_NAME,
radar_utils.LOW_LEVEL_SHEAR_NAME, radar_utils.MID_LEVEL_SHEAR_NAME
]
METRES_TO_KM = 0.001
KM_TO_KILOFEET = 3.2808
PER_SECOND_TO_PER_KILOSECOND = 1e3
DEFAULT_FONT_SIZE = 20
DEFAULT_OPACITY = 1.
DEFAULT_COLOUR_BAR_LENGTH = 0.8
TEXT_BOUNDING_BOX_DICT = {
'facecolor': 'white',
'alpha': 0.7,
'edgecolor': 'black',
'linewidth': 2,
'boxstyle': 'round'
}
def _get_friendly_colours():
"""Returns colours in colourblind-friendly scheme used by GridRad viewer.
:return: colour_list: 1-D list, where each element is a numpy array with the
[R, G, B] values in that order.
"""
colour_list = [
[242, 247, 233], [220, 240, 212], [193, 233, 196], [174, 225, 196],
[156, 218, 205], [138, 200, 211], [122, 163, 204], [106, 119, 196],
[112, 92, 189], [137, 78, 182], [167, 64, 174], [167, 52, 134],
[160, 41, 83], [153, 30, 30]
]
for i in range(len(colour_list)):
colour_list[i] = numpy.array(colour_list[i], dtype=float) / 255
return colour_list
def _get_modern_colours():
"""Returns colours in "modern" scheme used by GridRad viewer.
:return: colour_list: See doc for `_get_friendly_colours`.
"""
colour_list = [
[0, 0, 0], [64, 64, 64], [131, 131, 131], [0, 24, 255],
[0, 132, 255], [0, 255, 255], [5, 192, 127], [5, 125, 0],
[105, 192, 0], [255, 255, 0], [255, 147, 8], [255, 36, 15],
[255, 0, 255], [255, 171, 255]
]
for i in range(len(colour_list)):
colour_list[i] = numpy.array(colour_list[i], dtype=float) / 255
return colour_list
def _get_reflectivity_colour_scheme():
"""Returns colour scheme for reflectivity.
:return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`.
:return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`.
"""
colour_list = [
[4, 233, 231], [1, 159, 244], [3, 0, 244], [2, 253, 2],
[1, 197, 1], [0, 142, 0], [253, 248, 2], [229, 188, 0],
[253, 149, 0], [253, 0, 0], [212, 0, 0], [188, 0, 0],
[248, 0, 253], [152, 84, 198]
]
for i in range(len(colour_list)):
colour_list[i] = numpy.array(colour_list[i], dtype=float) / 255
colour_map_object = matplotlib.colors.ListedColormap(colour_list)
colour_map_object.set_under(numpy.full(3, 1))
colour_bounds_dbz = numpy.array([
0.1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70
])
colour_norm_object = matplotlib.colors.BoundaryNorm(
colour_bounds_dbz, colour_map_object.N)
return colour_map_object, colour_norm_object
def _get_zdr_colour_scheme():
"""Returns colour scheme for Z_DR (differential reflectivity).
:return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`.
:return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`.
"""
colour_list = _get_modern_colours()
colour_map_object = matplotlib.colors.ListedColormap(colour_list)
colour_bounds_db = numpy.array([
-1, -0.5, 0, 0.25, 0.5, 0.75, 1, 1.25, 1.5, 1.75, 2, 2.5, 3, 3.5, 4
])
colour_norm_object = matplotlib.colors.BoundaryNorm(
colour_bounds_db, colour_map_object.N)
return colour_map_object, colour_norm_object
def _get_kdp_colour_scheme():
"""Returns colour scheme for K_DP (specific differential phase).
:return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`.
:return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`.
"""
colour_list = _get_modern_colours()
colour_map_object = matplotlib.colors.ListedColormap(colour_list)
colour_bounds_deg_km01 = numpy.array([
-1, -0.5, 0, 0.25, 0.5, 0.75, 1, 1.25, 1.5, 1.75, 2, 2.5, 3, 3.5, 4
])
colour_norm_object = matplotlib.colors.BoundaryNorm(
colour_bounds_deg_km01, colour_map_object.N)
return colour_map_object, colour_norm_object
def _get_rho_hv_colour_scheme():
"""Returns colour scheme for rho_hv (cross-polar correlation coefficient).
:return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`.
:return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`.
"""
colour_list = _get_modern_colours()
colour_map_object = matplotlib.colors.ListedColormap(colour_list)
colour_map_object.set_under(numpy.full(3, 1))
colour_bounds_unitless = numpy.array([
0.7, 0.75, 0.8, 0.85, 0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97,
0.98, 0.99, 1
])
colour_norm_object = matplotlib.colors.BoundaryNorm(
colour_bounds_unitless, colour_map_object.N)
return colour_map_object, colour_norm_object
def _get_spectrum_width_colour_scheme():
"""Returns colour scheme for velocity-spectrum width.
:return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`.
:return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`.
"""
colour_list = _get_friendly_colours()
colour_map_object = matplotlib.colors.ListedColormap(colour_list)
colour_map_object.set_under(numpy.full(3, 1))
colour_bounds_m_s01 = numpy.array([
0.1, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 5, 6, 7, 8, 9, 10
])
colour_norm_object = matplotlib.colors.BoundaryNorm(
colour_bounds_m_s01, colour_map_object.N)
return colour_map_object, colour_norm_object
def _get_vorticity_colour_scheme():
"""Returns colour scheme for vorticity.
:return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`.
:return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`.
"""
colour_list = [
[0, 0, 76.5], [0, 0, 118.5], [0, 0, 163.3], [0, 0, 208.1],
[0, 0, 252.9], [61, 61, 255], [125, 125, 255], [189, 189, 255],
[253, 253, 255], [255, 193, 193], [255, 129, 129], [255, 65, 65],
[255, 1, 1], [223.5, 0, 0], [191.5, 0, 0], [159.5, 0, 0],
[127.5, 0, 0], [95.5, 0, 0]
]
for i in range(len(colour_list)):
colour_list[i] = numpy.array(colour_list[i], dtype=float) / 255
colour_map_object = matplotlib.colors.ListedColormap(colour_list)
colour_bounds_ks01 = numpy.array([
-7, -6, -5, -4, -3, -2, -1.5, -1, -0.5, 0.5, 1, 1.5, 2, 3, 4, 5, 6, 7
])
colour_norm_object = matplotlib.colors.BoundaryNorm(
colour_bounds_ks01, colour_map_object.N)
return colour_map_object, colour_norm_object
def _get_az_shear_colour_scheme():
"""Returns colour scheme for azimuthal shear.
:return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`.
:return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`.
"""
colour_list = [
[0, 0, 76.5], [0, 0, 118.5], [0, 0, 163.3], [0, 0, 208.1],
[0, 0, 252.9], [61, 61, 255], [125, 125, 255], [189, 189, 255],
[253, 253, 255], [255, 193, 193], [255, 129, 129], [255, 65, 65],
[255, 1, 1], [223.5, 0, 0], [191.5, 0, 0], [159.5, 0, 0],
[127.5, 0, 0], [95.5, 0, 0]
]
for i in range(len(colour_list)):
colour_list[i] = numpy.array(colour_list[i], dtype=float) / 255
colour_map_object = matplotlib.colors.ListedColormap(colour_list)
colour_bounds_ks01 = 2 * numpy.array([
-7, -6, -5, -4, -3, -2, -1.5, -1, -0.5, 0.5, 1, 1.5, 2, 3, 4, 5, 6, 7
])
colour_norm_object = matplotlib.colors.BoundaryNorm(
colour_bounds_ks01, colour_map_object.N)
return colour_map_object, colour_norm_object
def _get_divergence_colour_scheme():
"""Returns colour scheme for divergence.
:return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`.
:return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`.
"""
return _get_vorticity_colour_scheme()
def _get_echo_top_colour_scheme():
"""Returns colour scheme for echo top.
:return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`.
:return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`.
"""
colour_list = [
[120, 120, 120], [16, 220, 244], [11, 171, 247], [9, 144, 202],
[48, 6, 134], [4, 248, 137], [10, 185, 6], [1, 241, 8],
[255, 186, 1], [255, 251, 0], [132, 17, 22], [233, 16, 1]
]
for i in range(len(colour_list)):
colour_list[i] = numpy.array(colour_list[i], dtype=float) / 255
colour_map_object = matplotlib.colors.ListedColormap(colour_list)
colour_map_object.set_under(numpy.full(3, 1))
colour_bounds_kft = numpy.array([
0.1, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65
])
colour_norm_object = matplotlib.colors.BoundaryNorm(
colour_bounds_kft, colour_map_object.N)
return colour_map_object, colour_norm_object
def _get_mesh_colour_scheme():
"""Returns colour scheme for MESH (maximum estimated size of hail).
:return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`.
:return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`.
"""
colour_list = [
[152, 152, 152], [152, 203, 254], [0, 152, 254], [0, 45, 254],
[0, 101, 0], [0, 152, 0], [0, 203, 0], [254, 254, 50],
[254, 203, 0], [254, 152, 0], [254, 0, 0], [254, 0, 152],
[152, 50, 203]
]
for i in range(len(colour_list)):
colour_list[i] = numpy.array(colour_list[i], dtype=float) / 255
colour_map_object = matplotlib.colors.ListedColormap(colour_list)
colour_map_object.set_under(numpy.full(3, 1))
colour_bounds_mm = numpy.array([
0.1, 15.9, 22.2, 28.6, 34.9, 41.3, 47.6, 54, 60.3, 65, 70, 75, 80, 85
])
colour_norm_object = matplotlib.colors.BoundaryNorm(
colour_bounds_mm, colour_map_object.N)
return colour_map_object, colour_norm_object
def _get_shi_colour_scheme():
"""Returns colour scheme for SHI (severe-hail index).
:return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`.
:return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`.
"""
colour_list = [
[152, 152, 152], [152, 203, 254], [0, 152, 254], [0, 45, 254],
[0, 101, 0], [0, 152, 0], [0, 203, 0], [254, 254, 50],
[254, 203, 0], [254, 152, 0], [254, 0, 0], [254, 0, 152],
[152, 50, 203], [101, 0, 152]
]
for i in range(len(colour_list)):
colour_list[i] = numpy.array(colour_list[i], dtype=float) / 255
colour_map_object = matplotlib.colors.ListedColormap(colour_list)
colour_map_object.set_under(numpy.full(3, 1))
colour_bounds_unitless = numpy.array([
1, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330, 360, 390, 420
])
colour_norm_object = matplotlib.colors.BoundaryNorm(
colour_bounds_unitless, colour_map_object.N)
return colour_map_object, colour_norm_object
def _get_vil_colour_scheme():
"""Returns colour scheme for VIL (vertically integrated liquid).
:return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`.
:return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`.
"""
colour_list = [
[16, 71, 101], [0, 99, 132], [46, 132, 181], [74, 166, 218],
[122, 207, 255], [179, 0, 179], [222, 83, 222], [255, 136, 255],
[253, 191, 253], [255, 96, 0], [255, 128, 32], [255, 208, 0],
[180, 0, 0], [224, 0, 0]
]
for i in range(len(colour_list)):
colour_list[i] = numpy.array(colour_list[i], dtype=float) / 255
colour_map_object = matplotlib.colors.ListedColormap(colour_list)
colour_map_object.set_under(numpy.full(3, 1))
colour_bounds_mm = numpy.array([
0.1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70
])
colour_norm_object = matplotlib.colors.BoundaryNorm(
colour_bounds_mm, colour_map_object.N)
return colour_map_object, colour_norm_object
def _field_to_plotting_units(field_matrix, field_name):
"""Converts radar field from default units to plotting units.
:param field_matrix: numpy array in default units.
:param field_name: Name of radar field (must be accepted by
`radar_utils.check_field_name`).
:return: new_field_matrix: Same as input, except in plotting units.
"""
radar_utils.check_field_name(field_name)
if field_name in radar_utils.ECHO_TOP_NAMES:
return field_matrix * KM_TO_KILOFEET
if field_name in SHEAR_VORT_DIV_NAMES:
return field_matrix * PER_SECOND_TO_PER_KILOSECOND
return field_matrix
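# Example of the conversions above (values are illustrative): an echo top of
# 12 km is returned as 12 * KM_TO_KILOFEET ~= 39.4 kft, and a vorticity of
# 0.005 s^-1 is returned as 0.005 * PER_SECOND_TO_PER_KILOSECOND = 5 ks^-1.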
def _field_name_to_plotting_units(field_name):
"""Converts field *name* from default units to plotting units.
:param field_name: Name of radar field (must be accepted by
`radar_utils.check_field_name`).
:return: new_field_name: Same as input, except in plotting units.
"""
radar_utils.check_field_name(field_name)
if field_name in radar_utils.ECHO_TOP_NAMES:
return field_name.replace('_km', '_kft')
if field_name in SHEAR_VORT_DIV_NAMES:
return field_name.replace('_s01', '_ks01')
return field_name
def field_name_to_verbose(field_name, include_units=True):
"""Converts field name from default format to verbose.
:param field_name: See doc for `radar_utils.field_name_to_verbose`.
:param include_units: Same.
:return: field_name_verbose: Same.
"""
field_name_verbose = radar_utils.field_name_to_verbose(
field_name=field_name, include_units=include_units)
return field_name_verbose.replace('(m ASL)', '(kft ASL)').replace(
'(s', '(ks')
def layer_operations_to_names(
list_of_layer_operation_dicts, include_units=True):
"""Converts list of layer operations to list of field and panel names.
P = number of layer operations = number of panels
:param list_of_layer_operation_dicts: See doc for
`input_examples.reduce_examples_3d_to_2d`.
:param include_units: Boolean flag. If True, panel names will include
units.
:return: field_name_by_panel: length-P list with names of radar fields.
:return: panel_names: length-P list of panel names (to be printed at bottoms
of panels).
"""
error_checking.assert_is_boolean(include_units)
num_panels = len(list_of_layer_operation_dicts)
field_name_by_panel = [''] * num_panels
panel_names = [''] * num_panels
for i in range(num_panels):
this_operation_dict = list_of_layer_operation_dicts[i]
field_name_by_panel[i] = this_operation_dict[
input_examples.RADAR_FIELD_KEY]
this_field_name_verbose = field_name_to_verbose(
field_name=field_name_by_panel[i], include_units=include_units)
this_min_height_km_agl = int(numpy.round(
this_operation_dict[input_examples.MIN_HEIGHT_KEY] * METRES_TO_KM
))
this_max_height_km_agl = int(numpy.round(
this_operation_dict[input_examples.MAX_HEIGHT_KEY] * METRES_TO_KM
))
panel_names[i] = '{0:s}\n{1:s} from {2:d}-{3:d} km AGL'.format(
this_field_name_verbose,
this_operation_dict[input_examples.OPERATION_NAME_KEY].upper(),
this_min_height_km_agl, this_max_height_km_agl
)
return field_name_by_panel, panel_names
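# Minimal usage sketch (the dictionary keys are those referenced above; the
# field, operation name and heights are illustrative values):
#
#   operation_dicts = [{
#       input_examples.RADAR_FIELD_KEY: radar_utils.VORTICITY_NAME,
#       input_examples.OPERATION_NAME_KEY: 'max',
#       input_examples.MIN_HEIGHT_KEY: 2000.,
#       input_examples.MAX_HEIGHT_KEY: 5000.
#   }]
#   field_name_by_panel, panel_names = layer_operations_to_names(operation_dicts)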
def fields_and_heights_to_names(
field_names, heights_m_agl, include_units=True):
"""Converts list of radar field/height pairs to panel names.
P = number of panels
:param field_names: length-P list with names of radar fields. Each must be
accepted by `radar_utils.check_field_name`.
:param heights_m_agl: length-P numpy array of heights (metres above ground
level).
:param include_units: Boolean flag. If True, panel names will include
units.
:return: panel_names: length-P list of panel names (to be printed at bottoms
of panels).
"""
error_checking.assert_is_boolean(include_units)
error_checking.assert_is_string_list(field_names)
error_checking.assert_is_numpy_array(
numpy.array(field_names), num_dimensions=1)
num_panels = len(field_names)
error_checking.assert_is_numpy_array(
heights_m_agl, exact_dimensions=numpy.array([num_panels])
)
error_checking.assert_is_geq_numpy_array(heights_m_agl, 0.)
heights_m_agl = numpy.round(heights_m_agl).astype(int)
panel_names = [''] * num_panels
for i in range(num_panels):
this_field_name_verbose = field_name_to_verbose(
field_name=field_names[i], include_units=include_units)
panel_names[i] = '{0:s}\nat {1:d} km AGL'.format(
this_field_name_verbose,
int(numpy.round(heights_m_agl[i] * METRES_TO_KM))
)
return panel_names
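# Minimal usage sketch (fields and heights are illustrative; any field accepted
# by `radar_utils.check_field_name` may be used):
#
#   panel_names = fields_and_heights_to_names(
#       field_names=[radar_utils.VORTICITY_NAME, radar_utils.DIVERGENCE_NAME],
#       heights_m_agl=numpy.array([2000., 5000.]), include_units=True)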
def get_default_colour_scheme(field_name, opacity=DEFAULT_OPACITY):
"""Returns default colour scheme for radar field.
:param field_name: Field name (must be accepted by
`radar_utils.check_field_name`).
:param opacity: Opacity (in range 0...1).
:return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`.
:return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`.
"""
radar_utils.check_field_name(field_name)
error_checking.assert_is_greater(opacity, 0.)
error_checking.assert_is_leq(opacity, 1.)
colour_map_object = None
colour_norm_object = None
if field_name in radar_utils.REFLECTIVITY_NAMES:
colour_map_object, colour_norm_object = (
_get_reflectivity_colour_scheme()
)
elif field_name in radar_utils.SHEAR_NAMES:
colour_map_object, colour_norm_object = _get_az_shear_colour_scheme()
elif field_name in radar_utils.ECHO_TOP_NAMES:
colour_map_object, colour_norm_object = _get_echo_top_colour_scheme()
elif field_name == radar_utils.MESH_NAME:
colour_map_object, colour_norm_object = _get_mesh_colour_scheme()
elif field_name == radar_utils.SHI_NAME:
colour_map_object, colour_norm_object = _get_shi_colour_scheme()
elif field_name == radar_utils.VIL_NAME:
colour_map_object, colour_norm_object = _get_vil_colour_scheme()
elif field_name == radar_utils.DIFFERENTIAL_REFL_NAME:
colour_map_object, colour_norm_object = _get_zdr_colour_scheme()
elif field_name == radar_utils.SPEC_DIFF_PHASE_NAME:
colour_map_object, colour_norm_object = _get_kdp_colour_scheme()
elif field_name == radar_utils.CORRELATION_COEFF_NAME:
colour_map_object, colour_norm_object = _get_rho_hv_colour_scheme()
elif field_name == radar_utils.SPECTRUM_WIDTH_NAME:
colour_map_object, colour_norm_object = (
_get_spectrum_width_colour_scheme()
)
elif field_name == radar_utils.VORTICITY_NAME:
colour_map_object, colour_norm_object = _get_vorticity_colour_scheme()
elif field_name == radar_utils.DIVERGENCE_NAME:
colour_map_object, colour_norm_object = _get_divergence_colour_scheme()
num_colours = len(colour_map_object.colors)
for i in range(num_colours):
colour_map_object.colors[i] = matplotlib.colors.to_rgba(
colour_map_object.colors[i], opacity
)
return colour_map_object, colour_norm_object
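# Minimal usage sketch (field name and opacity are illustrative):
#
#   colour_map_object, colour_norm_object = get_default_colour_scheme(
#       radar_utils.VORTICITY_NAME, opacity=0.8)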
def plot_latlng_grid(
field_matrix, field_name, axes_object, min_grid_point_latitude_deg,
min_grid_point_longitude_deg, latitude_spacing_deg,
longitude_spacing_deg, colour_map_object=None, colour_norm_object=None,
refl_opacity=DEFAULT_OPACITY):
"""Plots lat-long grid as colour map.
M = number of rows (unique grid-point latitudes)
N = number of columns (unique grid-point longitudes)
Because this method plots a lat-long grid (rather than an x-y grid), if you
have used Basemap to plot borders or anything else, the only acceptable
projection is cylindrical equidistant (in which x = longitude and
y = latitude, so no coordinate conversion is necessary).
To use the default colour scheme for the given radar field, leave
`colour_map_object` and `colour_norm_object` empty.
:param field_matrix: M-by-N numpy array with values of radar field.
:param field_name: Name of radar field (must be accepted by
`radar_utils.check_field_name`).
:param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
:param min_grid_point_latitude_deg: Minimum latitude (deg N) over all grid
points. This should be the latitude in the first row of `field_matrix`
-- i.e., at `field_matrix[0, :]`.
:param min_grid_point_longitude_deg: Minimum longitude (deg E) over all grid
points. This should be the longitude in the first column of
`field_matrix` -- i.e., at `field_matrix[:, 0]`.
:param latitude_spacing_deg: Spacing (deg N) between grid points in adjacent
rows.
:param longitude_spacing_deg: Spacing (deg E) between grid points in
adjacent columns.
:param colour_map_object: Instance of `matplotlib.pyplot.cm`. If this is
None, the default colour scheme for `field_name` will be used.
:param colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`. If
this is None, the default colour scheme for `field_name` will be used.
:param refl_opacity: Opacity for reflectivity colour scheme. Used only if
`colour_map_object is None and colour_norm_object is None`.
"""
field_matrix = _field_to_plotting_units(
field_matrix=field_matrix, field_name=field_name)
(field_matrix_at_edges, grid_cell_edge_latitudes_deg,
grid_cell_edge_longitudes_deg
) = grids.latlng_field_grid_points_to_edges(
field_matrix=field_matrix, min_latitude_deg=min_grid_point_latitude_deg,
min_longitude_deg=min_grid_point_longitude_deg,
lat_spacing_deg=latitude_spacing_deg,
lng_spacing_deg=longitude_spacing_deg)
field_matrix_at_edges = numpy.ma.masked_where(
numpy.isnan(field_matrix_at_edges), field_matrix_at_edges)
use_default_colour_scheme = (
colour_map_object is None or colour_norm_object is None
)
if use_default_colour_scheme:
opacity = (
refl_opacity if field_name in radar_utils.REFLECTIVITY_NAMES
else DEFAULT_OPACITY
)
colour_map_object, colour_norm_object = get_default_colour_scheme(
field_name=field_name, opacity=opacity)
else:
if hasattr(colour_norm_object, 'boundaries'):
colour_norm_object.boundaries = _field_to_plotting_units(
field_matrix=colour_norm_object.boundaries,
field_name=field_name)
else:
colour_norm_object.vmin = _field_to_plotting_units(
field_matrix=colour_norm_object.vmin, field_name=field_name)
colour_norm_object.vmax = _field_to_plotting_units(
field_matrix=colour_norm_object.vmax, field_name=field_name)
if hasattr(colour_norm_object, 'boundaries'):
min_colour_value = colour_norm_object.boundaries[0]
max_colour_value = colour_norm_object.boundaries[-1]
else:
min_colour_value = colour_norm_object.vmin
max_colour_value = colour_norm_object.vmax
pyplot.pcolormesh(
grid_cell_edge_longitudes_deg, grid_cell_edge_latitudes_deg,
field_matrix_at_edges, cmap=colour_map_object, norm=colour_norm_object,
vmin=min_colour_value, vmax=max_colour_value, shading='flat',
edgecolors='None', axes=axes_object, zorder=-1e11)
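# Minimal usage sketch (the axes handle and grid geometry come from the caller;
# the corner point and spacings here are illustrative):
#
#   plot_latlng_grid(
#       field_matrix=field_matrix, field_name=radar_utils.VORTICITY_NAME,
#       axes_object=axes_object, min_grid_point_latitude_deg=35.,
#       min_grid_point_longitude_deg=262., latitude_spacing_deg=0.01,
#       longitude_spacing_deg=0.01)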
def plot_2d_grid_without_coords(
field_matrix, field_name, axes_object, plot_grid_lines=True,
font_size=DEFAULT_FONT_SIZE, annotation_string=None,
colour_map_object=None, colour_norm_object=None,
refl_opacity=DEFAULT_OPACITY):
"""Plots 2-D grid as colour map.
In this case the grid is not georeferenced (convenient for storm-centered
radar images).
To use the default colour scheme for the given radar field, leave
`colour_map_object` and `colour_norm_object` empty.
:param field_matrix: See doc for `plot_latlng_grid`.
:param field_name: Same.
:param axes_object: Same.
:param plot_grid_lines: Boolean flag. If True, will plot grid lines on
radar image.
:param font_size: Font size for annotation.
:param annotation_string: Annotation (will be printed in the bottom-center).
If you want no annotation, leave this alone.
:param colour_map_object: See doc for `plot_latlng_grid`.
:param colour_norm_object: Same.
:param refl_opacity: Same.
:return: colour_map_object: Same as input, except default might have been
set.
:return: colour_norm_object: Same as input, except default might have been
set.
"""
error_checking.assert_is_numpy_array_without_nan(field_matrix)
error_checking.assert_is_numpy_array(field_matrix, num_dimensions=2)
error_checking.assert_is_boolean(plot_grid_lines)
field_matrix = _field_to_plotting_units(
field_matrix=field_matrix, field_name=field_name)
field_matrix = numpy.ma.masked_where(
numpy.isnan(field_matrix), field_matrix
)
use_default_colour_scheme = (
colour_map_object is None or colour_norm_object is None
)
if use_default_colour_scheme:
opacity = (
refl_opacity if field_name in radar_utils.REFLECTIVITY_NAMES
else DEFAULT_OPACITY
)
colour_map_object, colour_norm_object = get_default_colour_scheme(
field_name=field_name, opacity=opacity)
else:
if hasattr(colour_norm_object, 'boundaries'):
colour_norm_object.boundaries = _field_to_plotting_units(
field_matrix=colour_norm_object.boundaries,
field_name=field_name)
else:
colour_norm_object.vmin = _field_to_plotting_units(
field_matrix=colour_norm_object.vmin, field_name=field_name)
colour_norm_object.vmax = _field_to_plotting_units(
field_matrix=colour_norm_object.vmax, field_name=field_name)
if hasattr(colour_norm_object, 'boundaries'):
min_colour_value = colour_norm_object.boundaries[0]
max_colour_value = colour_norm_object.boundaries[-1]
else:
min_colour_value = colour_norm_object.vmin
max_colour_value = colour_norm_object.vmax
axes_object.pcolormesh(
field_matrix, cmap=colour_map_object, norm=colour_norm_object,
vmin=min_colour_value, vmax=max_colour_value, shading='flat',
edgecolors='None', zorder=-1e11)
if plot_grid_lines:
x_coord_limits = axes_object.get_xlim()
x_grid_coords = numpy.linspace(
x_coord_limits[0], x_coord_limits[1], num=5, dtype=float
)[1:-1]
y_coord_limits = axes_object.get_ylim()
y_grid_coords = numpy.linspace(
y_coord_limits[0], y_coord_limits[1], num=5, dtype=float
)[1:-1]
axes_object.set_xticks(x_grid_coords)
axes_object.set_yticks(y_grid_coords)
axes_object.grid(
b=True, which='major', axis='both', linestyle='--', linewidth=2)
axes_object.xaxis.set_ticklabels([])
axes_object.yaxis.set_ticklabels([])
axes_object.xaxis.set_ticks_position('none')
axes_object.yaxis.set_ticks_position('none')
if annotation_string is not None:
error_checking.assert_is_string(annotation_string)
axes_object.text(
0.5, 0.01, annotation_string, fontsize=font_size, color='k',
bbox=TEXT_BOUNDING_BOX_DICT, horizontalalignment='center',
verticalalignment='bottom', transform=axes_object.transAxes,
zorder=1e10)
return colour_map_object, colour_norm_object
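# Illustrative sketch added for clarity; it is not part of the original module.
# It shows one way to call `plot_2d_grid_without_coords` with the default
# colour scheme. The field name 'reflectivity_dbz' is an assumption for
# illustration only -- use whichever constant `radar_utils` defines for the
# field you are actually plotting.
def _example_plot_single_grid():
    """Plots one random storm-centred grid (illustrative sketch only)."""
    _, example_axes_object = pyplot.subplots(1, 1, figsize=(6, 6))
    example_field_matrix = numpy.random.uniform(low=0., high=60., size=(32, 32))
    return plot_2d_grid_without_coords(
        field_matrix=example_field_matrix,
        field_name='reflectivity_dbz',
        axes_object=example_axes_object,
        annotation_string='example panel')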
def plot_many_2d_grids(
data_matrix, field_names, axes_objects, panel_names=None,
plot_grid_lines=True, colour_map_objects=None, colour_norm_objects=None,
refl_opacity=DEFAULT_OPACITY, plot_colour_bar_flags=None,
panel_name_font_size=DEFAULT_FONT_SIZE,
colour_bar_font_size=DEFAULT_FONT_SIZE,
colour_bar_length=DEFAULT_COLOUR_BAR_LENGTH):
"""Plots many 2-D grids in paneled figure.
M = number of rows in grid
N = number of columns in grid
C = number of fields
:param data_matrix: M-by-N-by-C numpy array of radar values.
:param field_names: length-C list of field names.
:param axes_objects: length-C list of axes handles (instances of
`matplotlib.axes._subplots.AxesSubplot`).
:param panel_names: length-C list of panel names (to be printed at bottom of
each panel). If None, panel names will not be printed.
:param plot_grid_lines: Boolean flag. If True, will plot grid lines over
radar images.
:param colour_map_objects: length-C list of colour schemes (instances of
`matplotlib.pyplot.cm` or similar). If None, will use default colour
scheme for each field.
:param colour_norm_objects: length-C list of colour-normalizers (instances
of `matplotlib.colors.BoundaryNorm` or similar). If None, will use
default normalizer for each field.
:param refl_opacity: Opacity for reflectivity colour scheme. Used only if
`colour_map_objects is None and colour_norm_objects is None`.
:param plot_colour_bar_flags: length-C numpy array of Boolean flags. If
`plot_colour_bar_flags[k] == True`, will plot colour bar for [k]th
panel. If None, will plot no colour bars.
:param panel_name_font_size: Font size for panel names.
:param colour_bar_font_size: Font size for colour-bar tick marks.
:param colour_bar_length: Length of colour bars (as fraction of axis
length).
:return: colour_bar_objects: length-C list of colour bars. If
`plot_colour_bar_flags[k] == False`, colour_bar_objects[k] will be None.
"""
error_checking.assert_is_numpy_array(data_matrix, num_dimensions=3)
num_fields = data_matrix.shape[-1]
these_expected_dim = numpy.array([num_fields], dtype=int)
error_checking.assert_is_string_list(field_names)
error_checking.assert_is_numpy_array(
numpy.array(field_names), exact_dimensions=these_expected_dim
)
error_checking.assert_is_numpy_array(
numpy.array(axes_objects), exact_dimensions=these_expected_dim
)
if panel_names is None:
panel_names = [None] * num_fields
else:
error_checking.assert_is_string_list(panel_names)
error_checking.assert_is_numpy_array(
numpy.array(panel_names), exact_dimensions=these_expected_dim
)
if colour_map_objects is None or colour_norm_objects is None:
colour_map_objects = [None] * num_fields
colour_norm_objects = [None] * num_fields
else:
error_checking.assert_is_numpy_array(
numpy.array(colour_map_objects), exact_dimensions=these_expected_dim
)
error_checking.assert_is_numpy_array(
numpy.array(colour_norm_objects),
exact_dimensions=these_expected_dim
)
if plot_colour_bar_flags is None:
plot_colour_bar_flags = numpy.full(num_fields, 0, dtype=bool)
error_checking.assert_is_boolean_numpy_array(plot_colour_bar_flags)
error_checking.assert_is_numpy_array(
plot_colour_bar_flags, exact_dimensions=these_expected_dim)
colour_bar_objects = [None] * num_fields
for k in range(num_fields):
this_colour_map_object, this_colour_norm_object = (
plot_2d_grid_without_coords(
field_matrix=data_matrix[..., k], field_name=field_names[k],
axes_object=axes_objects[k], annotation_string=panel_names[k],
font_size=panel_name_font_size, plot_grid_lines=plot_grid_lines,
colour_map_object=copy.deepcopy(colour_map_objects[k]),
colour_norm_object=copy.deepcopy(colour_norm_objects[k]),
refl_opacity=refl_opacity
)
)
if not plot_colour_bar_flags[k]:
continue
colour_bar_objects[k] = plotting_utils.plot_colour_bar(
axes_object_or_matrix=axes_objects[k],
data_matrix=data_matrix[..., k],
colour_map_object=this_colour_map_object,
colour_norm_object=this_colour_norm_object,
orientation_string='horizontal', font_size=colour_bar_font_size,
fraction_of_axis_length=colour_bar_length,
extend_min=field_names[k] in SHEAR_VORT_DIV_NAMES, extend_max=True
)
return colour_bar_objects
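# Illustrative sketch (not part of the original module): paneled plotting of C
# fields side by side, with a colour bar under each panel. `data_matrix` and
# `field_names` are assumed to already be valid inputs as documented above.
def _example_plot_many_fields(data_matrix, field_names):
    """Plots several fields in one paneled figure (illustrative sketch only).

    :param data_matrix: M-by-N-by-C numpy array of radar values.
    :param field_names: length-C list of valid field names.
    :return: colour_bar_objects: See doc for `plot_many_2d_grids`.
    """
    num_fields = data_matrix.shape[-1]
    _, axes_object_array = pyplot.subplots(
        1, num_fields, figsize=(6 * num_fields, 6))
    return plot_many_2d_grids(
        data_matrix=data_matrix, field_names=field_names,
        axes_objects=list(numpy.atleast_1d(axes_object_array)),
        panel_names=field_names,
        plot_colour_bar_flags=numpy.full(num_fields, True, dtype=bool))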
def plot_3d_grid(
data_matrix, axes_objects, field_name, heights_metres, ground_relative,
plot_panel_names=True, plot_grid_lines=True, colour_map_object=None,
colour_norm_object=None, refl_opacity=DEFAULT_OPACITY,
panel_name_font_size=DEFAULT_FONT_SIZE):
"""Plots 3-D grid in paneled figure (one height per panel).
M = number of rows in grid
N = number of columns in grid
H = number of heights in grid
:param data_matrix: M-by-N-by-H numpy array of radar values.
:param axes_objects: length-H list of axes handles (instances of
`matplotlib.axes._subplots.AxesSubplot`).
:param field_name: Name of radar field.
:param heights_metres: length-H numpy array of heights.
:param ground_relative: Boolean flag. If True, heights are ground-relative.
If False, heights are sea-level-relative.
:param plot_panel_names: Boolean flag. If True, will plot height (example:
"3 km AGL") at bottom of each panel.
:param plot_grid_lines: See doc for `plot_2d_grid_without_coords`.
:param colour_map_object: Same.
:param colour_norm_object: Same.
:param refl_opacity: Same.
:param panel_name_font_size: Font size for panel names.
"""
error_checking.assert_is_numpy_array(data_matrix, num_dimensions=3)
num_heights = data_matrix.shape[-1]
these_expected_dim = numpy.array([num_heights], dtype=int)
error_checking.assert_is_numpy_array(
numpy.array(axes_objects), exact_dimensions=these_expected_dim
)
error_checking.assert_is_geq_numpy_array(heights_metres, 0.)
error_checking.assert_is_numpy_array(
numpy.array(heights_metres), exact_dimensions=these_expected_dim
)
error_checking.assert_is_boolean(ground_relative)
error_checking.assert_is_boolean(plot_panel_names)
for k in range(num_heights):
if plot_panel_names:
this_panel_name = '{0:d} km {1:s}'.format(
int(numpy.round(heights_metres[k] * METRES_TO_KM)),
'AGL' if ground_relative else 'ASL'
)
else:
this_panel_name = None
this_colour_norm_object = (
colour_norm_object if k == num_heights - 1
else copy.deepcopy(colour_norm_object)
)
plot_2d_grid_without_coords(
field_matrix=data_matrix[..., k],
field_name=field_name, axes_object=axes_objects[k],
annotation_string=this_panel_name, plot_grid_lines=plot_grid_lines,
colour_map_object=colour_map_object,
colour_norm_object=this_colour_norm_object,
refl_opacity=refl_opacity, font_size=panel_name_font_size)
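# Illustrative sketch (not part of the original module): one panel per height
# for a 3-D grid. The heights of 1-3 km are assumptions for illustration.
def _example_plot_3d_grid(data_matrix, field_name):
    """Plots a three-height grid, one panel per height (illustrative sketch).

    :param data_matrix: M-by-N-by-3 numpy array of radar values.
    :param field_name: Valid radar-field name.
    """
    example_heights_metres = numpy.array([1000., 2000., 3000.])
    _, axes_object_array = pyplot.subplots(1, 3, figsize=(18, 6))
    plot_3d_grid(
        data_matrix=data_matrix, axes_objects=list(axes_object_array),
        field_name=field_name, heights_metres=example_heights_metres,
        ground_relative=True)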
|
mit
|
anand-c-goog/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/dnn_test.py
|
5
|
39527
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.python.ops import math_ops
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = tf.contrib.learn.datasets.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = tf.contrib.learn.datasets.base.Dataset(data=iris.data[ids],
target=iris.target[ids])
return iris
def _iris_input_logistic_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100, 1], dtype=tf.int32)
def _iris_input_multiclass_fn():
iris = tf.contrib.learn.datasets.load_iris()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[150, 1], dtype=tf.int32)
class DNNClassifierTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.DNNClassifier)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_logistic_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertLess(scores['loss'], 0.3)
def testLogisticRegression_MatrixData_Target1D(self):
"""Same as the last test, but target shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100], dtype=tf.int32)
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.8)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [0.2], [.1]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([[1], [0], [0]], dtype=tf.int32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertLess(scores['loss'], 0.3)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertListEqual(predictions, [1, 0, 0])
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[50], [20], [10]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
target = tf.constant([[0.8], [0.], [0.2]], dtype=tf.float32)
return features, target
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=1000)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn, as_iterable=True))
# Prediction probabilities mirror the target column, which proves that the
# classifier learns from float input.
self.assertAllClose(
predictions_proba, [[0.2, 0.8], [1., 0.], [0.8, 0.2]], atol=0.05)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertListEqual(predictions, [1, 0, 0])
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_iris_input_multiclass_fn, steps=200)
self.assertTrue('centered_bias_weight' in classifier.get_variable_names())
scores = classifier.evaluate(input_fn=_iris_input_multiclass_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.8)
self.assertLess(scores['loss'], 0.3)
def testMultiClass_MatrixData_Target1D(self):
"""Same as the last test, but target shape is [150] instead of [150, 1]."""
def _input_fn():
iris = tf.contrib.learn.datasets.load_iris()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[150], dtype=tf.int32)
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.8)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = tf.contrib.learn.datasets.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.8)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
target = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
}
return features, target
classifier = tf.contrib.learn.DNNClassifier(
n_classes=2,
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
self.assertAlmostEqual(scores['loss'], 0.562, delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, target
def _input_fn_eval():
# 4 rows, with different weights.
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[7.], [1.], [1.], [1.]])
}
return features, target
classifier = tf.contrib.learn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
self.assertAlmostEqual(scores['loss'], 1.06, delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
target = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, target
def _input_fn_eval():
# Create 4 rows (y = x)
target = tf.constant([[1], [1], [1], [1]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, target
classifier = tf.contrib.learn.DNNClassifier(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the accuracy
# should be close to 1.
self.assertGreater(scores['accuracy'], 0.9)
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [.2], [.1]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([[1], [0], [0]], dtype=tf.int32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1)
]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertLess(scores['loss'], 0.3)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertListEqual(list(predictions), [1, 0, 0])
predictions = classifier.predict_proba(input_fn=_input_fn,
as_iterable=False)
self.assertAllClose(
predictions, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.1)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [.2], [.1]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([[1], [0], [0]], dtype=tf.int32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertLess(scores['loss'], 0.3)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertListEqual(predictions, [1, 0, 0])
predictions = list(
classifier.predict_proba(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(
predictions, [[0., 1., 0.], [1., 0., 0.], [1., 0., 0.]], atol=0.3)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
target = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs),
}
return features, target
def _my_metric_op(predictions, targets):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
targets = tf.to_float(targets)
predictions = tf.slice(predictions, [0, 1], [-1, 1])
targets = math_ops.cast(targets, predictions.dtype)
return tf.reduce_sum(tf.mul(predictions, targets))
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_accuracy,
prediction_key='classes'),
'my_precision': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_precision,
prediction_key='classes'),
'my_metric': MetricSpec(
metric_fn=_my_metric_op,
prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric'
]).issubset(set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict(input_fn=predict_input_fn)))
self.assertEqual(_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name': MetricSpec(
metric_fn=tf.contrib.metrics.streaming_auc,
prediction_key='bad_type')})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [.2], [.1]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([[1], [0], [0]], dtype=tf.int32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = tf.contrib.learn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict(input_fn=predict_input_fn)
del classifier
classifier2 = tf.contrib.learn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [.2], [.1]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([[1], [0], [0]], dtype=tf.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1)
]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config=tf.contrib.learn.RunConfig(
num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({}),
tf_random_seed=5))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertLess(scores['loss'], 0.3)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
tf.contrib.layers.real_valued_column('age'),
tf.contrib.layers.embedding_column(language, dimension=1)
]
classifier = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_iris_input_multiclass_fn, steps=200)
self.assertFalse('centered_bias_weight' in classifier.get_variable_names())
scores = classifier.evaluate(input_fn=_iris_input_multiclass_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.8)
self.assertLess(scores['loss'], 0.3)
class DNNRegressorTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_iris_input_logistic_fn, steps=200)
scores = regressor.evaluate(input_fn=_iris_input_logistic_fn, steps=1)
self.assertLess(scores['loss'], 0.3)
def testRegression_MatrixData_Target1D(self):
"""Same as the last test, but target shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100], dtype=tf.int32)
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.3)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [tf.contrib.layers.real_valued_column('', dimension=4)]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertLess(scores['loss'], 0.3)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[.8], [.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.3)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
}
return features, target
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(scores['loss'], 0.1875, delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, target
def _input_fn_eval():
# 4 rows, with different weights.
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[7.], [1.], [1.], [1.]])
}
return features, target
regressor = tf.contrib.learn.DNNRegressor(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(scores['loss'], 0.4125, delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, target
def _input_fn_eval():
# Create 4 rows (y = x)
target = tf.constant([[1.], [1.], [1.], [1.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, target
regressor = tf.contrib.learn.DNNRegressor(
weight_column_name='w',
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
target = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant(target, dtype=tf.float32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predictions, target, atol=0.2)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
target = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant(target, dtype=tf.float32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predictions, target, atol=0.2)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs),
}
return features, target
def _my_metric_op(predictions, targets):
return tf.reduce_sum(tf.mul(predictions, targets))
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=[tf.contrib.layers.real_valued_column('x')],
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': tf.contrib.metrics.streaming_mean_squared_error,
'my_metric': _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests that when the key is a tuple, an error is raised.
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={('my_error', 'predictions'):
tf.contrib.metrics.streaming_mean_squared_error})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = tf.contrib.learn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict(input_fn=predict_input_fn))
del regressor
regressor2 = tf.contrib.learn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
predictions2 = list(regressor2.predict(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config=tf.contrib.learn.RunConfig(
num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({}),
tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.3)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(
tf.constant([[0.8], [0.15], [0.]]), num_epochs=num_epochs),
'language': tf.SparseTensor(
values=tf.train.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
sparse_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
tf.contrib.layers.embedding_column(sparse_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
]
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=tf.contrib.learn.RunConfig(tf_random_seed=3))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.3)
def boston_input_fn():
boston = tf.contrib.learn.datasets.load_boston()
features = tf.cast(tf.reshape(tf.constant(boston.data), [-1, 13]), tf.float32)
target = tf.cast(tf.reshape(tf.constant(boston.target), [-1, 1]), tf.float32)
return features, target
class FeatureColumnTest(tf.test.TestCase):
def testTrain(self):
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
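# Illustrative sketch (not part of the original test file): reproduces, with
# plain numpy, the hand-computed loss values quoted in the comments of
# testLoss and testLossWithWeights above (cross entropy 0.562, weighted cross
# entropy 1.06, average square loss 0.1875, weighted average square loss
# 0.4125), assuming the model predicts 0.25 for every row as those comments
# state.
def _verify_quoted_losses(prediction=0.25):
  labels = np.array([1., 0., 0., 0.])
  eval_weights = np.array([7., 1., 1., 1.])
  log_losses = -(labels * np.log(prediction) +
                 (1 - labels) * np.log(1 - prediction))
  squared_errors = (labels - prediction) ** 2
  return {
      'cross_entropy': np.mean(log_losses),
      'weighted_cross_entropy': np.average(log_losses, weights=eval_weights),
      'mean_squared_error': np.mean(squared_errors),
      'weighted_mean_squared_error': np.average(
          squared_errors, weights=eval_weights),
  }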
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
|
valexandersaulys/prudential_insurance_kaggle
|
venv/lib/python2.7/site-packages/pandas/msgpack/__init__.py
|
9
|
1180
|
# coding: utf-8
from pandas.msgpack._version import version
from pandas.msgpack.exceptions import *
from collections import namedtuple
class ExtType(namedtuple('ExtType', 'code data')):
"""ExtType represents ext type in msgpack."""
def __new__(cls, code, data):
if not isinstance(code, int):
raise TypeError("code must be int")
if not isinstance(data, bytes):
raise TypeError("data must be bytes")
if not 0 <= code <= 127:
raise ValueError("code must be 0~127")
return super(ExtType, cls).__new__(cls, code, data)
import os
from pandas.msgpack._packer import Packer
from pandas.msgpack._unpacker import unpack, unpackb, Unpacker
def pack(o, stream, **kwargs):
"""
Pack object `o` and write it to `stream`
See :class:`Packer` for options.
"""
packer = Packer(**kwargs)
stream.write(packer.pack(o))
def packb(o, **kwargs):
"""
Pack object `o` and return packed bytes
See :class:`Packer` for options.
"""
return Packer(**kwargs).pack(o)
# alias for compatibility to simplejson/marshal/pickle.
load = unpack
loads = unpackb
dump = pack
dumps = packb
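# Illustrative sketch (not part of the original module): a minimal round trip
# through the aliases defined above. Container and key types on the way back
# depend on the Packer/Unpacker defaults of this vendored msgpack, so only the
# scalar payload is checked here.
def _roundtrip_example():
    packed_bytes = packb([1, 2, 3])      # serialise to msgpack bytes
    unpacked = unpackb(packed_bytes)     # deserialise (list or tuple of ints)
    assert list(unpacked) == [1, 2, 3]
    return packed_bytes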
|
gpl-2.0
|
gfyoung/pandas
|
pandas/tests/extension/arrow/test_bool.py
|
1
|
2932
|
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_bool_dtype
from pandas.tests.extension import base
pytest.importorskip("pyarrow", minversion="0.13.0")
from .arrays import ArrowBoolArray, ArrowBoolDtype # isort:skip
@pytest.fixture
def dtype():
return ArrowBoolDtype()
@pytest.fixture
def data():
values = np.random.randint(0, 2, size=100, dtype=bool)
values[1] = ~values[0]
return ArrowBoolArray.from_scalars(values)
@pytest.fixture
def data_missing():
return ArrowBoolArray.from_scalars([None, True])
def test_basic_equals(data):
# https://github.com/pandas-dev/pandas/issues/34660
assert pd.Series(data).equals(pd.Series(data))
class BaseArrowTests:
pass
class TestDtype(BaseArrowTests, base.BaseDtypeTests):
def test_array_type_with_arg(self, data, dtype):
pytest.skip("GH-22666")
class TestInterface(BaseArrowTests, base.BaseInterfaceTests):
def test_copy(self, data):
# __setitem__ does not work, so we only have a smoke-test
data.copy()
def test_view(self, data):
# __setitem__ does not work, so we only have a smoke-test
data.view()
@pytest.mark.xfail(raises=AssertionError, reason="Not implemented yet")
def test_contains(self, data, data_missing):
super().test_contains(data, data_missing)
class TestConstructors(BaseArrowTests, base.BaseConstructorsTests):
def test_from_dtype(self, data):
pytest.skip("GH-22666")
    # There seems to be a bug where isna on an empty BoolArray returns floats.
@pytest.mark.xfail(reason="bad is-na for empty data")
def test_from_sequence_from_cls(self, data):
super().test_from_sequence_from_cls(data)
@pytest.mark.xfail(reason="pa.NULL is not recognised as scalar, GH-33899")
def test_series_constructor_no_data_with_index(self, dtype, na_value):
# pyarrow.lib.ArrowInvalid: only handle 1-dimensional arrays
super().test_series_constructor_no_data_with_index(dtype, na_value)
@pytest.mark.xfail(reason="pa.NULL is not recognised as scalar, GH-33899")
def test_series_constructor_scalar_na_with_index(self, dtype, na_value):
# pyarrow.lib.ArrowInvalid: only handle 1-dimensional arrays
super().test_series_constructor_scalar_na_with_index(dtype, na_value)
@pytest.mark.xfail(reason="raises AssertionError")
def test_construct_empty_dataframe(self, dtype):
super().test_construct_empty_dataframe(dtype)
class TestReduce(base.BaseNoReduceTests):
def test_reduce_series_boolean(self):
pass
class TestReduceBoolean(base.BaseBooleanReduceTests):
pass
def test_is_bool_dtype(data):
assert is_bool_dtype(data)
assert pd.core.common.is_bool_indexer(data)
s = pd.Series(range(len(data)))
result = s[data]
expected = s[np.asarray(data)]
tm.assert_series_equal(result, expected)
|
bsd-3-clause
|
schets/scikit-learn
|
sklearn/mixture/tests/test_dpgmm.py
|
7
|
4553
|
import unittest
import sys
import nose
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
    # checks that the verbose output is the same for the flag values
    # '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
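# Illustrative sketch (not part of the original test file): the identity that
# test_log_normalize relies on. Exponentiating log-values and renormalising
# (a softmax) recovers the original probabilities up to a constant factor;
# this plain-numpy stand-in is not the sklearn implementation itself.
def _softmax_reference(log_values):
    shifted = log_values - np.max(log_values)   # subtract max for stability
    exponentiated = np.exp(shifted)
    return exponentiated / exponentiated.sum()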
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
if __name__ == '__main__':
nose.runmodule()
|
bsd-3-clause
|
wschenck/nest-simulator
|
pynest/nest/lib/hl_api_types.py
|
13
|
38621
|
# -*- coding: utf-8 -*-
#
# hl_api_types.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Classes defining the different PyNEST types
"""
from ..ll_api import *
from .. import pynestkernel as kernel
from .hl_api_helper import *
from .hl_api_simulation import GetKernelStatus
import numpy
import json
from math import floor, log
try:
import pandas
HAVE_PANDAS = True
except ImportError:
HAVE_PANDAS = False
__all__ = [
'CollocatedSynapses',
'CreateParameter',
'Mask',
'NodeCollection',
'Parameter',
'serializable',
'SynapseCollection',
'to_json',
]
def CreateParameter(parametertype, specs):
"""
Create a parameter.
Parameters
----------
parametertype : string
Parameter type with or without distance dependency.
Can be one of the following: 'constant', 'linear', 'exponential', 'gaussian', 'gaussian2D',
'uniform', 'normal', 'lognormal', 'distance', 'position'
specs : dict
Dictionary specifying the parameters of the provided
`parametertype`, see **Parameter types**.
Returns
-------
``Parameter``:
Object representing the parameter
Notes
-----
- Instead of using `CreateParameter` you can also use the various parametrizations embedded in NEST. See for
instance :py:func:`.uniform`.
**Parameter types**
Some available parameter types (`parametertype` parameter), their function and
acceptable keys for their corresponding specification dictionaries
* Constant
::
'constant' :
{'value' : float} # constant value
* Randomization
::
# random parameter with uniform distribution in [min,max)
'uniform' :
{'min' : float, # minimum value, default: 0.0
'max' : float} # maximum value, default: 1.0
# random parameter with normal distribution, optionally truncated
# to [min,max)
'normal':
{'mean' : float, # mean value, default: 0.0
'sigma': float, # standard deviation, default: 1.0
'min' : float, # minimum value, default: -inf
'max' : float} # maximum value, default: +inf
# random parameter with lognormal distribution,
# optionally truncated to [min,max)
'lognormal' :
{'mu' : float, # mean value of logarithm, default: 0.0
'sigma': float, # standard deviation of log, default: 1.0
'min' : float, # minimum value, default: -inf
'max' : float} # maximum value, default: +inf
"""
return sli_func('CreateParameter', {parametertype: specs})
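# Illustrative sketch (not part of the original module): building parameters
# with the specification dictionaries documented in CreateParameter above.
# This helper is never called by the library; it only demonstrates the key
# names from the docstring.
def _create_parameter_examples():
    uniform_param = CreateParameter('uniform', {'min': 0.0, 'max': 2.0})
    normal_param = CreateParameter(
        'normal', {'mean': 0.0, 'sigma': 1.0, 'min': -2.0, 'max': 2.0})
    return uniform_param, normal_param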
class NodeCollectionIterator(object):
"""
Iterator class for `NodeCollection`.
Returns
-------
`NodeCollection`:
Single node ID `NodeCollection` of respective iteration.
"""
def __init__(self, nc):
self._nc = nc
self._increment = 0
def __iter__(self):
return self
def __next__(self):
if self._increment > len(self._nc) - 1:
raise StopIteration
val = sli_func('Take', self._nc._datum, [self._increment + (self._increment >= 0)])
self._increment += 1
return val
class NodeCollection(object):
"""
Class for `NodeCollection`.
    `NodeCollection` represents the nodes of a network. The class supports
    iteration, concatenation, indexing, slicing, length, conversion to and
    from lists, test for membership, and test for equality. By using the
    member functions :py:func:`get()` and :py:func:`set()`, you can get and
    set desired parameters.
A `NodeCollection` is created by the :py:func:`.Create` function, or by converting a
list of nodes to a `NodeCollection` with ``nest.NodeCollection(list)``.
If your nodes have spatial extent, use the member parameter ``spatial`` to get the spatial information.
Example
-------
::
import nest
nest.ResetKernel()
# Create NodeCollection representing nodes
nc = nest.Create('iaf_psc_alpha', 10)
# Convert from list
node_ids_in = [2, 4, 6, 8]
new_nc = nest.NodeCollection(node_ids_in)
# Convert to list
nc_list = nc.tolist()
# Concatenation
Enrns = nest.Create('aeif_cond_alpha', 600)
Inrns = nest.Create('iaf_psc_alpha', 400)
nrns = Enrns + Inrns
# Slicing and membership
print(new_nc[2])
print(new_nc[1:2])
6 in new_nc
"""
_datum = None
def __init__(self, data=None):
if data is None:
data = []
if isinstance(data, kernel.SLIDatum):
if data.dtype != "nodecollectiontype":
raise TypeError("Need NodeCollection Datum.")
self._datum = data
else:
# Data from user, must be converted to datum
# Data can be anything that can be converted to a NodeCollection,
# such as list, tuple, etc.
nc = sli_func('cvnodecollection', data)
self._datum = nc._datum
def __iter__(self):
return NodeCollectionIterator(self)
def __add__(self, other):
if not isinstance(other, NodeCollection):
raise NotImplementedError()
return sli_func('join', self._datum, other._datum)
def __getitem__(self, key):
if isinstance(key, slice):
if key.start is None:
start = 1
else:
start = key.start + 1 if key.start >= 0 else max(key.start, -1 * self.__len__())
if start > self.__len__():
raise IndexError('slice start value outside of the NodeCollection')
if key.stop is None:
stop = self.__len__()
else:
stop = min(key.stop, self.__len__()) if key.stop >= 0 else key.stop - 1
if abs(stop) > self.__len__():
raise IndexError('slice stop value outside of the NodeCollection')
step = 1 if key.step is None else key.step
if step < 1:
raise IndexError('slicing step for NodeCollection must be strictly positive')
return sli_func('Take', self._datum, [start, stop, step])
elif isinstance(key, (int, numpy.integer)):
if abs(key + (key >= 0)) > self.__len__():
raise IndexError('index value outside of the NodeCollection')
return sli_func('Take', self._datum, [key + (key >= 0)])
elif isinstance(key, (list, tuple)):
if len(key) == 0:
return NodeCollection([])
# Must check if elements are bool first, because bool inherits from int
if all(isinstance(x, bool) for x in key):
if len(key) != len(self):
raise IndexError('Bool index array must be the same length as NodeCollection')
np_key = numpy.array(key, dtype=numpy.bool)
# Checking that elements are not instances of bool too, because bool inherits from int
elif all(isinstance(x, int) and not isinstance(x, bool) for x in key):
np_key = numpy.array(key, dtype=numpy.uint64)
if len(numpy.unique(np_key)) != len(np_key):
raise ValueError('All node IDs in a NodeCollection have to be unique')
else:
raise TypeError('Indices must be integers or bools')
return take_array_index(self._datum, np_key)
elif isinstance(key, numpy.ndarray):
if len(key) == 0:
return NodeCollection([])
if len(key.shape) != 1:
                raise TypeError('NumPy indices must be one-dimensional')
is_booltype = numpy.issubdtype(key.dtype, numpy.dtype(bool).type)
if not (is_booltype or numpy.issubdtype(key.dtype, numpy.integer)):
raise TypeError('NumPy indices must be an array of integers or bools')
if is_booltype and len(key) != len(self):
raise IndexError('Bool index array must be the same length as NodeCollection')
if not is_booltype and len(numpy.unique(key)) != len(key):
raise ValueError('All node IDs in a NodeCollection have to be unique')
return take_array_index(self._datum, key)
else:
raise IndexError('only integers, slices, lists, tuples, and numpy arrays are valid indices')
def __contains__(self, node_id):
return sli_func('MemberQ', self._datum, node_id)
def __eq__(self, other):
if not isinstance(other, NodeCollection):
raise NotImplementedError('Cannot compare NodeCollection to {}'.format(type(other).__name__))
if self.__len__() != other.__len__():
return False
return sli_func('eq', self, other)
def __neq__(self, other):
if not isinstance(other, NodeCollection):
raise NotImplementedError()
return not self == other
def __len__(self):
return sli_func('size', self._datum)
def __str__(self):
return sli_func('pcvs', self._datum)
def __repr__(self):
return sli_func('pcvs', self._datum)
def get(self, *params, **kwargs):
"""
Get parameters from nodes.
Parameters
----------
params : str or list, optional
Parameters to get from the nodes. It must be one of the following:
- A single string.
- A list of strings.
- One or more strings, followed by a string or list of strings.
This is for hierarchical addressing.
output : str, ['pandas','json'], optional
If the returned data should be in a Pandas DataFrame or in a
JSON serializable format.
Returns
-------
int or float:
If there is a single node in the `NodeCollection`, and a single
parameter in params.
array_like:
If there are multiple nodes in the `NodeCollection`, and a single
parameter in params.
dict:
If there are multiple parameters in params. Or, if no parameters
are specified, a dictionary containing aggregated parameter-values
for all nodes is returned.
DataFrame:
Pandas Data frame if output should be in pandas format.
Raises
------
TypeError
If the input params are of the wrong form.
KeyError
If the specified parameter does not exist for the nodes.
See Also
--------
:py:func:`set`,
:py:func:`GetStatus()<nest.lib.hl_api_info.GetStatus>`,
:py:func:`SetStatus()<nest.lib.hl_api_info.SetStatus>`
Examples
--------
>>> nodes.get()
{'archiver_length': (0, 0, 0),
'beta_Ca': (0.001, 0.001, 0.001),
'C_m': (250.0, 250.0, 250.0),
...
'V_th': (-55.0, -55.0, -55.0),
'vp': (0, 0, 0)}
>>> nodes.get('V_m')
(-70.0, -70.0, -70.0)
>>> nodes[0].get('V_m')
-70.0
>>> nodes.get('V_m', 'C_m')
{'V_m': (-70.0, -70.0, -70.0), 'C_m': (250.0, 250.0, 250.0)}
>>> voltmeter.get('events', 'senders')
array([...], dtype=int64)
"""
if not self:
raise ValueError('Cannot get parameter of empty NodeCollection')
# ------------------------- #
# Checks of input #
# ------------------------- #
if not kwargs:
output = ''
elif 'output' in kwargs:
output = kwargs['output']
if output == 'pandas' and not HAVE_PANDAS:
raise ImportError('Pandas could not be imported')
else:
raise TypeError('Got unexpected keyword argument')
pandas_output = output == 'pandas'
if len(params) == 0:
# get() is called without arguments
result = sli_func('get', self._datum)
elif len(params) == 1:
# params is a tuple with a string or list of strings
result = get_parameters(self, params[0])
else:
# Hierarchical addressing
result = get_parameters_hierarchical_addressing(self, params)
if pandas_output:
index = self.get('global_id')
if len(params) == 1 and is_literal(params[0]):
# params is a string
result = {params[0]: result}
elif len(params) > 1 and is_literal(params[1]):
# hierarchical, single string
result = {params[1]: result}
if len(self) == 1:
index = [index]
result = {key: [val] for key, val in result.items()}
result = pandas.DataFrame(result, index=index)
elif output == 'json':
result = to_json(result)
return result
def set(self, params=None, **kwargs):
"""
Set the parameters of nodes to params.
NB! This is almost the same implementation as `SetStatus`.
If `kwargs` is given, it must contain attribute names and values as keyword argument pairs. The values
can be single values or lists of the same size as the `NodeCollection`.
Parameters
----------
params : str or dict or list
Dictionary of parameters (either lists or single values) or list of dictionaries of parameters
of same length as the `NodeCollection`.
kwargs : keyword argument pairs
Named arguments of parameters of the elements in the `NodeCollection`.
Raises
------
TypeError
If the input params are of the wrong form.
KeyError
If the specified parameter does not exist for the nodes.
See Also
--------
:py:func:`get`,
:py:func:`SetStatus()<nest.lib.hl_api_info.SetStatus>`,
:py:func:`GetStatus()<nest.lib.hl_api_info.GetStatus>`
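Examples
--------
For illustration only; the model and parameter names (``iaf_psc_alpha``, ``V_m``, ``C_m``) are
assumptions and must exist for the nodes being set:
>>> nodes = nest.Create('iaf_psc_alpha', 3)
>>> nodes.set(V_m=-70.0)
>>> nodes.set({'V_m': [-70.0, -65.0, -60.0], 'C_m': 250.0})
>>> nodes.set([{'V_m': -70.0}, {'V_m': -65.0}, {'V_m': -60.0}])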
"""
if not self:
return
if kwargs and params is None:
params = kwargs
elif kwargs and params:
raise TypeError("must either provide params or kwargs, but not both.")
local_nodes = [self.local] if len(self) == 1 else self.local
if isinstance(params, dict) and all(local_nodes):
node_params = self[0].get()
contains_list = [is_iterable(vals) and key in node_params and not is_iterable(node_params[key]) for
key, vals in params.items()]
if any(contains_list):
temp_param = [{} for _ in range(self.__len__())]
for key, vals in params.items():
if not is_iterable(vals):
for temp_dict in temp_param:
temp_dict[key] = vals
else:
for i, temp_dict in enumerate(temp_param):
temp_dict[key] = vals[i]
params = temp_param
if (isinstance(params, (list, tuple)) and self.__len__() != len(params)):
raise TypeError("status dict must be a dict, or a list of dicts of length {} ".format(self.__len__()))
sli_func('SetStatus', self._datum, params)
def tolist(self):
"""
Convert `NodeCollection` to list.
"""
if self.__len__() == 0:
return []
return (list(self.get('global_id')) if len(self) > 1
else [self.get('global_id')])
def index(self, node_id):
"""
Find the index of a node ID in the `NodeCollection`.
Parameters
----------
node_id : int
Global ID to be found.
Raises
------
ValueError
If the node ID is not in the `NodeCollection`.
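Example
-------
A small sketch, assuming the nodes below are the first ones created so their IDs are 1..5:
>>> nodes = nest.Create('iaf_psc_alpha', 5)
>>> nodes.index(3)
2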
"""
index = sli_func('Find', self._datum, node_id)
if index == -1:
raise ValueError('{} is not in NodeCollection'.format(node_id))
return index
def __bool__(self):
"""Converts the NodeCollection to a bool. False if it is empty, True otherwise."""
return len(self) > 0
def __array__(self, dtype=None):
"""Convert the NodeCollection to a NumPy array."""
return numpy.array(self.tolist(), dtype=dtype)
def __getattr__(self, attr):
if not self:
raise AttributeError('Cannot get attribute of empty NodeCollection')
if attr == 'spatial':
metadata = sli_func('GetMetadata', self._datum)
val = metadata if metadata else None
super().__setattr__(attr, val)
return self.spatial
# NumPy compatibility check:
# raises AttributeError to tell NumPy that interfaces other than
# __array__ are not available (otherwise get_parameters would be
# queried, KeyError would be raised, and all would crash)
if attr.startswith('__array_'):
raise AttributeError
return self.get(attr)
def __setattr__(self, attr, value):
# `_datum` is the only property of NodeCollection that should not be
# interpreted as a property of the model
if attr == '_datum':
super().__setattr__(attr, value)
else:
self.set({attr: value})
class SynapseCollectionIterator(object):
"""
Iterator class for SynapseCollection.
"""
def __init__(self, synapse_collection):
self._iter = iter(synapse_collection._datum)
def __iter__(self):
return self
def __next__(self):
return SynapseCollection(next(self._iter))
class SynapseCollection(object):
"""
Class for Connections.
`SynapseCollection` represents the connections of a network. The class supports indexing, iteration, length and
equality. You can get and set connection parameters with the member functions :py:func:`get()` and
:py:func:`set()`. The member function :py:func:`sources()` returns an iterator over the
source nodes, while :py:func:`targets()` returns an iterator over the target nodes of the connections.
A SynapseCollection is created by the :py:func:`.GetConnections` function.
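Example
-------
A short sketch; the node model and synapse parameters used here are illustrative assumptions:
::
    nodes = nest.Create('iaf_psc_alpha', 2)
    nest.Connect(nodes, nodes, syn_spec={'weight': 1.5})
    conns = nest.GetConnections()
    print(conns.get('weight'))
    conns.set(weight=2.0)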
"""
_datum = None
def __init__(self, data):
if isinstance(data, list):
for datum in data:
if (not isinstance(datum, kernel.SLIDatum) or
datum.dtype != "connectiontype"):
raise TypeError("Expected Connection Datum.")
self._datum = data
elif data is None:
# We can have an empty SynapseCollection if there are no connections.
self._datum = data
else:
if (not isinstance(data, kernel.SLIDatum) or
data.dtype != "connectiontype"):
raise TypeError("Expected Connection Datum.")
# self._datum needs to be a list of Connection datums.
self._datum = [data]
self.print_full = False
def __iter__(self):
return SynapseCollectionIterator(self)
def __len__(self):
if self._datum is None:
return 0
return len(self._datum)
def __eq__(self, other):
if not isinstance(other, SynapseCollection):
raise NotImplementedError()
if self.__len__() != other.__len__():
return False
self_get = self.get(['source', 'target', 'target_thread',
'synapse_id', 'port'])
other_get = other.get(['source', 'target', 'target_thread',
'synapse_id', 'port'])
if self_get != other_get:
return False
return True
def __neq__(self, other):
if not isinstance(other, SynapseCollection):
raise NotImplementedError()
return not self == other
def __getitem__(self, key):
if isinstance(key, slice):
return SynapseCollection(self._datum[key])
else:
return SynapseCollection([self._datum[key]])
def __str__(self):
"""
Printing a `SynapseCollection` returns something of the form:
source target synapse model weight delay
-------- -------- --------------- -------- -------
1 4 static_synapse 1.000 1.000
2 4 static_synapse 2.000 1.000
1 3 stdp_synapse 4.000 1.000
1 4 stdp_synapse 3.000 1.000
2 3 stdp_synapse 3.000 1.000
2 4 stdp_synapse 2.000 1.000
If your SynapseCollection has 35 or more elements, only the first and last 15 connections are printed. To
display all, first set `print_full = True`.
::
conns = nest.GetConnections()
conns.print_full = True
print(conns)
"""
def format_row_(s, t, sm, w, dly):
try:
return f'{s:>{src_len-1}d} {t:>{trg_len}d} {sm:>{sm_len}s} {w:>#{w_len}.{4}g} {dly:>#{d_len}.{4}g}'
except ValueError:
# Used when we have many connections and print_full=False
return f'{s:>{src_len-1}} {t:>{trg_len}} {sm:>{sm_len}} {w:>{w_len}} {dly:>{d_len}}'
MAX_SIZE_FULL_PRINT = 35 # 35 is arbitrarily chosen.
params = self.get()
if len(params) == 0:
return 'The synapse collection does not contain any connections.'
srcs = params['source']
trgt = params['target']
wght = params['weight']
dlay = params['delay']
s_model = params['synapse_model']
if isinstance(srcs, int):
srcs = [srcs]
trgt = [trgt]
wght = [wght]
dlay = [dlay]
s_model = [s_model]
src_h = 'source'
trg_h = 'target'
sm_h = 'synapse model'
w_h = 'weight'
d_h = 'delay'
# Find maximum number of characters for each column, used to determine width of column
src_len = max(len(src_h) + 2, floor(log(max(srcs), 10)))
trg_len = max(len(trg_h) + 2, floor(log(max(trgt), 10)))
sm_len = max(len(sm_h) + 2, len(max(s_model, key=len)))
w_len = len(w_h) + 2
d_len = len(d_h) + 2
# Truncate the printed table when there are many connections and print_full is False.
if len(srcs) >= MAX_SIZE_FULL_PRINT and not self.print_full:
# u'\u22EE ' is the unicode for vertical ellipsis, used when we have many connections
srcs = srcs[:15] + [u'\u22EE '] + srcs[-15:]
trgt = trgt[:15] + [u'\u22EE '] + trgt[-15:]
wght = wght[:15] + [u'\u22EE '] + wght[-15:]
dlay = dlay[:15] + [u'\u22EE '] + dlay[-15:]
s_model = s_model[:15] + [u'\u22EE '] + s_model[-15:]
headers = f'{src_h:^{src_len}} {trg_h:^{trg_len}} {sm_h:^{sm_len}} {w_h:^{w_len}} {d_h:^{d_len}}' + '\n'
borders = '-'*src_len + ' ' + '-'*trg_len + ' ' + '-'*sm_len + ' ' + '-'*w_len + ' ' + '-'*d_len + '\n'
output = '\n'.join(format_row_(s, t, sm, w, d) for s, t, sm, w, d in zip(srcs, trgt, s_model, wght, dlay))
result = headers + borders + output
return result
def __getattr__(self, attr):
if attr == 'distance':
dist = sli_func('Distance', self._datum)
super().__setattr__(attr, dist)
return self.distance
return self.get(attr)
def __setattr__(self, attr, value):
# `_datum` is the only property of SynapseCollection that should not be
# interpreted as a property of the model
if attr == '_datum' or attr == 'print_full':
super().__setattr__(attr, value)
else:
self.set({attr: value})
def sources(self):
"""Returns iterator containing the source node IDs of the `SynapseCollection`."""
sources = self.get('source')
if not isinstance(sources, (list, tuple)):
sources = (sources,)
return iter(sources)
def targets(self):
"""Returns iterator containing the target node IDs of the `SynapseCollection`."""
targets = self.get('target')
if not isinstance(targets, (list, tuple)):
targets = (targets,)
return iter(targets)
def get(self, keys=None, output=''):
"""
Return a parameter dictionary of the connections.
If `keys` is a string, a list of values is returned, unless we have a
single connection, in which case the single value is returned.
`keys` may also be a list, in which case a dictionary with a list of
values is returned.
Parameters
----------
keys : str or list, optional
String or a list of strings naming model properties. get
then returns a single value or a dictionary with lists of values
belonging to the given `keys`.
output : str, ['pandas','json'], optional
If the returned data should be in a Pandas DataFrame or in a
JSON serializable format.
Returns
-------
dict:
All parameters, or, if keys is a list of strings, a dictionary with
lists of corresponding parameters
type:
If keys is a string, the corresponding parameter(s) is returned
Raises
------
TypeError
If input params are of the wrong form.
KeyError
If the specified parameter does not exist for the connections.
See Also
--------
set
Examples
--------
>>> conns.get()
{'delay': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
...
'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}
>>> conns.get('weight')
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
>>> conns[0].get('weight')
1.0
>>> nodes.get(['source', 'weight'])
{'source': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}
"""
pandas_output = output == 'pandas'
if pandas_output and not HAVE_PANDAS:
raise ImportError('Pandas could not be imported')
# Return empty tuple if we have no connections or if we have done a nest.ResetKernel()
num_conn = GetKernelStatus('num_connections')
if self.__len__() == 0 or num_conn == 0:
return ()
if keys is None:
cmd = 'GetStatus'
elif is_literal(keys):
# Extracting the correct values will be done in restructure_data below
cmd = 'GetStatus'
elif is_iterable(keys):
keys_str = " ".join("/{0}".format(x) for x in keys)
cmd = 'GetStatus {{ [ [ {0} ] ] get }} Map'.format(keys_str)
else:
raise TypeError("keys should be either a string or an iterable")
sps(self._datum)
sr(cmd)
result = spp()
# Need to restructure the data.
final_result = restructure_data(result, keys)
if pandas_output:
index = (self.get('source') if self.__len__() > 1 else
(self.get('source'),))
if is_literal(keys):
final_result = {keys: final_result}
final_result = pandas.DataFrame(final_result, index=index)
elif output == 'json':
final_result = to_json(final_result)
return final_result
def set(self, params=None, **kwargs):
"""
Set the parameters of the connections to `params`.
NB! This is almost the same implementation as SetStatus
If `kwargs` is given, it must contain attribute names and values as keyword argument pairs. The values
can be single values or lists of the same size as the `SynapseCollection`.
Parameters
----------
params : str or dict or list
Dictionary of parameters (either lists or single values) or list of dictionaries of parameters
of same length as `SynapseCollection`.
kwargs : keyword argument pairs
Named arguments of parameters of the elements in the `SynapseCollection`.
Raises
------
TypeError
If input params are of the wrong form.
KeyError
If the specified parameter does not exist for the connections.
See Also
--------
get
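Examples
--------
For illustration only; ``weight`` and ``delay`` are assumed to be parameters of the connections being set:
>>> conns = nest.GetConnections()
>>> conns.set(weight=2.0)
>>> conns.set({'weight': 2.0, 'delay': 1.5})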
"""
# This was added to ensure that the function is a nop (instead of,
# for instance, raising an exception) when applied to an empty
# SynapseCollection, or after having done a nest.ResetKernel().
if self.__len__() == 0 or GetKernelStatus()['network_size'] == 0:
return
if (isinstance(params, (list, tuple)) and
self.__len__() != len(params)):
raise TypeError("status dict must be a dict, or a list of dicts of length {}".format(self.__len__()))
if kwargs and params is None:
params = kwargs
elif kwargs and params:
raise TypeError("must either provide params or kwargs, but not both.")
if isinstance(params, dict):
node_params = self[0].get()
contains_list = [is_iterable(vals) and key in node_params and not is_iterable(node_params[key]) for
key, vals in params.items()]
if any(contains_list):
temp_param = [{} for _ in range(self.__len__())]
for key, vals in params.items():
if not is_iterable(vals):
for temp_dict in temp_param:
temp_dict[key] = vals
else:
for i, temp_dict in enumerate(temp_param):
temp_dict[key] = vals[i]
params = temp_param
params = broadcast(params, self.__len__(), (dict,), "params")
sps(self._datum)
sps(params)
sr('2 arraystore')
sr('Transpose { arrayload pop SetStatus } forall')
class CollocatedSynapses(object):
"""
Class for collocated synapse specifications.
Wrapper around a list of specifications, used when calling :py:func:`.Connect`.
Example
-------
::
nodes = nest.Create('iaf_psc_alpha', 3)
syn_spec = nest.CollocatedSynapses({'weight': 4., 'delay': 1.5},
{'synapse_model': 'stdp_synapse'},
{'synapse_model': 'stdp_synapse', 'alpha': 3.})
nest.Connect(nodes, nodes, conn_spec='one_to_one', syn_spec=syn_spec)
conns = nest.GetConnections()
print(conns.alpha)
print(len(syn_spec))
"""
def __init__(self, *args):
self.syn_specs = args
def __len__(self):
return len(self.syn_specs)
class Mask(object):
"""
Class for spatial masks.
Masks are used when creating connections when nodes have spatial extent. A mask
describes the area of the pool population that shall be searched to find nodes to
connect to for any given node in the driver population. Masks are created using
the :py:func:`.CreateMask` command.
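Example
-------
A minimal sketch; the mask type and radius are illustrative:
::
    circular_mask = nest.CreateMask('circular', {'radius': 0.25})
    print(circular_mask.Inside([0.0, 0.0]))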
"""
_datum = None
# The constructor should not be called by the user
def __init__(self, datum):
"""Masks must be created using the CreateMask command."""
if not isinstance(datum, kernel.SLIDatum) or datum.dtype != "masktype":
raise TypeError("expected mask Datum")
self._datum = datum
# Generic binary operation
def _binop(self, op, other):
if not isinstance(other, Mask):
raise NotImplementedError()
return sli_func(op, self._datum, other._datum)
def __or__(self, other):
return self._binop("or", other)
def __and__(self, other):
return self._binop("and", other)
def __sub__(self, other):
return self._binop("sub", other)
def Inside(self, point):
"""
Test if a point is inside a mask.
Parameters
----------
point : tuple/list of float values
Coordinate of point
Returns
-------
out : bool
True if the point is inside the mask, False otherwise
"""
return sli_func("Inside", point, self._datum)
class Parameter(object):
"""
Class for parameters
A parameter may be used as a probability kernel when creating
connections and nodes or as synaptic parameters (such as weight and delay).
Parameters are created using the :py:func:`.CreateParameter` command.
"""
_datum = None
# The constructor should not be called by the user
def __init__(self, datum):
"""Parameters must be created using the CreateParameter command."""
if not isinstance(datum,
kernel.SLIDatum) or datum.dtype != "parametertype":
raise TypeError("expected parameter datum")
self._datum = datum
# Generic binary operation
def _binop(self, op, other, params=None):
if isinstance(other, (int, float)):
other = CreateParameter('constant', {'value': float(other)})
if not isinstance(other, Parameter):
raise NotImplementedError()
if params is None:
return sli_func(op, self._datum, other._datum)
else:
return sli_func(op, self._datum, other._datum, params)
def __add__(self, other):
return self._binop("add", other)
def __radd__(self, other):
return self + other
def __sub__(self, other):
return self._binop("sub", other)
def __rsub__(self, other):
return self * (-1) + other
def __neg__(self):
return self * (-1)
def __mul__(self, other):
return self._binop("mul", other)
def __rmul__(self, other):
return self * other
def __div__(self, other):
return self._binop("div", other)
def __truediv__(self, other):
return self._binop("div", other)
def __pow__(self, exponent):
return sli_func("pow", self._datum, float(exponent))
def __lt__(self, other):
return self._binop("compare", other, {'comparator': 0})
def __le__(self, other):
return self._binop("compare", other, {'comparator': 1})
def __eq__(self, other):
return self._binop("compare", other, {'comparator': 2})
def __ne__(self, other):
return self._binop("compare", other, {'comparator': 3})
def __ge__(self, other):
return self._binop("compare", other, {'comparator': 4})
def __gt__(self, other):
return self._binop("compare", other, {'comparator': 5})
def GetValue(self):
"""
Compute value of parameter.
Returns
-------
out : value
The value of the parameter
See also
--------
CreateParameter
Example
-------
::
import nest
# normal distribution parameter
P = nest.CreateParameter('normal', {'mean': 0.0, 'sigma': 1.0})
# get out value
P.GetValue()
"""
return sli_func("GetValue", self._datum)
def is_spatial(self):
return sli_func('ParameterIsSpatial', self._datum)
def apply(self, spatial_nc, positions=None):
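"""
Apply the parameter to a spatial `NodeCollection`, or to a single node and a list of target positions.
This summary is inferred from the implementation below; see the NEST documentation for details.
"""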
if positions is None:
return sli_func('Apply', self._datum, spatial_nc)
else:
if len(spatial_nc) != 1:
raise ValueError('The NodeCollection must contain a single node ID only')
if not isinstance(positions, (list, tuple)):
raise TypeError('Positions must be a list or tuple of positions')
for pos in positions:
if not isinstance(pos, (list, tuple, numpy.ndarray)):
raise TypeError('Each position must be a list or tuple')
if len(pos) != len(positions[0]):
raise ValueError('All positions must have the same number of dimensions')
return sli_func('Apply', self._datum, {'source': spatial_nc, 'targets': positions})
def serializable(data):
"""Make data serializable for JSON.
Parameters
----------
data : any
Returns
-------
data_serialized : str, int, float, list, dict
Data can be encoded to JSON
"""
if isinstance(data, (numpy.ndarray, NodeCollection)):
return data.tolist()
if isinstance(data, SynapseCollection):
# Get full information from SynapseCollection
return serializable(data.get())
if isinstance(data, kernel.SLILiteral):
# Get name of SLILiteral.
return data.name
if isinstance(data, (list, tuple)):
return [serializable(d) for d in data]
if isinstance(data, dict):
return dict([(key, serializable(value)) for key, value in data.items()])
return data
def to_json(data, **kwargs):
"""Serialize data to JSON.
Parameters
----------
data : any
kwargs : keyword argument pairs
Named arguments of parameters for `json.dumps` function.
Returns
-------
data_json : str
JSON format of the data
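Example
-------
A small sketch (assumes ``nest`` is imported; the model name is illustrative):
::
    nodes = nest.Create('iaf_psc_alpha', 2)
    print(to_json(nodes.get(), indent=2))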
"""
data_serialized = serializable(data)
data_json = json.dumps(data_serialized, **kwargs)
return data_json
|
gpl-2.0
|
danstowell/gmphd
|
syntheticroc.py
|
1
|
4434
|
#!/usr/bin/env python
# script to generate ROC plot from multiple GM-PHD runs with different bias levels
# (c) 2012 Dan Stowell and Queen Mary University of London.
"""
This file is part of gmphd, GM-PHD filter in python by Dan Stowell.
gmphd is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
gmphd is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with gmphd. If not, see <http://www.gnu.org/licenses/>.
"""
from gmphd import *
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from syntheticexamplestuff import *
###############################################################
# user config options:
niters = 100
nruns = 20
birthprob = 0.1 # 0.05 # 0 # 0.2
survivalprob = 0.975 # 0.95 # 1
detectprob = 0.95# 0.999
clutterintensitytot = 5 #2 #4 # typical num clutter items per frame
biases = [1, 2, 4, 8, 16] # tendency to prefer false-positives over false-negatives in the filtered output
obsntypenames = ['chirp', 'spect']
transntype = 'vibrato' # 'fixedvel' or 'vibrato'
###############################################################
# setting up variables
transnmatrix = transntypes[transntype]
birthintensity1 = birthprob / len(birthgmm)
print "birthgmm: each component has weight %g" % birthintensity1
for comp in birthgmm:
comp.weight = birthintensity1
rocpoints = {}
for obsntype in obsntypenames:
obsnmatrix = obsntypes[obsntype]['obsnmatrix']
directlystatetospec = dot(obsntypes[obsntype]['obstospec'], obsnmatrix)
clutterintensity = clutterintensityfromtot(clutterintensitytot, obsntype)
print "clutterintensity: %g" % clutterintensity
rocpoints[obsntype] = [(0,0)]
# NOTE: all the runs are appended into one long "results" array! Can calc the roc point in one fell swoop, no need to hold separate.
# So, we concatenate one separate resultlist for each bias type.
# Then when we come to the end we calculate a rocpoint from each resultlist.
results = { bias: [] for bias in biases }
for whichrun in range(nruns):
print "===============================obsntype %s, run %i==============================" % (obsntype, whichrun)
### Initialise the true state and the model:
trueitems = []
g = Gmphd(birthgmm, survivalprob, 0.7, transnmatrix, 1e-9 * array([[1,0,0], [0,1,0], [0,0,1]]),
obsnmatrix, obsntypes[obsntype]['noisecov'], clutterintensity)
for whichiter in range(niters):
print "--%i----------------------------------------------------------------------" % whichiter
# the "real" state evolves
trueitems = updatetrueitems(trueitems, survivalprob, birthprob, obsnmatrix, transnmatrix)
# we make our observations of it
(obsset, groundtruth) = getobservations(trueitems, clutterintensitytot, obsntype, directlystatetospec, detectprob)
print "OBSSET sent to g.update():"
print obsset
# we run our inference using the observations
updateandprune(g, obsset)
for bias in biases:
resultdict = collateresults(g, obsset, bias, obsntype, directlystatetospec, trueitems, groundtruth)
results[bias].append(resultdict)
for bias in biases:
gt = [moment['groundtruth'] for moment in results[bias]]
ob = [moment['estspec'] for moment in results[bias]]
rocpoints[obsntype].append(calcroc(gt, ob))
print "rocpoints"
print rocpoints
rocpoints[obsntype].append((1,1))
###############################################################
# plot the results
fig = plt.figure()
plt.hold(True)
plt.plot([p[0] for p in rocpoints['spect']], [p[1] for p in rocpoints['spect']], 'r+--', label='spect')
plt.plot([p[0] for p in rocpoints['chirp']], [p[1] for p in rocpoints['chirp']], 'b*-', label='chirp')
plt.legend(loc=4)
plt.title('GMPHD, synthetic data (%s, %i runs per point, avg clutter %g)' % (transntype, nruns, clutterintensitytot))
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.xlim(xmin=0, xmax=0.4)
plt.ylim(ymin=0, ymax=1)
plt.savefig("plot_synthroc.pdf", papertype='A4', format='pdf')
fig.show()
raw_input("Press Enter to continue...")
|
gpl-3.0
|
cygnushan/measurement
|
SC_spectrum/Rt_mplCanvas.py
|
1
|
3063
|
# -*- coding: utf-8 -*-
from PyQt4 import QtGui
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from pylab import *
mpl.rcParams['font.sans-serif'] = ['SimHei'] #指定默认字体
mpl.rcParams['axes.unicode_minus'] = False #解决保存图像是负号'-'显示为方块的问题
import qmdz_const
class Rt_Canvas(FigureCanvas):
def __init__(self):
self.fig = Figure(figsize=(6,4), dpi=80,facecolor="white")
self.ax = self.fig.add_subplot(111)
FigureCanvas.__init__(self, self.fig)
FigureCanvas.setSizePolicy(self, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
self.ax.set_title(u"R-t谱")
self.ax.set_xlabel(u"时间(S)",labelpad=0)
self.ax.set_ylabel(u"电阻(Ω)",labelpad=0)
self.curveObj = None
def setxy(self,xmin,xmax,ymin,ymax):
self.ax.set_xlim(xmin,xmax)
self.ax.set_ylim(ymin,ymax)
self.draw()
def set_ylog(self, islog, ytype):
if islog:
self.ax.set_ylabel(u"电阻(logR)",labelpad=0)
self.draw()
def plot(self, datax, datay, style, label, type):
if type == 1:
self.curveObj,= self.ax.plot(datax,datay,style,label=label)
self.ax.legend(loc=1,fontsize='x-small')
else:
self.curveObj.set_data(datax,datay)
self.ax.relim()
self.ax.autoscale_view(True,True,True)
self.draw()
def save_pic(self, path):
self.fig.savefig(path)
def clear_lines(self):
from weakref import ref
# wr = ref(ax.lines[0])
# ax.lines.remove(wr())
axline = []
if self.ax.legend_:
self.ax.legend_.remove()
for i, line in enumerate(self.ax.lines):
print i,line
# self.ax.lines.pop(i)
# del line
axline.append(ref(self.ax.lines[i]))
for line in axline:
self.ax.lines.remove(line())
self.draw()
class Rt_CanvasWidget(QtGui.QWidget):
def __init__(self , parent =None):
QtGui.QWidget.__init__(self, parent)
self.canvas = Rt_Canvas()
self.vbl = QtGui.QVBoxLayout()
self.vbl.addWidget(self.canvas)
self.setLayout(self.vbl)
self.dataX= []
self.dataY= []
def generateData(self,x,y, style='b', label='RT', type=0):
self.canvas.plot(x,y,style,label,type)
def change_xy(self, xmin, xmax, ymin, ymax):
self.canvas.setxy(xmin, xmax, ymin, ymax)
def set_log(self, islog, ylabel):
self.canvas.set_ylog(islog, ylabel)
def save_curve(self, path):
self.canvas.save_pic(path)
def clear_curve(self):
self.canvas.clear_lines()
# ax.semilogx(x,y) #x轴为对数坐标轴
#
# ax.semilogy(x,y) #y轴为对数坐标轴
#
# ax.loglog(x,y) #双对数坐标轴
|
mit
|
tectronics/pmtk3
|
python/demos/mkWebGraphDemo.py
|
26
|
2970
|
#!/usr/bin/python2.6
import os, sys
import numpy
import networkx # http://networkx.lanl.gov/
import cPickle
'''Parse some html pages and build an adjacency matrix.
Written by Eric Brochu and Nando de Freitas.
Modified by Kevin Murphy, 20 Feb 2011.
'''
def parseFiles(folder):
'''Make a dictionary, keys are filenames, value is list of files that are pointed to'''
fnames = os.listdir(folder)
links = {}
for file in fnames:
links[file] = []
filename = os.path.join(folder, file)
print 'processing ', filename
f = open(filename, 'r')
for line in f.readlines():
while True:
p = line.partition('<a href="http://')[2]
if p=='':
break
(url, _, line) = p.partition('\">')
links[file].append(url)
print "file %s links to %s" % (file, url)
f.close()
return links
def mkGraph(mydict):
'''Convert dictionary into weighted digraph'''
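# Illustrative example of the conversion: {'a.html': ['b.com', 'b.com', 'c.com']}
# yields edges a.html -> b.com with weight 2 and a.html -> c.com with weight 1.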
DG = networkx.DiGraph()
DG.add_nodes_from(mydict.keys())
edges = []
for key, values in mydict.iteritems():
eweight = {}
# for each node on our list of values, increment a counter
for v in values:
if v in eweight:
eweight[v] += 1
else:
eweight[v] = 1
# for each unique target we connect to, create a weighted edge
for succ, weight in eweight.iteritems():
edges.append([key, succ, {'weight':weight}])
DG.add_edges_from(edges)
return DG
def plotGraph(DG):
'''Visualize network'''
pmtkFigureFolder = os.environ['PMTKFIGUREFOLDER']
import matplotlib.pyplot as plt
plt.figure(figsize=(9,9))
pos=networkx.spring_layout(DG,iterations=10)
#networkx.draw(DG,pos,node_size=0,alpha=0.4,edge_color='r', font_size=16)
networkx.draw_circular(DG)
plt.savefig(os.path.join(pmtkFigureFolder, "link_graph.pdf"))
plt.show()
#def pmtkInit():
# pmtkFolder = os.environ['PMTKPYTHONHOME']
# execfile(os.path.join(pmtkFolder, 'pmtk3PythonInit.py'))
def DGtoAdjMat(DG):
NX = DG.number_of_nodes()
fnames = DG.nodes()
T = numpy.matrix(numpy.zeros((NX, NX)))
# Map from names to numbers
f2i = dict((fn, i) for i, fn in enumerate(fnames))
for predecessor, successors in DG.adj.iteritems():
for s, edata in successors.iteritems():
T[f2i[predecessor], f2i[s]] = edata['weight']
return T
def main():
#pmtkInit()
pmtkDataFolder = os.environ['PMTKDATAFOLDER']
mydict = parseFiles(os.path.join(pmtkDataFolder, 'smallWeb'))
fnames = mydict.keys()
DG = mkGraph(mydict)
plotGraph(DG)
#pmtkTmpFolder = os.environ['PMTKTMPFOLDER']
# Save file
tmpName = os.path.join(pmtkDataFolder, 'smallWeb', 'DG.pkl')
cPickle.dump(DG, open(tmpName, 'w'))
# DG = cPickle.load(fname)
DGtoAdjMat(DG)
if __name__ == '__main__':
main()
|
mit
|
square/pysurvival
|
pysurvival/models/simulations.py
|
1
|
16325
|
from __future__ import absolute_import
import numpy as np
import pandas as pd
import random
import scipy
import copy
from pysurvival import utils
from pysurvival.models import BaseModel
# %matplotlib inline
# List of Survival Distributions
DISTRIBUTIONS = ['Exponential',
'Weibull',
'Gompertz',
'Log-Logistic',
'Log-Normal',]
# List of risk types
RISK_TYPES = ['Linear', 'Square', 'Gaussian']
class SimulationModel(BaseModel):
"""
A general framework for simulating right-censored survival data
for proportional hazards models by incorporating
* a baseline hazard function from a known survival distribution,
* a set of covariates.
The framework is based on "Generating Survival Times to Simulate
Cox Proportional Hazards Models"
https://www.ncbi.nlm.nih.gov/pubmed/22763916
The formula for the different survival times and functions, and hazard
functions can be found at :
http://data.princeton.edu/pop509/ParametricSurvival.pdf
Parameters:
-----------
* survival_distribution: string (default = 'exponential')
Defines a known survival distribution. The available options are:
- Exponential
- Weibull
- Gompertz
- Log-Logistic
- Log-Normal
* risk_type: string (default='linear')
Defines the type of risk function. The available options are:
- Linear
- Square
- Gaussian
* alpha: double (default = 1.)
the scale parameter
* beta: double (default = 1.)
the shape parameter
* bins: int (default=100)
the number of bins of the time axis
* censored_parameter: double (default = 1.)
coefficient used to calculate the censored distribution. This
distribution is a normal such that N(loc=censored_parameter, scale=5)
* risk_parameter: double (default = 1.)
Scaling coefficient of the risk score such that:
- linear: r(x) = exp(<x, W>)
- square: r(x) = exp(risk_parameter*(<x, W>)^2)
- gaussian: r(x) = exp( exp(-(<x, W>)^2/risk_parameter) )
<.,.> is the dot product
"""
def __init__(self, survival_distribution = 'exponential',
risk_type = 'linear', censored_parameter = 1., alpha = 1, beta = 1.,
bins = 100, risk_parameter = 1.):
# Saving the attributes
self.censored_parameter = censored_parameter
self.alpha = alpha
self.beta = beta
self.risk_parameter = risk_parameter
self.bins = bins
self.features = []
# Checking risk_type
if any([risk_type.lower() == r.lower() for r in RISK_TYPES ]):
self.risk_type = risk_type
else:
error = "{} isn't a valid risk type. "
error += "Only {} are currently available."
error = error.format(risk_type, ", ".join(RISK_TYPES))
raise NotImplementedError(error)
# Checking distribution
if any([survival_distribution.lower() == d.lower() \
for d in DISTRIBUTIONS ]):
self.survival_distribution = survival_distribution
else:
error = "{} isn't a valid survival distribution. "
error += "Only {} are currently available."
error = error.format(survival_distribution,", ".join(DISTRIBUTIONS))
raise NotImplementedError(error)
# Initializing the elements from BaseModel
super(SimulationModel, self).__init__(auto_scaler = True)
@staticmethod
def random_data(N):
"""
Generating an array of size N from a random distribution -- the available
distributions are:
* binomial,
* chisquare,
* exponential,
* gamma,
* normal,
* uniform
* laplace
"""
index = np.random.binomial(n = 4, p = 0.5)
distributions = {
'binomial_a': np.random.binomial(n = 20, p = 0.6, size = N ),
'binomial_b': np.random.binomial(n = 200, p = 0.6, size = N ),
'chisquare': np.random.chisquare(df = 10, size = N ),
'exponential_a': np.random.exponential(scale=0.1, size = N ),
'exponential_b': np.random.exponential(scale=0.01, size = N ),
'gamma': np.random.gamma(shape=2., scale=2., size = N ),
'normal_a': np.random.normal(loc=-1.0, scale=5.0, size=N ),
'normal_b': np.random.normal(loc=10.0, scale=10.0, size=N ),
'uniform_a': np.random.uniform(low=-2.0, high=10.0, size=N ),
'uniform_b': np.random.uniform(low=-20.0, high=100.0, size=N ),
'laplace': np.random.laplace(loc=0.0, scale=1.0, size=N )
}
list_distributions = copy.deepcopy(list(distributions.keys()))
random.shuffle(list_distributions)
key = list_distributions[ index ]
return key, distributions[key]
def time_function(self, BX):
"""
Calculating the survival times based on the given distribution
T = H^(-1)( -log(U)/risk_score ), where:
* H is the cumulative baseline hazard function
(H^(-1) is the inverse function)
* U is a random variable uniform - Uni[0,1].
The method is inspired by https://gist.github.com/jcrudy/10481743
"""
# Calculating scale coefficient using the features
num_samples = BX.shape[0]
lambda_exp_BX = np.exp(BX)*self.alpha
lambda_exp_BX = lambda_exp_BX.flatten()
# Generating random uniform variables
U = np.random.uniform(0, 1, num_samples)
# Exponential
if self.survival_distribution.lower().startswith('exp') :
self.survival_distribution = 'Exponential'
return - np.log( U )/( lambda_exp_BX )
# Weibull
elif self.survival_distribution.lower().startswith('wei') :
self.survival_distribution = 'Weibull'
return np.power( - np.log( U )/( lambda_exp_BX ), 1./self.beta )
# Gompertz
elif self.survival_distribution.lower().startswith('gom') :
self.survival_distribution = 'Gompertz'
return ( 1./self.beta)*\
np.log( 1 - self.beta*np.log( U )/(lambda_exp_BX) )
# Log-Logistic
elif 'logistic' in self.survival_distribution.lower() :
self.survival_distribution = 'Log-Logistic'
return np.power( U/(1.-U), 1./self.beta )/(lambda_exp_BX )
# Log-Normal
elif 'normal' in self.survival_distribution.lower() :
self.survival_distribution = 'Log-Normal'
W = np.random.normal(0, 1, num_samples)
return lambda_exp_BX*np.exp(self.beta*W)
def hazard_function(self, t, BX):
""" Calculating the hazard function based on the given distribution """
# Calculating scale coefficient using the features
_lambda = self.alpha*np.exp( BX )
# Exponential
if self.survival_distribution.lower().startswith( 'exp' ) :
return np.repeat(_lambda, len(t))
# Weibull
elif self.survival_distribution.lower().startswith('wei'):
return _lambda*self.beta*np.power( t, self.beta-1 )
# Gompertz
elif self.survival_distribution.lower().startswith('gom'):
return _lambda*np.exp(self.beta*t )
# Log-Logistic
elif self.survival_distribution.lower().endswith('logistic'):
numerator = _lambda*self.beta*np.power((_lambda*t), self.beta-1 )
denominator = (1 + np.power( (_lambda*t), self.beta) )
return numerator/denominator
# Log-Normal
elif self.survival_distribution.lower().endswith('normal'):
arg_normal = (np.log(t) - np.log(_lambda))/self.beta
numerator = (1./(t*self.beta))*scipy.stats.norm.pdf( arg_normal )
denominator = 1. - scipy.stats.norm.cdf(arg_normal)
return numerator/denominator
def survival_function(self, t, BX):
"""
Calculating the survival function based on the given
distribution
"""
# Calculating scale coefficient using the features
_lambda = self.alpha*np.exp( BX )
# Exponential
if self.survival_distribution.lower().startswith( 'exp' ) :
return np.exp( -t*_lambda )
# Weibull
elif self.survival_distribution.lower().startswith('wei'):
return np.exp( -np.power(t, self.beta)*_lambda )
# Gompertz
elif self.survival_distribution.lower().startswith('gom'):
return np.exp( -_lambda/self.beta*( np.exp(self.beta*t) - 1) )
# Log-Logistic
elif self.survival_distribution.lower().endswith('logistic'):
return 1./(1.+ np.power(_lambda*t, self.beta) )
# Log-Normal
elif self.survival_distribution.lower().endswith('normal'):
arg_cdf = (np.log(t) - np.log(_lambda))/self.beta
return 1. - scipy.stats.norm.cdf(arg_cdf)
def risk_function(self, x_std):
""" Calculating the risk function based on the given risk type """
# Dot product
risk = np.dot( x_std, self.feature_weights )
# Choosing the type of risk
if self.risk_type.lower() == 'linear' :
return risk.reshape(-1, 1)
elif self.risk_type.lower() == 'square' :
risk = np.square(risk*self.risk_parameter)
elif self.risk_type.lower() == 'gaussian' :
risk = np.square(risk)
risk = np.exp( - risk*self.risk_parameter)
return risk.reshape(-1, 1)
def generate_data(self, num_samples = 100, num_features = 3,
feature_weights = None):
"""
Generating a dataset of simulated survival times from a given
distribution through the hazard function using the Cox model
Parameters:
-----------
* `num_samples`: **int** *(default=100)* --
Number of samples to generate
* `num_features`: **int** *(default=3)* --
Number of features to generate
* `feature_weights`: **array-like** *(default=None)* --
list of the coefficients of the underlying Cox-Model.
The features linked to each coefficient are generated
from random distribution from the following list:
* binomial
* chisquare
* exponential
* gamma
* normal
* uniform
* laplace
If None then feature_weights = [1.]*num_features
Returns:
--------
* dataset: pandas.DataFrame
dataset of simulated survival times, event status and features
Example:
--------
from pysurvival.models.simulations import SimulationModel
# Initializing the simulation model
sim = SimulationModel( survival_distribution = 'gompertz',
risk_type = 'linear',
censored_parameter = 5.0,
alpha = 0.01,
beta = 5., )
# Generating N Random samples
N = 1000
dataset = sim.generate_data(num_samples = N, num_features=5)
# Showing a few data-points
dataset.head()
"""
# Data parameters
self.num_variables = num_features
if feature_weights is None :
self.feature_weights = [1.]*self.num_variables
feature_weights = self.feature_weights
else:
feature_weights = utils.check_data(feature_weights)
if num_features != len(feature_weights):
error = "The length of feature_weights ({}) "
error += "and num_features ({}) are not the same."
error = error.format(len(feature_weights), num_features)
raise ValueError(error)
self.feature_weights = feature_weights
# Generating random features
# Creating the features
X = np.zeros((num_samples, self.num_variables))
columns = []
for i in range( self.num_variables ) :
key, X[:, i] = self.random_data(num_samples)
columns.append( 'x_' + str(i+1) )
X_std = self.scaler.fit_transform( X )
BX = self.risk_function( X_std )
# Building the survival times
T = self.time_function(BX)
C = np.random.normal( loc = self.censored_parameter,
scale = 5, size = num_samples )
C = np.maximum(C, 0.)
time = np.minimum( T, C )
E = 1.*(T == time)
# Building dataset
self.features = columns
self.dataset = pd.DataFrame( data = np.c_[X, time, E],
columns = columns + ['time', 'event'] )
# Building the time axis and time buckets
self.times = np.linspace(0., max(self.dataset['time']), self.bins)
self.get_time_buckets()
# Building baseline functions
self.baseline_hazard = self.hazard_function(self.times, 0)
self.baseline_survival = self.survival_function(self.times, 0)
# Printing summary message
message_to_print = "Number of data-points: {} - Number of events: {}"
print( message_to_print.format(num_samples, sum(E)) )
return self.dataset
def predict(self, x, t = None):
"""
Predicting the hazard, density and survival functions
Parameters:
-----------
* x: pd.Dataframe or np.ndarray or list
x is the testing dataset containing the features
x should not be standardized before, the model
will take care of it
* t: float (default=None)
Time at which hazard, density and survival functions
should be calculated. If None, the method returns
the functions for all times t.
"""
# Convert x into the right format
x = utils.check_data(x)
# Scaling the dataset
if x.ndim == 1:
x = self.scaler.transform( x.reshape(1, -1) )
elif x.ndim == 2:
x = self.scaler.transform( x )
else:
# Ensuring x has 2 dimensions
if x.ndim == 1:
x = np.reshape(x, (1, -1))
# Calculating risk_score, hazard, density and survival
BX = self.risk_function(x)
hazard = self.hazard_function(self.times, BX.reshape(-1, 1))
survival = self.survival_function(self.times, BX.reshape(-1, 1))
density = (hazard*survival)
if t is None:
return hazard, density, survival
else:
min_abs_value = [abs(a_j_1-t) for (a_j_1, a_j) in self.time_buckets]
index = np.argmin(min_abs_value)
return hazard[:, index], density[:, index], survival[:, index]
def predict_risk(self, x):
"""
Predicting the risk score function
Parameters:
-----------
* x: pd.Dataframe or np.ndarray or list
x is the testing dataset containing the features
x should not be standardized before, the model
will take care of it
"""
# Convert x into the right format
x = utils.check_data(x)
# Scaling the dataset
if x.ndim == 1:
x = self.scaler.transform( x.reshape(1, -1) )
elif x.ndim == 2:
x = self.scaler.transform( x )
else:
# Ensuring x has 2 dimensions
if x.ndim == 1:
x = np.reshape(x, (1, -1))
# Calculating risk_score
risk_score = self.risk_function(x)
return risk_score
|
apache-2.0
|
vineet-rh/incubator-airflow
|
docs/conf.py
|
33
|
8957
|
# -*- coding: utf-8 -*-
#
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import mock
MOCK_MODULES = [
'apiclient',
'apiclient.discovery',
'apiclient.http',
'mesos',
'mesos.interface',
'mesos.native',
'oauth2client.service_account',
'pandas.io.gbq',
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# Hack to allow a piece of the code to behave differently while
# the docs are being built. The main objective was to alter the
# behavior of utils.apply_default, which was hiding function headers
os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE'
from airflow import settings
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinxarg.ext',
]
viewcode_import = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Airflow'
#copyright = u''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '1.0.0'
# The full version, including alpha/beta/rc tags.
#release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Airflow Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = ""
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Airflowdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Airflow.tex', u'Airflow Documentation',
u'Maxime Beauchemin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'airflow', u'Airflow Documentation',
[u'Maxime Beauchemin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [(
'index', 'Airflow', u'Airflow Documentation',
u'Maxime Beauchemin', 'Airflow',
'Airflow is a system to programmatically author, schedule and monitor data pipelines.',
'Miscellaneous'
),]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
apache-2.0
|
mjgrav2001/scikit-learn
|
examples/plot_multilabel.py
|
236
|
4157
|
# Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
|
bsd-3-clause
|