repo_name (string, 6–112 chars) | path (string, 4–204 chars) | copies (string, 1–3 chars) | size (string, 4–6 chars) | content (string, 714–810k chars) | license (string, 15 classes) |
---|---|---|---|---|---|
binghongcha08/pyQMD | GWP/2D/1.0.2/traj.py | 14 | 1289 | #!/usr/bin/python
import numpy as np
import pylab as plt
import seaborn as sns
sns.set_context("poster")
#with open("traj.dat") as f:
# data = f.read()
#
# data = data.split('\n')
#
# x = [row.split(' ')[0] for row in data]
# y = [row.split(' ')[1] for row in data]
#
# fig = plt.figure()
#
# ax1 = fig.add_subplot(111)
#
# ax1.set_title("Plot title...")
# ax1.set_xlabel('your x label..')
# ax1.set_ylabel('your y label...')
#
# ax1.plot(x,y, c='r', label='the data')
#
# leg = ax1.legend()
#fig = plt.figure()
plt.subplot(121)
#plt.ylim(-8,8)
data = np.genfromtxt(fname='q.dat')
#data = np.loadtxt('traj.dat')
for x in range(1,data.shape[1]):
plt.plot(data[:,0],data[:,x])
#plt.figure(1)
#plt.plot(x,y1,'-')
#plt.plot(x,y2,'g-')
plt.xlabel('time')
#plt.ylabel('position')
#plt.title('traj')
ax2 = plt.subplot(122)
data = np.genfromtxt(fname='c.dat')
#data = np.loadtxt('traj.dat')
for x in range(1,data.shape[1]):
plt.plot(data[:,0],data[:,x])
plt.xlabel('time')
ax2.yaxis.tick_right()
ax2.yaxis.set_ticks_position('both')
plt.ylim(-0.2,5)
#plt.subplot(2,2,3)
#data = np.genfromtxt(fname='norm')
#plt.plot(data[:,0],data[:,1],'r-',linewidth=2)
#plt.ylabel('Norm')
#plt.ylim(0,2)
plt.legend()
plt.savefig('traj.pdf')
plt.show()
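# Hedged sketch: the plt.legend() call above has no labeled artists, so most
# matplotlib versions draw an empty legend (or warn). Assuming the same q.dat
# layout, labeling each column would look like:
#
# for x in range(1, data.shape[1]):
#     plt.plot(data[:, 0], data[:, x], label='column %d' % x)
# plt.legend(loc='best')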
| gpl-3.0 |
zaxtax/scikit-learn | benchmarks/bench_isolation_forest.py | 40 | 3136 | """
==========================================
IsolationForest benchmark
==========================================
A test of IsolationForest on classical anomaly detection datasets.
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
from sklearn.metrics import roc_curve, auc
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle as sh
np.random.seed(1)
datasets = ['http']#, 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
for dat in datasets:
# loading and vectorization
print('loading data')
if dat in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dat, shuffle=True, percent10=True)
X = dataset.data
y = dataset.target
if dat == 'shuttle':
dataset = fetch_mldata('shuttle')
X = dataset.data
y = dataset.target
sh(X, y)
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
if dat == 'forestcover':
dataset = fetch_covtype(shuffle=True)
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print('vectorizing data')
if dat == 'SF':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != 'normal.').astype(int)
if dat == 'SA':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
lb.fit(X[:, 2])
x2 = lb.transform(X[:, 2])
lb.fit(X[:, 3])
x3 = lb.transform(X[:, 3])
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != 'normal.').astype(int)
if dat == 'http' or dat == 'smtp':
y = (y != 'normal.').astype(int)
n_samples, n_features = np.shape(X)
n_samples_train = n_samples // 2
n_samples_test = n_samples - n_samples_train
X = X.astype(float)
X_train = X[:n_samples_train, :]
X_test = X[n_samples_train:, :]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
print('IsolationForest processing...')
model = IsolationForest(bootstrap=True, n_jobs=-1)
tstart = time()
model.fit(X_train)
fit_time = time() - tstart
tstart = time()
scoring = model.predict(X_test) # the lower, the more normal
predict_time = time() - tstart
fpr, tpr, thresholds = roc_curve(y_test, scoring)
AUC = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC for %s (area = %0.3f, train-time: %0.2fs, test-time: %0.2fs)' % (dat, AUC, fit_time, predict_time))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
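# Hedged note: in later scikit-learn releases IsolationForest.predict returns
# +1/-1 labels rather than a continuous score, so the ROC input above would
# instead be built from decision_function (higher = more normal), keeping the
# same y_test convention (1 = anomaly), e.g.:
#
# scoring = -model.decision_function(X_test)  # higher = more abnormal
# fpr, tpr, thresholds = roc_curve(y_test, scoring)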
| bsd-3-clause |
glennq/scikit-learn | sklearn/externals/joblib/__init__.py | 23 | 5101 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long-running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
over, for instance when prototyping computationally heavy jobs (as in
scientific development), but hand-crafted solutions to alleviate this
issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application's status or a computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanisms to help track what
has been run, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.10.3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
from .parallel import register_parallel_backend
from .parallel import parallel_backend
from .parallel import effective_n_jobs
__all__ = ['Memory', 'MemorizedResult', 'PrintTime', 'Logger', 'hash', 'dump',
'load', 'Parallel', 'delayed', 'cpu_count', 'effective_n_jobs',
'register_parallel_backend', 'parallel_backend']
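# A minimal persistence sketch for feature 4 above (illustrative file path;
# assumes this joblib version's integer compress levels 0-9). Guarded so it
# never runs on a normal import:
if __name__ == '__main__':
    import numpy as np
    obj = {'coef': np.arange(5, dtype=float), 'n_iter': 3}
    dump(obj, '/tmp/joblib_example.pkl', compress=3)  # compressed pickle on disk
    restored = load('/tmp/joblib_example.pkl')        # read it back
    assert restored['n_iter'] == 3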
| bsd-3-clause |
davidgbe/scikit-learn | sklearn/svm/classes.py | 126 | 40114 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
multi_class: string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
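Examples
--------
A minimal usage sketch (illustrative only; the exact repr of fitted
estimators varies between versions, hence the doctest skips):
>>> import numpy as np
>>> from sklearn.svm import LinearSVC
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = LinearSVC(random_state=0).fit(X, y)  # doctest: +SKIP
>>> clf.predict([[-0.8, -1]])  # doctest: +SKIP
array([1])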
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
Specifies the loss function. 'epsilon_insensitive' is the standard SVR
(epsilon-insensitive) loss, while 'squared_epsilon_insensitive' is the
squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
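Examples
--------
A minimal usage sketch (illustrative only; fitted values depend on the
random data, hence the doctest skips):
>>> import numpy as np
>>> from sklearn.svm import LinearSVR
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(20, 3)
>>> y = X[:, 0] + 0.1 * rng.randn(20)
>>> reg = LinearSVR(random_state=0).fit(X, y)  # doctest: +SKIP
>>> reg.coef_.shape  # doctest: +SKIP
(3,)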
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to datasets with more than a couple of tens of thousands of samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC but for regression: uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
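Examples
--------
A minimal usage sketch (illustrative only; +1 marks inliers and -1 marks
outliers, but the exact labels depend on nu and gamma):
>>> import numpy as np
>>> from sklearn.svm import OneClassSVM
>>> X = np.array([[0.0, 0.0], [0.1, 0.0], [0.0, 0.1], [5.0, 5.0]])
>>> clf = OneClassSVM(nu=0.25, gamma='auto').fit(X)
>>> clf.predict([[0.05, 0.05], [4.0, 4.0]])  # doctest: +SKIP
array([ 1, -1])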
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
| bsd-3-clause |
ky822/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 244 | 1593 | import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
clf.fit(iris.data, iris.target)
assert_raises(ValueError, clf.transform, iris.data, "gobbledigook")
assert_raises(ValueError, clf.transform, iris.data, ".5 * gobbledigook")
| bsd-3-clause |
shaunwbell/FOCI_Analysis | ReanalysisRetreival_orig/GOA_transplort_plot.py | 1 | 5933 | #!/usr/bin/env python
"""
GOA_transplort_plot.py
Plot transport data from GOA for 2015 ngoa paper.
Reads EPIC netcdf of transport as a function of time
"""
# Standard library.
import datetime, sys
# System Stack
import argparse
# Scientific stack.
import numpy as np
from netCDF4 import Dataset
# Visual Stack
import matplotlib.pyplot as plt
from matplotlib.dates import MonthLocator, DateFormatter
__author__ = 'Shaun Bell'
__email__ = '[email protected]'
__created__ = datetime.datetime(2015, 06, 17)
__modified__ = datetime.datetime(2015, 06, 17)
__version__ = "0.1.0"
__status__ = "Development"
"""--------------------------------netcdf Routines---------------------------------------"""
def get_global_atts(nchandle):
g_atts = {}
att_names = nchandle.ncattrs()
for name in att_names:
g_atts[name] = nchandle.getncattr(name)
return g_atts
def get_vars(nchandle):
return nchandle.variables
def ncreadfile_dic(nchandle, params):
data = {}
for j, v in enumerate(params):
if v in nchandle.variables.keys(): #check for nc variable
data[v] = nchandle.variables[v][:]
else: #if parameter doesn't exist fill the array with zeros
data[v] = None
return (data)
"""--------------------------------time Routines---------------------------------------"""
def date2pydate(file_time, file_time2=None, file_flag='EPIC'):
if file_flag == 'EPIC':
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
try: #if input is an array
python_time = [None] * len(file_time)
for i, val in enumerate(file_time):
pyday = file_time[i] - offset
pyfrac = file_time2[i] / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time[i] = (pyday + pyfrac)
except:
pyday = file_time - offset
pyfrac = file_time2 / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time = (pyday + pyfrac)
else:
print "time flag not recognized"
sys.exit()
return np.array(python_time)
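# Worked sketch of the EPIC conversion above (hedged): with file_time = 2440000
# and file_time2 = 43200000 ms, pyday equals the ordinal of 1968-05-23 and
# pyfrac equals 0.5, so the returned serial date is noon on 1968-05-23 under
# this EPIC reference epoch.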
"""------------------------------- MAIN ----------------------------------------"""
parser = argparse.ArgumentParser(description='EPIC NetCDF Timeseries Plot')
parser.add_argument('DataPath', metavar='DataPath', type=str, help='full path to file')
parser.add_argument("-monthly_average",'--monthly_average', action="store_true", help='calculate monthly average')
parser.add_argument("-annual_signal",'--annual_signal', action="store_true", help='calculate total annual signal')
args = parser.parse_args()
###nc readin
nchandle = Dataset(args.DataPath,'a')
global_atts = get_global_atts(nchandle)
vars_dic = get_vars(nchandle)
data1 = ncreadfile_dic(nchandle, vars_dic.keys())
nchandle.close()
### convert epic time to python serialdate
time1 = date2pydate(data1['time'],data1['time2'])
xdata = data1['TR_388'][:,0,0,0]
label = 'TR_388'
scale = 10**6
if args.monthly_average or args.annual_signal:
proc_data = {}
ave_data = {}
ave_data_stats = {}
months = ['01','02','03','04','05','06','07','08','09','10','11','12']
for i,v in enumerate(time1):
proc_data[datetime.datetime.fromordinal(int(v)).strftime('%Y-%m-%d')] = data1['TR_388'][i,0,0,0]
for years in np.arange(1984,2007):
for month in months:
sum = 0
count = 0
for k in proc_data.keys():
if str(years)+'-'+month in k:
if proc_data[k] < 1e30:
count += 1
sum = sum + proc_data[k]
print str(years)+'-'+month+'-15'
if count != 0:
ave_data[str(years)+'-'+month+'-15'] = sum / scale / count
ave_data_stats[str(years)+'-'+month+'-15'] = count
else:
ave_data[str(years)+'-'+month+'-15'] = 1e35
ave_data_stats[str(years)+'-'+month+'-15'] = 0
time1 = []
xdata = []
### build x and y data
# for x data we want all the months of the entire record to show up at the same 'x-point' so every feb for all years is collocated
for k in sorted(ave_data.keys()):
time1 = time1 + [datetime.datetime.strptime("2000-"+"-".join(k.split('-')[1:]),'%Y-%m-%d').toordinal()]
xdata = xdata + [ave_data[k]]
time1 = np.array(time1)
xdata = np.array(xdata)
if args.annual_signal:
timet = []
xdatat = []
for month in months:
time_base = datetime.datetime.strptime("2000-"+month+"-15",'%Y-%m-%d').toordinal()
ave_ind = np.where(time1==time_base) #arbitrary 2000 year chosen above for plotting
dave_ind = np.where(xdata[ave_ind] != 1e35)
timet = timet + [time_base]
xdatat = xdatat + [np.mean(xdata[ave_ind][dave_ind])]
time1 = np.array(timet)
xdata = np.array(xdatat)
### plot variable against time
fig = plt.figure(2)
ax2 = plt.subplot2grid((3, 1), (1, 0), colspan=1, rowspan=3)
p2 = ax2.plot(time1, xdata,'r.', markersize=12)
#ax2.set_ylim([xdata[xdata != 1e35].min()-0.5,xdata[xdata != 1e35].max()+0.5])
ax2.set_ylim([-2,3])
ax2.set_xlim([datetime.datetime.strptime("2000-1-1",'%Y-%m-%d').toordinal(),datetime.datetime.strptime("2001-1-1",'%Y-%m-%d').toordinal()])
plt.ylabel(label)
ax2.xaxis.set_major_locator(MonthLocator(interval=1))
ax2.xaxis.set_minor_locator(MonthLocator())
ax2.xaxis.set_major_formatter(DateFormatter('%b'))
fig.autofmt_xdate()
fullpath = '/Users/bell/temp/'
user_append = 'minus_downstream_monthlyaverage_ma'
plt.savefig(fullpath + user_append + '_ngoa_fig8.svg', bbox_inches='tight', dpi = (300))
plt.close()
| mit |
traxys/EyeTrackingInfo | mainProg.py | 1 | 2118 | import numpy as np
# For loading and handling images
from PIL import Image, ImageTk
import tkinter as tk
from time import sleep
# Makes the progress output look nice
import progressbar
# Wait time between each iteration
LOOP_DELAY = 10
# 360-degree room over which the cube can be superimposed
def loadRoom(hasBar=True):
'''Returns an array containing 360 frames as PIL objects'''
room = []
if hasBar:
bar = progressbar.ProgressBar(
widgets=[
"Loading Room",
progressbar.Bar(),
"(",progressbar.ETA(),")"
],
max_value=360)
name = "Loading Room"
for i in bar(range(1,361)):
room.append( Image.open("room/deg_"+str(i)+".png") )
else:
for i in range(1,361):
room.append( Image.open("room/deg_"+str(i)+".png") )
return room
# Entering and exiting the loop
runTrack = 0
def stopTrack():
'''Tk event func'''
global runTrack
runTrack = -1
def startTrack():
'''Tk event func'''
global runTrack
runTrack = 1
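# runTrack acts as a small state machine: 0 = waiting for the user to press
# Start, 1 = start requested (swap the buttons), 2 = tracking loop running,
# -1 = stop requested (terminate the program).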
# Main program loop
def eyeTrack():
'''Main loop , called by tk'''
global runTrack
if runTrack == 1:
btnStart.pack_forget()
btnEnd = tk.Button(root,text="Stop",command=stopTrack)
btnEnd.pack()
runTrack = 2
if runTrack == 2:
# Call the functions here; the rest manages the loop
print("In loop")
# End of the function-call zone
root.update_idletasks()
root.after(LOOP_DELAY,eyeTrack)
elif runTrack == 0:
root.after(LOOP_DELAY,eyeTrack)
else:
# Terminate the program
quit()
print("(1) Mode matplotlib\n(2) Mode superposition")
mode = 0
while not (mode == 1 or mode == 2):
try:
mode = int(input("Mode :"))
except Exception:
pass
if not (mode == 1 or mode == 2):
print("Entrée Incorecte",end=' , ')
# Tkinter mode
if mode == 2:
# Set up the GUI
root = tk.Tk()
label = tk.Label(root)
label.pack()
btnStart = tk.Button(root,text="Start",command=startTrack)
btnStart.pack()
# Load the room before launching the GUI
room = loadRoom(False)
# Initial start of the loop
root.after(0,eyeTrack)
# Start the Tkinter GUI; any code written after this line will not run
root.mainloop() | mit |
nicholasmalaya/arcanus | uq/ps2/infer.py | 2 | 3735 | #!/usr/bin/env python
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
import pylab
import matplotlib.mlab as mlab
import matplotlib.ticker as ticker
from scipy import stats
import sys
# local files that will be imported
import likelihood
from settings_infer import *
from prior import *
# -------------------------------------------------------------
# subroutine that generates a .pdf file plotting a prior and posterior quantity
# -------------------------------------------------------------
def plotter(chain,ind,xmin=None,xmax=None):
from math import log, pi
bins = np.linspace(np.min(chain), np.max(chain), 200)
qkde = stats.gaussian_kde(chain)
qpdf = qkde.evaluate(bins)
# plot posterior
pyplot.figure()
pyplot.plot(bins, qpdf, linewidth=3, label="Post")
# plot prior (requires some cleverness to do in general)
qpr = [prior_funcs[i](x) for x in bins]
qpri = [np.exp(x) for x in qpr]
qpri=qpri/np.linalg.norm(qpri)
pyplot.plot(bins, qpri, linewidth=3, label="Prior")
# user specified bounds to x-range:
if(xmin != None and xmax != None):
bounds = np.array([xmin, xmax])
pyplot.xlim(bounds)
quant = qoi_list[i]
pyplot.xlabel(quant, fontsize=30)
pyplot.ylabel('$\pi('+quant+')$', fontsize=30)
pyplot.legend(loc='upper left')
pyplot.savefig(quant+'_post.pdf', bbox_inches='tight')
# -------------------------------------------------------------
# MCMC sampling Function
# -------------------------------------------------------------
class BayesianRichardsonExtrapolation(object):
"Computes the Bayesian Richardson extrapolation posterior log density."
def __call__(self, params, dtype=np.double):
from math import log
#print params[0], params[1], params[2]
return (
prior(params) +
likelihood.likelihood(params)
)
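# Hedged note: the value returned above is the unnormalized log-posterior,
# log p(params | data) = log prior + log likelihood + const., assuming that
# prior() and likelihood.likelihood() both return log densities as emcee expects.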
# -------------------------------------------------------------
# Main Function
# -------------------------------------------------------------
#
# Stop module loading when imported. Otherwise continue running.
if __name__ != '__main__': raise SystemExit, 0
# Example of sampling Bayesian Richardson extrapolation density using emcee
from emcee import EnsembleSampler
from math import ceil, floor, sqrt
#
# initalize the Bayesian Calibration Procedure
#
bre = BayesianRichardsonExtrapolation()
#print("\nInitializing walkers")
nwalk = 100
params0 = np.tile(guess_list, nwalk).reshape(nwalk, len(guess_list))
#
# perturb walkers around guess
#
for i in xrange(len(guess_list)):
params0.T[i] += np.random.rand(nwalk) * perturb_list[i]
# hack!
params0.T[2] = np.absolute(params0.T[2]) # ...and force >= 0
#print("\nInitializing the sampler and burning in walkers")
s = EnsembleSampler(nwalk, params0.shape[-1], bre, threads=4)
pos, prob, state = s.run_mcmc(params0, burn_in)
s.reset()
#print("\nSampling the posterior density for the problem")
s.run_mcmc(pos, samples)
samplea = s.flatchain[:,0]
pylab.plot(samplea)
pylab.xlabel('Step number')
pylab.ylabel('alpha')
pylab.show()
pylab.savefig('alpha.png')
samples = s.flatchain[:,1]
pylab.plot(samples)
pylab.xlabel('Step number')
pylab.ylabel('sigma')
pylab.show()
pylab.savefig('sigma.png')
sample = s.flatchain[:,2]
pylab.plot(sample)
pylab.xlabel('Step number')
pylab.ylabel('Value')
pylab.show()
pylab.savefig('cd.png')
filepath='sigma.dat'
f = open(filepath, 'w')
for item in samples:
f.write("%s\n" % item)
filepath='alpha.dat'
f = open(filepath, 'w')
for item in samplea:
f.write("%s\n" % item)
filepath='drag.dat'
f = open(filepath, 'w')
for item in sample:
f.write("%s\n" % item)
| mit |
yaoqr15/Machine-Learning-Based-Rumors-Detection | Keras_main/other_model/training_52.99%.py | 1 | 4628 | # -*- coding: utf-8 -*-
import pandas as pd
import re
import jieba
import jieba.analyse
import numpy as np
import copy
import datetime
from keras.models import Sequential
from keras.layers import Dense,Dropout,Activation,Embedding
from keras.layers import Conv1D,GlobalMaxPooling1D
from keras.utils.np_utils import to_categorical
global len_of_count
len_of_count = 0
short_text = 600
useless_word = [',',':','‘','’','','。','—','——',
'你','我','他','它','咱们','大家','自己',
'这','那','这儿','那边','各','每','的','了',
'谁','什么','哪','怎么','哪里','几','地']
#the function to get the pure word
def remove_mess(str):
p = re.compile('[a-zA-Z\"\'’\/\(\),:;~‘\(\)\|\-\*@#$%^&\[\]{}<>+`\s\n\r\\\\]')
return re.sub(p,'',str)
#the function to cut the word
def cut_the_word(str_in):
length = len(str_in)
if length <= short_text:
tmp = list(jieba.cut(str_in,cut_all=False))
word = [x for x in tmp if x not in useless_word]
return word
else:
cut = int(0.2*length)
tmp = str_in[:cut]+str_in[-cut:]
word = list(jieba.cut(tmp,cut_all=False))
word = [x for x in word if x not in useless_word]
return word
#the function to get word sequence
#gather the word and count,then assign each a number
def word_seq(x, maxlength):
global len_of_count
content = []
store = list(map(lambda s: cut_the_word(s), x[0]))
for i in store:
content += i
count = pd.Series(content).value_counts()
tmp = pd.DataFrame(count)
select = tmp[tmp[0] > 1]
count = copy.deepcopy(select[0])
count[:] = range(1, 1 + len(count))
count[''] = 0
len_of_count = len(count)
def doc2num(a, maxlength):
a = [i for i in a if i in count]
a += max(maxlength-len(a),0)*['']
return list(count[a])
x['seq'] = list(map(lambda s: doc2num(s, maxlength), store))
return list(x['seq'])
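# Illustration (behaviour inferred from the code above): doc2num keeps only tokens
# that occur more than once in the whole corpus, maps each kept token to a positive
# integer code, and pads shorter sequences with the code 0 (the '' placeholder) up
# to maxlength, producing the integer sequences fed to the Embedding layer below.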
########################## Pre-processing period ############################
##################################################################################
#import the training data
fake = pd.read_excel("fake.xls", header=None)
truth = pd.read_excel("truth.xls", header=None)
not_sure = pd.read_excel("not_sure.xls", header=None)
#giving the label
fake['label'] = -1
truth['label'] = 1
not_sure['label'] = 0
#gather the data
all_text = fake.append(truth, ignore_index=True)
all_text = all_text.append(not_sure,ignore_index=True)
#get the pure word
all_text[0] = list(map(lambda s: remove_mess(s),all_text[0]))
#get the maxlen
all_text['len'] = list(map(lambda s:len(s),all_text[0]))
long_text = int(max(list(all_text['len']))*0.4*0.5)
maxlen = max(long_text,int(short_text*0.5))
#get the word sequence
all_text['seq'] = word_seq(all_text, maxlen)
##################################################################################
############################## Training period ###############################
##################################################################################
#shuffle the order
idx = list(range(len(all_text)))
np.random.shuffle(idx)
all_text = all_text.loc[idx]
print(all_text)
#initialize the training data
x_train = np.array(list(all_text['seq']))
y_train = np.array(list(all_text['label']))
y_train = to_categorical(y_train, num_classes=3)
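# Note: the labels are -1/0/1; to_categorical appears to place -1 in the last
# column via negative indexing, so explicitly mapping the labels to {0, 1, 2}
# beforehand would arguably be clearer.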
#setup the parameter of the layers
embedding_vector_length = 32
batch_size = 32
nb_epoch = 10
nb_filter = 128
filter_length = 3
################ define the model ###############
model = Sequential()
#Embedding layer
print(len_of_count,maxlen)
model.add(Embedding(len_of_count,
embedding_vector_length,
input_length=maxlen))
#Convolution-1D layer
model.add(Conv1D(activation="relu",
filters=256,
kernel_size=3,
padding="valid"))
#Pooling layer
model.add(GlobalMaxPooling1D())
#Dense layer ,Dropout layer & Activation layer
model.add(Dense(128))
model.add(Dropout(0.2))
model.add(Activation('relu'))
#Output layer
model.add(Dense(3))
model.add(Activation('softmax'))
#compile the model
model.compile(loss='categorical_crossentropy',
optimizer='Adam',
metrics=['accuracy'])
#Fit the model
print('Train...')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=nb_epoch)
score = model.evaluate(x_train, y_train, verbose=0)
print('train score:', score[0])
print('train accuracy:', score[1])
time = str(datetime.datetime.now())[:10]
model.save(str(score[1])[:5]+'_'+time+'.h5')
| mit |
SpatialMetabolomics/SM_distributed | sm/engine/fdr.py | 2 | 4132 | import logging
import numpy as np
import pandas as pd
logger = logging.getLogger('engine')
DECOY_ADDUCTS = ['+He', '+Li', '+Be', '+B', '+C', '+N', '+O', '+F', '+Ne', '+Mg', '+Al', '+Si', '+P', '+S', '+Cl', '+Ar', '+Ca', '+Sc', '+Ti', '+V', '+Cr', '+Mn', '+Fe', '+Co', '+Ni', '+Cu', '+Zn', '+Ga', '+Ge', '+As', '+Se', '+Br', '+Kr', '+Rb', '+Sr', '+Y', '+Zr', '+Nb', '+Mo', '+Ru', '+Rh', '+Pd', '+Ag', '+Cd', '+In', '+Sn', '+Sb', '+Te', '+I', '+Xe', '+Cs', '+Ba', '+La', '+Ce', '+Pr', '+Nd', '+Sm', '+Eu', '+Gd', '+Tb', '+Dy', '+Ho', '+Ir', '+Th', '+Pt', '+Os', '+Yb', '+Lu', '+Bi', '+Pb', '+Re', '+Tl', '+Tm', '+U', '+W', '+Au', '+Er', '+Hf', '+Hg', '+Ta']
class FDR(object):
def __init__(self, job_id, decoy_sample_size, target_adducts, db):
self.job_id = job_id
self.decoy_sample_size = decoy_sample_size
self.db = db
self.target_adducts = target_adducts
self.td_df = None
self.fdr_levels = [0.05, 0.1, 0.2, 0.5]
self.random_seed = 42
def _decoy_adduct_gen(self, target_ions, decoy_adducts_cand):
np.random.seed(self.random_seed)
for sf, ta in target_ions:
for da in np.random.choice(decoy_adducts_cand, size=self.decoy_sample_size, replace=False):
yield (sf, ta, da)
def decoy_adducts_selection(self, target_ions):
decoy_adduct_cand = [add for add in DECOY_ADDUCTS if add not in self.target_adducts]
self.td_df = pd.DataFrame(self._decoy_adduct_gen(target_ions, decoy_adduct_cand),
columns=['sf', 'ta', 'da'])
def ion_tuples(self):
""" All ions needed for FDR calculation """
d_ions = self.td_df[['sf', 'da']].drop_duplicates().values.tolist()
t_ions = self.td_df[['sf', 'ta']].drop_duplicates().values.tolist()
return list(map(tuple, t_ions + d_ions))
@staticmethod
def _msm_fdr_map(target_msm, decoy_msm):
target_msm_hits = pd.Series(target_msm.msm.value_counts(), name='target')
decoy_msm_hits = pd.Series(decoy_msm.msm.value_counts(), name='decoy')
msm_df = pd.concat([target_msm_hits, decoy_msm_hits], axis=1).fillna(0).sort_index(ascending=False)
msm_df['target_cum'] = msm_df.target.cumsum()
msm_df['decoy_cum'] = msm_df.decoy.cumsum()
msm_df['fdr'] = msm_df.decoy_cum / msm_df.target_cum
return msm_df.fdr
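    # Worked illustration (hypothetical scores): with target MSM values [0.9, 0.7]
    # and a single decoy value [0.8], the cumulative counts at thresholds
    # 0.9/0.8/0.7 are targets 1/1/2 and decoys 0/1/1, giving FDR values of
    # 0.0, 1.0 and 0.5 respectively.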
def _digitize_fdr(self, fdr_df):
df = fdr_df.copy().sort_values(by='msm', ascending=False)
msm_levels = [df[df.fdr < fdr_thr].msm.min() for fdr_thr in self.fdr_levels]
df['fdr_d'] = 1.
for msm_thr, fdr_thr in zip(msm_levels, self.fdr_levels):
row_mask = np.isclose(df.fdr_d, 1.) & np.greater_equal(df.msm, msm_thr)
df.loc[row_mask, 'fdr_d'] = fdr_thr
df['fdr'] = df.fdr_d
return df.drop('fdr_d', axis=1)
def estimate_fdr(self, sf_adduct_msm_df):
logger.info('Estimating FDR')
all_sf_adduct_msm_df = (pd.DataFrame(self.ion_tuples(), columns=['sf', 'adduct'])
.set_index(['sf', 'adduct']).sort_index())
all_sf_adduct_msm_df = all_sf_adduct_msm_df.join(sf_adduct_msm_df).fillna(0)
target_fdr_df_list = []
for ta in self.target_adducts:
target_msm = all_sf_adduct_msm_df.loc(axis=0)[:, ta]
full_decoy_df = self.td_df[self.td_df.ta == ta][['sf', 'da']]
msm_fdr_list = []
for i in range(self.decoy_sample_size):
decoy_subset_df = full_decoy_df[i::self.decoy_sample_size]
sf_da_list = [tuple(row) for row in decoy_subset_df.values]
decoy_msm = all_sf_adduct_msm_df.loc[sf_da_list]
msm_fdr = self._msm_fdr_map(target_msm, decoy_msm)
msm_fdr_list.append(msm_fdr)
msm_fdr_avg = pd.Series(pd.concat(msm_fdr_list, axis=1).median(axis=1), name='fdr')
target_fdr = self._digitize_fdr(target_msm.join(msm_fdr_avg, on='msm'))
target_fdr_df_list.append(target_fdr.drop('msm', axis=1))
return pd.concat(target_fdr_df_list, axis=0)
| apache-2.0 |
hsiaoyi0504/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
jrversteegh/softsailor | deps/scipy-0.10.0b2/scipy/signal/filter_design.py | 5 | 65733 | """Filter design.
"""
import types
import warnings
import numpy
from numpy import atleast_1d, poly, polyval, roots, real, asarray, allclose, \
resize, pi, absolute, logspace, r_, sqrt, tan, log10, arctan, arcsinh, \
cos, exp, cosh, arccosh, ceil, conjugate, zeros, sinh
from numpy import mintypecode
from scipy import special, optimize
from scipy.misc import comb
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'filter_dict', 'band_dict', 'BadCoefficients']
class BadCoefficients(UserWarning):
pass
abs = absolute
def findfreqs(num, den, N):
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
2 * ez.imag)) - 0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the numerator (b) and denominator (a) of a filter compute its
frequency response::
b[0]*(jw)**(nb-1) + b[1]*(jw)**(nb-2) + ... + b[nb-1]
H(w) = -------------------------------------------------------
a[0]*(jw)**(na-1) + a[1]*(jw)**(na-2) + ... + a[na-1]
Parameters
----------
b : ndarray
Numerator of a linear filter.
a : ndarray
Denominator of a linear filter.
worN : {None, int}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
        integer, then compute at that many frequencies. Otherwise, compute the
response at frequencies given in worN.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The frequencies at which h was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
    Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results; this plots the real part of the complex transfer
    function, not the magnitude.
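    Examples
    --------
    A minimal sketch; the first-order section ``H(s) = 1 / (s + 1)`` below is
    chosen purely for illustration.
    >>> from scipy import signal
    >>> b, a = [1.0], [1.0, 1.0]
    >>> w, h = signal.freqs(b, a, worN=[0.1, 1.0, 10.0])
    >>> print(round(abs(h[1]), 3))   # gain at the corner frequency, about -3 dB
    0.707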
"""
if worN is None:
w = findfreqs(b, a, 200)
elif isinstance(worN, types.IntType):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if not plot is None:
plot(w, h)
return w, h
def freqz(b, a=1, worN=None, whole=0, plot=None):
"""
Compute the frequency response of a digital filter.
Given the numerator ``b`` and denominator ``a`` of a digital filter compute
its frequency response::
jw -jw -jmw
jw B(e) b[0] + b[1]e + .... + b[m]e
H(e) = ---- = ------------------------------------
jw -jw -jnw
A(e) a[0] + a[1]e + .... + a[n]e
Parameters
----------
b : ndarray
numerator of a linear filter
a : ndarray
denominator of a linear filter
worN : {None, int}, optional
If None, then compute at 512 frequencies around the unit circle.
        If a single integer, then compute at that many frequencies.
        Otherwise, compute the response at frequencies given in worN.
whole : bool, optional
        Normally, frequencies are computed from 0 to pi (upper half of the
        unit circle). If whole is True, compute frequencies from 0 to 2*pi.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The frequencies at which h was computed.
h : ndarray
The frequency response.
Notes
-----
    Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results; this plots the real part of the complex transfer
    function, not the magnitude.
Examples
--------
    >>> import numpy as np
    >>> from scipy import signal
    >>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
    >>> w, h = signal.freqz(b)
    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.title('Digital filter frequency response')
    >>> ax1 = fig.add_subplot(111)
    >>> plt.semilogy(w, np.abs(h), 'b')
    >>> plt.ylabel('Amplitude (dB)', color='b')
    >>> plt.xlabel('Frequency (rad/sample)')
    >>> plt.grid()
    >>> ax2 = ax1.twinx()
    >>> angles = np.unwrap(np.angle(h))
    >>> plt.plot(w, angles, 'g')
    >>> plt.ylabel('Angle (radians)', color='g')
    >>> plt.show()
"""
b, a = map(atleast_1d, (b, a))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, types.IntType):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j * w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if not plot is None:
plot(w, h)
return w, h
def tf2zpk(b, a):
"""Return zero, pole, gain (z,p,k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : ndarray
Numerator polynomial.
a : ndarray
Denominator polynomial.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
If some values of b are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
def zpk2tf(z, p, k):
"""Return polynomial transfer function representation from zeros
and poles
Parameters
----------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial.
a : ndarray
Denominator polynomial.
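    Examples
    --------
    A small sketch; the pole/zero locations and gain are chosen only for
    illustration.
    >>> from scipy import signal
    >>> b, a = signal.zpk2tf([0], [-1 + 1j, -1 - 1j], 2.0)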
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
return b, a
def normalize(b, a):
"""Normalize polynomial representation of a transfer function.
If values of b are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
"""
b, a = map(atleast_1d, (b, a))
if len(a.shape) != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if len(b.shape) > 2:
raise ValueError("Numerator polynomial must be rank-1 or"
" rank-2 array.")
if len(b.shape) == 1:
b = asarray([b], b.dtype.char)
while a[0] == 0.0 and len(a) > 1:
a = a[1:]
outb = b * (1.0) / a[0]
outa = a * (1.0) / a[0]
if allclose(outb[:, 0], 0, rtol=1e-14):
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
while allclose(outb[:, 0], 0, rtol=1e-14) and (outb.shape[-1] > 1):
outb = outb[:, 1:]
if outb.shape[0] == 1:
outb = outb[0]
return outb, outa
def lp2lp(b, a, wo=1.0):
"""Return a low-pass filter with cutoff frequency `wo`
from a low-pass filter prototype with unity cutoff frequency.
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d, n))
pwo = pow(wo, numpy.arange(M - 1, -1, -1))
start1 = max((n - d, 0))
start2 = max((d - n, 0))
b = b * pwo[start1] / pwo[start2:]
a = a * pwo[start1] / pwo[start1:]
return normalize(b, a)
def lp2hp(b, a, wo=1.0):
"""Return a high-pass filter with cutoff frequency `wo`
from a low-pass filter prototype with unity cutoff frequency.
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo, numpy.arange(max((d, n))))
else:
pwo = numpy.ones(max((d, n)), b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b, (d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a, (n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
"""Return a band-pass filter with center frequency `wo` and bandwidth `bw`
from a low-pass filter prototype with unity cutoff frequency.
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
ma = max([N, D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
aprime[Dp - j] = val
return normalize(bprime, aprime)
def lp2bs(b, a, wo=1, bw=1):
"""Return a band-stop filter with center frequency `wo` and bandwidth `bw`
from a low-pass filter prototype with unity cutoff frequency.
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
M = max([N, D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * b[N - i] *
(wosq) ** (M - i - k) * bw ** i)
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * a[D - i] *
(wosq) ** (M - i - k) * bw ** i)
aprime[Dp - j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog one using a bilinear transform.
    The bilinear transform substitutes ``2*fs*(z-1) / (z+1)`` for ``s``.
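    Examples
    --------
    A minimal usage sketch; the second-order analog prototype and the sampling
    rate are chosen only for illustration.
    >>> from scipy import signal
    >>> b, a = signal.butter(2, 1.0, analog=1)      # analog prototype
    >>> bz, az = signal.bilinear(b, a, fs=10.0)     # discretize at fs = 10 Hz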
"""
fs = float(fs)
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N, D])
Np = M
Dp = M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
for j in range(Np + 1):
val = 0.0
for i in range(N + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * b[N - i] *
pow(2 * fs, i) * (-1) ** k)
bprime[j] = real(val)
for j in range(Dp + 1):
val = 0.0
for i in range(D + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * a[D - i] *
pow(2 * fs, i) * (-1) ** k)
aprime[j] = real(val)
return normalize(bprime, aprime)
def iirdesign(wp, ws, gpass, gstop, analog=0, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba') or pole-zero ('zpk') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
ftype : str, optional
The type of IIR filter to design:
- elliptic : 'ellip'
- Butterworth : 'butter',
- Chebyshev I : 'cheby1',
- Chebyshev II: 'cheby2',
- Bessel : 'bessel'
output : ['ba', 'zpk'], optional
Type of output: numerator/denominator ('ba') or pole-zero ('zpk').
Default is 'ba'.
Returns
-------
    b, a :
        Numerator and denominator of the IIR filter. Only returned if
        ``output='ba'``.
    z, p, k :
        Zeros, poles, and gain of the IIR filter. Only returned if
        ``output='zpk'``.
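    Examples
    --------
    A short sketch; the band edges and the ripple/attenuation figures below are
    illustrative values only.
    >>> from scipy import signal
    >>> b, a = signal.iirdesign(wp=[0.2, 0.5], ws=[0.1, 0.6],
    ...                         gpass=3, gstop=40, ftype='ellip')
    >>> w, h = signal.freqz(b, a)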
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError(("%s does not have order selection use "
"iirfilter function.") % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
band_type = 2 * (len(wp) - 1)
band_type += 1
if wp[0] >= ws[0]:
band_type += 1
btype = {1: 'lowpass', 2: 'highpass',
3: 'bandstop', 4: 'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
ftype=ftype, output=output)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=0,
ftype='butter', output='ba'):
"""IIR digital and analog filter design given order and critical points.
Design an Nth order lowpass digital or analog filter and return the filter
coefficients in (B,A) (numerator, denominator) or (Z,P,K) form.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
rp : float, optional
For Chebyshev and elliptic filters provides the maximum ripple
in the passband.
rs : float, optional
        For Chebyshev and elliptic filters provides the minimum attenuation in
the stop band.
btype : str, optional
The type of filter (lowpass, highpass, bandpass, bandstop).
Default is bandpass.
analog : int, optional
Non-zero to return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- elliptic : 'ellip'
- Butterworth : 'butter',
- Chebyshev I : 'cheby1',
- Chebyshev II: 'cheby2',
- Bessel : 'bessel'
output : ['ba', 'zpk'], optional
Type of output: numerator/denominator ('ba') or pole-zero ('zpk').
Default is 'ba'.
See Also
--------
    buttord, cheb1ord, cheb2ord, ellipord
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("%s is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("%s is not a valid basic iir filter." % ftype)
if output not in ['ba', 'zpk']:
raise ValueError("%s is not a valid output form." % output)
# pre-warp frequencies for digital filter design
if not analog:
fs = 2.0
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# convert to low-pass prototype
if btype in ['lowpass', 'highpass']:
wo = warped
else:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
# Get analog lowpass prototype
if typefunc in [buttap, besselap]:
z, p, k = typefunc(N)
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband atteunatuion (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
else: # Elliptic filters
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an "
"elliptic filter.")
z, p, k = typefunc(N, rp, rs)
b, a = zpk2tf(z, p, k)
# transform to lowpass, bandpass, highpass, or bandstop
if btype == 'lowpass':
b, a = lp2lp(b, a, wo=wo)
elif btype == 'highpass':
b, a = lp2hp(b, a, wo=wo)
elif btype == 'bandpass':
b, a = lp2bp(b, a, wo=wo, bw=bw)
else: # 'bandstop'
b, a = lp2bs(b, a, wo=wo, bw=bw)
# Find discrete equivalent if necessary
if not analog:
b, a = bilinear(b, a, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return tf2zpk(b, a)
else:
return b, a
def butter(N, Wn, btype='low', analog=0, output='ba'):
"""Butterworth digital and analog filter design.
Design an Nth order lowpass digital or analog Butterworth filter and return
the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
buttord.
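    Examples
    --------
    A minimal sketch; the order and cutoff are illustrative values only.
    >>> from scipy import signal
    >>> b, a = signal.butter(4, 0.3, btype='low')
    >>> w, h = signal.freqz(b, a)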
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='butter')
def cheby1(N, rp, Wn, btype='low', analog=0, output='ba'):
"""Chebyshev type I digital and analog filter design.
Design an Nth order lowpass digital or analog Chebyshev type I filter and
return the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
cheb1ord.
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog,
output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=0, output='ba'):
"""Chebyshev type I digital and analog filter design.
Design an Nth order lowpass digital or analog Chebyshev type I filter and
return the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
cheb2ord.
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=0, output='ba'):
"""Elliptic (Cauer) digital and analog filter design.
Design an Nth order lowpass digital or analog elliptic filter and return
the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
ellipord.
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog,
output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=0, output='ba'):
"""Bessel digital and analog filter design.
Design an Nth order lowpass digital or analog Bessel filter and return the
filter coefficients in (B,A) or (Z,P,K) form.
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='bessel')
def maxflat():
pass
def yulewalk():
pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp :
Edge of passband `passb`.
ind : int
Index specifying which `passb` edge to vary (0 or 1).
passb : array_like
Two element sequence of fixed passband edges.
stopb : array_like
Two element sequence of fixed stopband edges.
    gpass : float
        Amount of ripple in the passband in dB.
    gstop : float
        Amount of attenuation in the stopband in dB.
type : ['butter', 'cheby', 'ellip']
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
passbC = passb.copy()
passbC[ind] = wp
nat = (stopb * (passbC[0] - passbC[1]) /
(stopb ** 2 - passbC[0] * passbC[1]))
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
elif type == 'cheby':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
n = (d0[0] * d1[1] / (d0[1] * d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
def buttord(wp, ws, gpass, gstop, analog=0):
"""Butterworth filter order selection.
Return the order of the lowest order digital Butterworth filter that loses
no more than `gpass` dB in the passband and has at least `gstop` dB
attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
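    Examples
    --------
    A short sketch; the edge frequencies and gain specifications are
    illustrative values only.
    >>> from scipy import signal
    >>> N, Wn = signal.buttord(wp=0.2, ws=0.3, gpass=3, gstop=40)
    >>> b, a = signal.butter(N, Wn)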
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies
if not analog:
passb = tan(wp * pi / 2.0)
stopb = tan(ws * pi / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))))
    # Find the Butterworth natural frequency W0 (or the "3dB frequency")
# to give exactly gstop at nat. W0 will be between 1 and nat
try:
W0 = nat / ((10 ** (0.1 * abs(gstop)) - 1) ** (1.0 / (2.0 * ord)))
except ZeroDivisionError:
W0 = nat
print "Warning, order is zero...check input parametegstop."
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0 * passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2, float)
discr = sqrt((passb[1] - passb[0]) ** 2 +
4 * W0 ** 2 * passb[0] * passb[1])
WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0], float)
WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
passb[0] * passb[1]))
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0 / pi) * arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb1ord(wp, ws, gpass, gstop, analog=0):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital Chebyshev Type I filter that
loses no more than `gpass` dB in the passband and has at least `gstop` dB
attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results.
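    Examples
    --------
    A short sketch; the specification values are illustrative only.
    >>> from scipy import signal
    >>> N, Wn = signal.cheb1ord(wp=0.2, ws=0.3, gpass=1, gstop=40)
    >>> b, a = signal.cheby1(N, 1, Wn)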
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
    # Pre-warp frequencies
if not analog:
passb = tan(pi * wp / 2.)
stopb = tan(pi * ws / 2.)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0 / pi) * arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=0):
"""Chebyshev type II filter order selection.
    Return the order of the lowest order digital Chebyshev Type II filter that
    loses no more than `gpass` dB in the passband and has at least `gstop` dB
    attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
    # Pre-warp frequencies
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2, float)
nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
passb[1] * passb[0]))
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2, float)
nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
passb[1] * passb[0]))
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0 / pi) * arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=0):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital elliptic filter that loses no
more than gpass dB in the passband and has at least gstop dB attenuation in
the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
    -------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
        `ellip` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
    # Pre-warp frequencies
if analog:
passb = wp * 1.0
stopb = ws * 1.0
else:
passb = tan(wp * pi / 2.0)
stopb = tan(ws * pi / 2.0)
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))
if not analog:
wn = arctan(passb) * 2.0 / pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def buttap(N):
"""Return (z,p,k) zero, pole, gain for analog prototype of an Nth
order Butterworth filter."""
z = []
n = numpy.arange(1, N + 1)
p = numpy.exp(1j * (2 * n - 1) / (2.0 * N) * pi) * 1j
k = 1
return z, p, k
def cheb1ap(N, rp):
"""Return (z,p,k) zero, pole, gain for Nth order Chebyshev type I lowpass
analog filter prototype with `rp` decibels of ripple in the passband.
"""
z = []
eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
n = numpy.arange(1, N + 1)
mu = 1.0 / N * numpy.log((1.0 + numpy.sqrt(1 + eps * eps)) / eps)
theta = pi / 2.0 * (2 * n - 1.0) / N
p = (-numpy.sinh(mu) * numpy.sin(theta) +
1j * numpy.cosh(mu) * numpy.cos(theta))
k = numpy.prod(-p, axis=0).real
if N % 2 == 0:
k = k / sqrt((1 + eps * eps))
return z, p, k
def cheb2ap(N, rs):
"""Return (z,p,k) zero, pole, gain for Nth order Chebyshev type II lowpass
analog filter prototype with `rs` decibels of ripple in the stopband.
"""
de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
mu = arcsinh(1.0 / de) / N
if N % 2:
m = N - 1
n = numpy.concatenate((numpy.arange(1, N - 1, 2),
numpy.arange(N + 2, 2 * N, 2)))
else:
m = N
n = numpy.arange(1, 2 * N, 2)
z = conjugate(1j / cos(n * pi / (2.0 * N)))
p = exp(1j * (pi * numpy.arange(1, 2 * N, 2) / (2.0 * N) + pi / 2.0))
p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
p = 1.0 / p
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
return z, p, k
EPSILON = 2e-16
def _vratio(u, ineps, mp):
[s, c, d, phi] = special.ellipj(u, mp)
ret = abs(ineps - s / c)
return ret
def _kratio(m, k_ratio):
m = float(m)
if m < 0:
m = 0.0
if m > 1:
m = 1.0
if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
k = special.ellipk([m, 1 - m])
r = k[0] / k[1] - k_ratio
elif abs(m) > EPSILON:
r = -k_ratio
else:
r = 1e20
return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) zeros, poles, and gain of an Nth order normalized
prototype elliptic analog lowpass filter with `rp` decibels of ripple in
the passband and a stopband `rs` decibels down.
References
----------
Lutova, Tosic, and Evans, "Filter Design for Signal Processing", Chapters 5
and 12.
"""
if N == 1:
p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0))
k = -p
z = []
return z, p, k
eps = numpy.sqrt(10 ** (0.1 * rp) - 1)
ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1)
ck1p = numpy.sqrt(1 - ck1 * ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs"
" specifications.")
wp = 1
val = special.ellipk([ck1 * ck1, ck1p * ck1p])
if abs(1 - ck1p * ck1p) < EPSILON:
krat = 0
else:
krat = N * val[0] / val[1]
m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250,
maxiter=250, disp=0)
capk = special.ellipk(m)
ws = wp / sqrt(m)
m1 = 1 - m
j = numpy.arange(1 - N % 2, N, 2)
jj = len(j)
[s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
z = 1.0 / (sqrt(m) * snew)
z = 1j * z
z = numpy.concatenate((z, conjugate(z)))
r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N * val[0])
[sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON *
numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
axis=0).real),
p, axis=-1)
p = numpy.concatenate((p, conjugate(newp)))
else:
p = numpy.concatenate((p, conjugate(p)))
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1 + eps * eps))
return z, p, k
def besselap(N):
"""Return (z,p,k) zero, pole, gain for analog prototype of an Nth order
Bessel filter."""
z = []
k = 1
if N == 0:
p = []
elif N == 1:
p = [-1]
elif N == 2:
p = [-.8660254037844386467637229 + .4999999999999999999999996j,
-.8660254037844386467637229 - .4999999999999999999999996j]
elif N == 3:
p = [-.9416000265332067855971980,
-.7456403858480766441810907 - .7113666249728352680992154j,
-.7456403858480766441810907 + .7113666249728352680992154j]
elif N == 4:
p = [-.6572111716718829545787781 - .8301614350048733772399715j,
-.6572111716718829545787788 + .8301614350048733772399715j,
-.9047587967882449459642637 - .2709187330038746636700923j,
-.9047587967882449459642624 + .2709187330038746636700926j]
elif N == 5:
p = [-.9264420773877602247196260,
-.8515536193688395541722677 - .4427174639443327209850002j,
-.8515536193688395541722677 + .4427174639443327209850002j,
-.5905759446119191779319432 - .9072067564574549539291747j,
-.5905759446119191779319432 + .9072067564574549539291747j]
elif N == 6:
p = [-.9093906830472271808050953 - .1856964396793046769246397j,
-.9093906830472271808050953 + .1856964396793046769246397j,
-.7996541858328288520243325 - .5621717346937317988594118j,
-.7996541858328288520243325 + .5621717346937317988594118j,
-.5385526816693109683073792 - .9616876881954277199245657j,
-.5385526816693109683073792 + .9616876881954277199245657j]
elif N == 7:
p = [-.9194871556490290014311619,
-.8800029341523374639772340 - .3216652762307739398381830j,
-.8800029341523374639772340 + .3216652762307739398381830j,
-.7527355434093214462291616 - .6504696305522550699212995j,
-.7527355434093214462291616 + .6504696305522550699212995j,
-.4966917256672316755024763 - 1.002508508454420401230220j,
-.4966917256672316755024763 + 1.002508508454420401230220j]
elif N == 8:
p = [-.9096831546652910216327629 - .1412437976671422927888150j,
-.9096831546652910216327629 + .1412437976671422927888150j,
-.8473250802359334320103023 - .4259017538272934994996429j,
-.8473250802359334320103023 + .4259017538272934994996429j,
-.7111381808485399250796172 - .7186517314108401705762571j,
-.7111381808485399250796172 + .7186517314108401705762571j,
-.4621740412532122027072175 - 1.034388681126901058116589j,
-.4621740412532122027072175 + 1.034388681126901058116589j]
elif N == 9:
p = [-.9154957797499037686769223,
-.8911217017079759323183848 - .2526580934582164192308115j,
-.8911217017079759323183848 + .2526580934582164192308115j,
-.8148021112269012975514135 - .5085815689631499483745341j,
-.8148021112269012975514135 + .5085815689631499483745341j,
-.6743622686854761980403401 - .7730546212691183706919682j,
-.6743622686854761980403401 + .7730546212691183706919682j,
-.4331415561553618854685942 - 1.060073670135929666774323j,
-.4331415561553618854685942 + 1.060073670135929666774323j]
elif N == 10:
p = [-.9091347320900502436826431 - .1139583137335511169927714j,
-.9091347320900502436826431 + .1139583137335511169927714j,
-.8688459641284764527921864 - .3430008233766309973110589j,
-.8688459641284764527921864 + .3430008233766309973110589j,
-.7837694413101441082655890 - .5759147538499947070009852j,
-.7837694413101441082655890 + .5759147538499947070009852j,
-.6417513866988316136190854 - .8175836167191017226233947j,
-.6417513866988316136190854 + .8175836167191017226233947j,
-.4083220732868861566219785 - 1.081274842819124562037210j,
-.4083220732868861566219785 + 1.081274842819124562037210j]
elif N == 11:
p = [-.9129067244518981934637318,
-.8963656705721166099815744 - .2080480375071031919692341j,
-.8963656705721166099815744 + .2080480375071031919692341j,
-.8453044014712962954184557 - .4178696917801248292797448j,
-.8453044014712962954184557 + .4178696917801248292797448j,
-.7546938934722303128102142 - .6319150050721846494520941j,
-.7546938934722303128102142 + .6319150050721846494520941j,
-.6126871554915194054182909 - .8547813893314764631518509j,
-.6126871554915194054182909 + .8547813893314764631518509j,
-.3868149510055090879155425 - 1.099117466763120928733632j,
-.3868149510055090879155425 + 1.099117466763120928733632j]
elif N == 12:
p = [-.9084478234140682638817772 - 95506365213450398415258360.0e-27j,
-.9084478234140682638817772 + 95506365213450398415258360.0e-27j,
-.8802534342016826507901575 - .2871779503524226723615457j,
-.8802534342016826507901575 + .2871779503524226723615457j,
-.8217296939939077285792834 - .4810212115100676440620548j,
-.8217296939939077285792834 + .4810212115100676440620548j,
-.7276681615395159454547013 - .6792961178764694160048987j,
-.7276681615395159454547013 + .6792961178764694160048987j,
-.5866369321861477207528215 - .8863772751320727026622149j,
-.5866369321861477207528215 + .8863772751320727026622149j,
-.3679640085526312839425808 - 1.114373575641546257595657j,
-.3679640085526312839425808 + 1.114373575641546257595657j]
elif N == 13:
p = [-.9110914665984182781070663,
-.8991314665475196220910718 - .1768342956161043620980863j,
-.8991314665475196220910718 + .1768342956161043620980863j,
-.8625094198260548711573628 - .3547413731172988997754038j,
-.8625094198260548711573628 + .3547413731172988997754038j,
-.7987460692470972510394686 - .5350752120696801938272504j,
-.7987460692470972510394686 + .5350752120696801938272504j,
-.7026234675721275653944062 - .7199611890171304131266374j,
-.7026234675721275653944062 + .7199611890171304131266374j,
-.5631559842430199266325818 - .9135900338325109684927731j,
-.5631559842430199266325818 + .9135900338325109684927731j,
-.3512792323389821669401925 - 1.127591548317705678613239j,
-.3512792323389821669401925 + 1.127591548317705678613239j]
elif N == 14:
p = [-.9077932138396487614720659 - 82196399419401501888968130.0e-27j,
-.9077932138396487614720659 + 82196399419401501888968130.0e-27j,
-.8869506674916445312089167 - .2470079178765333183201435j,
-.8869506674916445312089167 + .2470079178765333183201435j,
-.8441199160909851197897667 - .4131653825102692595237260j,
-.8441199160909851197897667 + .4131653825102692595237260j,
-.7766591387063623897344648 - .5819170677377608590492434j,
-.7766591387063623897344648 + .5819170677377608590492434j,
-.6794256425119233117869491 - .7552857305042033418417492j,
-.6794256425119233117869491 + .7552857305042033418417492j,
-.5418766775112297376541293 - .9373043683516919569183099j,
-.5418766775112297376541293 + .9373043683516919569183099j,
-.3363868224902037330610040 - 1.139172297839859991370924j,
-.3363868224902037330610040 + 1.139172297839859991370924j]
elif N == 15:
p = [-.9097482363849064167228581,
-.9006981694176978324932918 - .1537681197278439351298882j,
-.9006981694176978324932918 + .1537681197278439351298882j,
-.8731264620834984978337843 - .3082352470564267657715883j,
-.8731264620834984978337843 + .3082352470564267657715883j,
-.8256631452587146506294553 - .4642348752734325631275134j,
-.8256631452587146506294553 + .4642348752734325631275134j,
-.7556027168970728127850416 - .6229396358758267198938604j,
-.7556027168970728127850416 + .6229396358758267198938604j,
-.6579196593110998676999362 - .7862895503722515897065645j,
-.6579196593110998676999362 + .7862895503722515897065645j,
-.5224954069658330616875186 - .9581787261092526478889345j,
-.5224954069658330616875186 + .9581787261092526478889345j,
-.3229963059766444287113517 - 1.149416154583629539665297j,
-.3229963059766444287113517 + 1.149416154583629539665297j]
elif N == 16:
p = [-.9072099595087001356491337 - 72142113041117326028823950.0e-27j,
-.9072099595087001356491337 + 72142113041117326028823950.0e-27j,
-.8911723070323647674780132 - .2167089659900576449410059j,
-.8911723070323647674780132 + .2167089659900576449410059j,
-.8584264231521330481755780 - .3621697271802065647661080j,
-.8584264231521330481755780 + .3621697271802065647661080j,
-.8074790293236003885306146 - .5092933751171800179676218j,
-.8074790293236003885306146 + .5092933751171800179676218j,
-.7356166304713115980927279 - .6591950877860393745845254j,
-.7356166304713115980927279 + .6591950877860393745845254j,
-.6379502514039066715773828 - .8137453537108761895522580j,
-.6379502514039066715773828 + .8137453537108761895522580j,
-.5047606444424766743309967 - .9767137477799090692947061j,
-.5047606444424766743309967 + .9767137477799090692947061j,
-.3108782755645387813283867 - 1.158552841199330479412225j,
-.3108782755645387813283867 + 1.158552841199330479412225j]
elif N == 17:
p = [-.9087141161336397432860029,
-.9016273850787285964692844 - .1360267995173024591237303j,
-.9016273850787285964692844 + .1360267995173024591237303j,
-.8801100704438627158492165 - .2725347156478803885651973j,
-.8801100704438627158492165 + .2725347156478803885651973j,
-.8433414495836129204455491 - .4100759282910021624185986j,
-.8433414495836129204455491 + .4100759282910021624185986j,
-.7897644147799708220288138 - .5493724405281088674296232j,
-.7897644147799708220288138 + .5493724405281088674296232j,
-.7166893842372349049842743 - .6914936286393609433305754j,
-.7166893842372349049842743 + .6914936286393609433305754j,
-.6193710717342144521602448 - .8382497252826992979368621j,
-.6193710717342144521602448 + .8382497252826992979368621j,
-.4884629337672704194973683 - .9932971956316781632345466j,
-.4884629337672704194973683 + .9932971956316781632345466j,
-.2998489459990082015466971 - 1.166761272925668786676672j,
-.2998489459990082015466971 + 1.166761272925668786676672j]
elif N == 18:
p = [-.9067004324162775554189031 - 64279241063930693839360680.0e-27j,
-.9067004324162775554189031 + 64279241063930693839360680.0e-27j,
-.8939764278132455733032155 - .1930374640894758606940586j,
-.8939764278132455733032155 + .1930374640894758606940586j,
-.8681095503628830078317207 - .3224204925163257604931634j,
-.8681095503628830078317207 + .3224204925163257604931634j,
-.8281885016242836608829018 - .4529385697815916950149364j,
-.8281885016242836608829018 + .4529385697815916950149364j,
-.7726285030739558780127746 - .5852778162086640620016316j,
-.7726285030739558780127746 + .5852778162086640620016316j,
-.6987821445005273020051878 - .7204696509726630531663123j,
-.6987821445005273020051878 + .7204696509726630531663123j,
-.6020482668090644386627299 - .8602708961893664447167418j,
-.6020482668090644386627299 + .8602708961893664447167418j,
-.4734268069916151511140032 - 1.008234300314801077034158j,
-.4734268069916151511140032 + 1.008234300314801077034158j,
-.2897592029880489845789953 - 1.174183010600059128532230j,
-.2897592029880489845789953 + 1.174183010600059128532230j]
elif N == 19:
p = [-.9078934217899404528985092,
-.9021937639390660668922536 - .1219568381872026517578164j,
-.9021937639390660668922536 + .1219568381872026517578164j,
-.8849290585034385274001112 - .2442590757549818229026280j,
-.8849290585034385274001112 + .2442590757549818229026280j,
-.8555768765618421591093993 - .3672925896399872304734923j,
-.8555768765618421591093993 + .3672925896399872304734923j,
-.8131725551578197705476160 - .4915365035562459055630005j,
-.8131725551578197705476160 + .4915365035562459055630005j,
-.7561260971541629355231897 - .6176483917970178919174173j,
-.7561260971541629355231897 + .6176483917970178919174173j,
-.6818424412912442033411634 - .7466272357947761283262338j,
-.6818424412912442033411634 + .7466272357947761283262338j,
-.5858613321217832644813602 - .8801817131014566284786759j,
-.5858613321217832644813602 + .8801817131014566284786759j,
-.4595043449730988600785456 - 1.021768776912671221830298j,
-.4595043449730988600785456 + 1.021768776912671221830298j,
-.2804866851439370027628724 - 1.180931628453291873626003j,
-.2804866851439370027628724 + 1.180931628453291873626003j]
elif N == 20:
p = [-.9062570115576771146523497 - 57961780277849516990208850.0e-27j,
-.9062570115576771146523497 + 57961780277849516990208850.0e-27j,
-.8959150941925768608568248 - .1740317175918705058595844j,
-.8959150941925768608568248 + .1740317175918705058595844j,
-.8749560316673332850673214 - .2905559296567908031706902j,
-.8749560316673332850673214 + .2905559296567908031706902j,
-.8427907479956670633544106 - .4078917326291934082132821j,
-.8427907479956670633544106 + .4078917326291934082132821j,
-.7984251191290606875799876 - .5264942388817132427317659j,
-.7984251191290606875799876 + .5264942388817132427317659j,
-.7402780309646768991232610 - .6469975237605228320268752j,
-.7402780309646768991232610 + .6469975237605228320268752j,
-.6658120544829934193890626 - .7703721701100763015154510j,
-.6658120544829934193890626 + .7703721701100763015154510j,
-.5707026806915714094398061 - .8982829066468255593407161j,
-.5707026806915714094398061 + .8982829066468255593407161j,
-.4465700698205149555701841 - 1.034097702560842962315411j,
-.4465700698205149555701841 + 1.034097702560842962315411j,
-.2719299580251652601727704 - 1.187099379810885886139638j,
-.2719299580251652601727704 + 1.187099379810885886139638j]
elif N == 21:
p = [-.9072262653142957028884077,
-.9025428073192696303995083 - .1105252572789856480992275j,
-.9025428073192696303995083 + .1105252572789856480992275j,
-.8883808106664449854431605 - .2213069215084350419975358j,
-.8883808106664449854431605 + .2213069215084350419975358j,
-.8643915813643204553970169 - .3326258512522187083009453j,
-.8643915813643204553970169 + .3326258512522187083009453j,
-.8299435470674444100273463 - .4448177739407956609694059j,
-.8299435470674444100273463 + .4448177739407956609694059j,
-.7840287980408341576100581 - .5583186348022854707564856j,
-.7840287980408341576100581 + .5583186348022854707564856j,
-.7250839687106612822281339 - .6737426063024382240549898j,
-.7250839687106612822281339 + .6737426063024382240549898j,
-.6506315378609463397807996 - .7920349342629491368548074j,
-.6506315378609463397807996 + .7920349342629491368548074j,
-.5564766488918562465935297 - .9148198405846724121600860j,
-.5564766488918562465935297 + .9148198405846724121600860j,
-.4345168906815271799687308 - 1.045382255856986531461592j,
-.4345168906815271799687308 + 1.045382255856986531461592j,
-.2640041595834031147954813 - 1.192762031948052470183960j,
-.2640041595834031147954813 + 1.192762031948052470183960j]
elif N == 22:
p = [-.9058702269930872551848625 - 52774908289999045189007100.0e-27j,
-.9058702269930872551848625 + 52774908289999045189007100.0e-27j,
-.8972983138153530955952835 - .1584351912289865608659759j,
-.8972983138153530955952835 + .1584351912289865608659759j,
-.8799661455640176154025352 - .2644363039201535049656450j,
-.8799661455640176154025352 + .2644363039201535049656450j,
-.8534754036851687233084587 - .3710389319482319823405321j,
-.8534754036851687233084587 + .3710389319482319823405321j,
-.8171682088462720394344996 - .4785619492202780899653575j,
-.8171682088462720394344996 + .4785619492202780899653575j,
-.7700332930556816872932937 - .5874255426351153211965601j,
-.7700332930556816872932937 + .5874255426351153211965601j,
-.7105305456418785989070935 - .6982266265924524000098548j,
-.7105305456418785989070935 + .6982266265924524000098548j,
-.6362427683267827226840153 - .8118875040246347267248508j,
-.6362427683267827226840153 + .8118875040246347267248508j,
-.5430983056306302779658129 - .9299947824439872998916657j,
-.5430983056306302779658129 + .9299947824439872998916657j,
-.4232528745642628461715044 - 1.055755605227545931204656j,
-.4232528745642628461715044 + 1.055755605227545931204656j,
-.2566376987939318038016012 - 1.197982433555213008346532j,
-.2566376987939318038016012 + 1.197982433555213008346532j]
elif N == 23:
p = [-.9066732476324988168207439,
-.9027564979912504609412993 - .1010534335314045013252480j,
-.9027564979912504609412993 + .1010534335314045013252480j,
-.8909283242471251458653994 - .2023024699381223418195228j,
-.8909283242471251458653994 + .2023024699381223418195228j,
-.8709469395587416239596874 - .3039581993950041588888925j,
-.8709469395587416239596874 + .3039581993950041588888925j,
-.8423805948021127057054288 - .4062657948237602726779246j,
-.8423805948021127057054288 + .4062657948237602726779246j,
-.8045561642053176205623187 - .5095305912227258268309528j,
-.8045561642053176205623187 + .5095305912227258268309528j,
-.7564660146829880581478138 - .6141594859476032127216463j,
-.7564660146829880581478138 + .6141594859476032127216463j,
-.6965966033912705387505040 - .7207341374753046970247055j,
-.6965966033912705387505040 + .7207341374753046970247055j,
-.6225903228771341778273152 - .8301558302812980678845563j,
-.6225903228771341778273152 + .8301558302812980678845563j,
-.5304922463810191698502226 - .9439760364018300083750242j,
-.5304922463810191698502226 + .9439760364018300083750242j,
-.4126986617510148836149955 - 1.065328794475513585531053j,
-.4126986617510148836149955 + 1.065328794475513585531053j,
-.2497697202208956030229911 - 1.202813187870697831365338j,
-.2497697202208956030229911 + 1.202813187870697831365338j]
elif N == 24:
p = [-.9055312363372773709269407 - 48440066540478700874836350.0e-27j,
-.9055312363372773709269407 + 48440066540478700874836350.0e-27j,
-.8983105104397872954053307 - .1454056133873610120105857j,
-.8983105104397872954053307 + .1454056133873610120105857j,
-.8837358034555706623131950 - .2426335234401383076544239j,
-.8837358034555706623131950 + .2426335234401383076544239j,
-.8615278304016353651120610 - .3403202112618624773397257j,
-.8615278304016353651120610 + .3403202112618624773397257j,
-.8312326466813240652679563 - .4386985933597305434577492j,
-.8312326466813240652679563 + .4386985933597305434577492j,
-.7921695462343492518845446 - .5380628490968016700338001j,
-.7921695462343492518845446 + .5380628490968016700338001j,
-.7433392285088529449175873 - .6388084216222567930378296j,
-.7433392285088529449175873 + .6388084216222567930378296j,
-.6832565803536521302816011 - .7415032695091650806797753j,
-.6832565803536521302816011 + .7415032695091650806797753j,
-.6096221567378335562589532 - .8470292433077202380020454j,
-.6096221567378335562589532 + .8470292433077202380020454j,
-.5185914574820317343536707 - .9569048385259054576937721j,
-.5185914574820317343536707 + .9569048385259054576937721j,
-.4027853855197518014786978 - 1.074195196518674765143729j,
-.4027853855197518014786978 + 1.074195196518674765143729j,
-.2433481337524869675825448 - 1.207298683731972524975429j,
-.2433481337524869675825448 + 1.207298683731972524975429j]
elif N == 25:
p = [-.9062073871811708652496104,
-.9028833390228020537142561 - 93077131185102967450643820.0e-27j,
-.9028833390228020537142561 + 93077131185102967450643820.0e-27j,
-.8928551459883548836774529 - .1863068969804300712287138j,
-.8928551459883548836774529 + .1863068969804300712287138j,
-.8759497989677857803656239 - .2798521321771408719327250j,
-.8759497989677857803656239 + .2798521321771408719327250j,
-.8518616886554019782346493 - .3738977875907595009446142j,
-.8518616886554019782346493 + .3738977875907595009446142j,
-.8201226043936880253962552 - .4686668574656966589020580j,
-.8201226043936880253962552 + .4686668574656966589020580j,
-.7800496278186497225905443 - .5644441210349710332887354j,
-.7800496278186497225905443 + .5644441210349710332887354j,
-.7306549271849967721596735 - .6616149647357748681460822j,
-.7306549271849967721596735 + .6616149647357748681460822j,
-.6704827128029559528610523 - .7607348858167839877987008j,
-.6704827128029559528610523 + .7607348858167839877987008j,
-.5972898661335557242320528 - .8626676330388028512598538j,
-.5972898661335557242320528 + .8626676330388028512598538j,
-.5073362861078468845461362 - .9689006305344868494672405j,
-.5073362861078468845461362 + .9689006305344868494672405j,
-.3934529878191079606023847 - 1.082433927173831581956863j,
-.3934529878191079606023847 + 1.082433927173831581956863j,
-.2373280669322028974199184 - 1.211476658382565356579418j,
-.2373280669322028974199184 + 1.211476658382565356579418j]
else:
raise ValueError("Bessel Filter not supported for order %d" % N)
return z, p, k
filter_dict = {'butter': [buttap, buttord],
'butterworth': [buttap, buttord],
'cauer': [ellipap, ellipord],
'elliptic': [ellipap, ellipord],
'ellip': [ellipap, ellipord],
'bessel': [besselap],
'cheby1': [cheb1ap, cheb1ord],
'chebyshev1': [cheb1ap, cheb1ord],
'chebyshevi': [cheb1ap, cheb1ord],
'cheby2': [cheb2ap, cheb2ord],
'chebyshev2': [cheb2ap, cheb2ord],
'chebyshevii': [cheb2ap, cheb2ord]
}
band_dict = {'band': 'bandpass',
'bandpass': 'bandpass',
'pass': 'bandpass',
'bp': 'bandpass',
'bs': 'bandstop',
'bandstop': 'bandstop',
'bands': 'bandstop',
'stop': 'bandstop',
'l': 'lowpass',
'low': 'lowpass',
'lowpass': 'lowpass',
'high': 'highpass',
'highpass': 'highpass',
'h': 'highpass'
}
warnings.simplefilter("always", BadCoefficients)
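# --- Illustrative lookup sketch (an addition, not part of the original SciPy
# module). ``filter_dict`` maps a filter-type alias to its
# [analog-prototype, order-selection] function pair and ``band_dict``
# normalizes band-type aliases to canonical names. The helper below is a
# hypothetical example of how such a lookup could be used; only the two
# dictionaries defined above are relied on.
def _resolve_filter_request(ftype, btype):
    try:
        typefuncs = filter_dict[ftype]   # e.g. 'butter' -> [buttap, buttord]
    except KeyError:
        raise ValueError("'%s' is not a known filter type." % ftype)
    try:
        band = band_dict[btype]          # e.g. 'bp' -> 'bandpass'
    except KeyError:
        raise ValueError("'%s' is not a known band type." % btype)
    return typefuncs, band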
| gpl-3.0 |
camallen/aggregation | experimental/penguins/cython/divisiveDBSCAN.py | 2 | 7243 | #!/usr/bin/env python
__author__ = 'greg'
from sklearn.cluster import DBSCAN
import numpy as np
import math
import matplotlib.cbook as cbook
from PIL import Image
import matplotlib.pyplot as plt
def dist(c1,c2):
return math.sqrt((c1[0]-c2[0])**2 + (c1[1]-c2[1])**2)
class CannotSplit(Exception):
def __init__(self,value):
self.value = value
def __str__(self):
return ""
samples_needed = 3
class DivisiveDBSCAN:
def __init__(self, min_samples):
self.min_samples = min_samples
self.starting_epsilon = math.sqrt(1000**2 + 750**2)
def __own_DBSCAN__(self,epsilon,markings,debug=False):
visited = {m:False for m in markings}
in_cluster = {m:False for m in markings}
def r1(m,m2):
if math.sqrt(math.pow(m[0]-m2[0],2)+math.pow(m[1]-m2[1],2)) <= epsilon+0.05:
return True
else:
return False
def r2(m,m2):
if ((m[0]-m2[0])**2+(m[1]-m2[1])**2)**0.5 <= epsilon:
return True
else:
return False
def query_region__(m):
return [m2 for m2 in markings if r1(m,m2) and not(in_cluster[m2])]
def cluster__(m, nearby_m):
cluster = set([m])
in_cluster[m] = True
for m2 in nearby_m:
if not(in_cluster[m2]):
cluster.add(m2)
in_cluster[m2] = True
if not(visited[m2]):
visited[m2] = True
nearby_m2 = query_region__(m2)
if len(nearby_m2) >= self.min_samples:
nearby_m.extend([m3 for m3 in nearby_m2 if not(m3 in nearby_m)])
return cluster
clusters = []
for m in markings:
if not(visited[m]):
visited[m] = True
nearby_m = query_region__(m)
if len(nearby_m) >= self.min_samples:
clusters.append(cluster__(m,nearby_m))
for c in clusters:
assert(len(c) >= self.min_samples)
labels = []
if debug:
print(clusters)
for m in markings:
found = False
for cluster_index,c in enumerate(clusters):
if m in c:
labels.append(cluster_index)
found = True
break
if not(found):
labels.append(-1)
return labels
def binary_search_DBSCAN(self,markings,user_ids,starting_epsilon,jpeg_file=None):
if jpeg_file is not None:
image_file = cbook.get_sample_data(jpeg_file)
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
x,y = zip(*markings)
plt.plot(x,y,'.',color='blue')
#check to see if all of the points are from the same user - if so, they are noise
#and we can skip the rest of the method
if len(set(user_ids)) == 1:
return markings,[],[]
#given starting_epsilon (and min_samples), all of the markings should be in the same cluster
markings_nparray = np.array(markings)
min_epsilon = 0.
max_epsilon = starting_epsilon
#print "===" + str(starting_epsilon)
#print self.min_samples
while (max_epsilon-min_epsilon) >= 0.01:
mid_epsilon = (max_epsilon+min_epsilon)/2.
#print mid_epsilon
db = DBSCAN(eps=mid_epsilon, min_samples=self.min_samples).fit(markings_nparray)
labels = db.labels_
unique_labels = set(labels)
#labels = self.__own_DBSCAN__(mid_epsilon,markings)
#unique_labels = set(labels)
if len(unique_labels) > 1:
min_epsilon = mid_epsilon
else:
max_epsilon = mid_epsilon
continue
#if all of the resulting clusters still need to be split
all_split = True
for k in unique_labels:
if k == -1:
continue
u_ = [ip for index,ip in enumerate(user_ids) if labels[index] == k]
#if a cluster does not need to be split any further
if len(set(u_)) == len(u_):
all_split = False
break
if all_split:
max_epsilon = mid_epsilon
else:
min_epsilon = mid_epsilon
#this is the epsilon we are going to be using
if min_epsilon == 0:
assert unique_labels == set([-1])
return markings,[],[]
assert min_epsilon > 0
#new_epsilon = int((min_epsilon*10))/10.
db = DBSCAN(eps=min_epsilon, min_samples=self.min_samples).fit(markings_nparray)
labels = db.labels_
#extract any and all clusters that do not need to be further split
#labels = self.__own_DBSCAN__(new_epsilon,markings)
unique_labels = set(labels)
#if all of the resulting clusters still need to be split
noise_markings = []
final_clusters = []
to_split_further_clusters = []
for k in unique_labels:
x_ = [ip for index,ip in enumerate(markings) if labels[index] == k]
#find out which points are in this cluster
#this usually will not happen - but CAN
if k == -1:
noise_markings.extend(x_)
else:
u_ = [ip for index,ip in enumerate(user_ids) if labels[index] == k]
#if a cluster does not need to be split any further - add it to the final clusters
if len(set(u_)) == len(u_):
if len(x_) < self.min_samples:
noise_markings.extend(x_)
else:
final_clusters.append(x_)
else:
if len(x_) < self.min_samples:
noise_markings.extend(x_)
else:
to_split_further_clusters.append((x_,u_,min_epsilon))
if (k == 0) and (jpeg_file is not None):
x,y = zip(*x_)
plt.plot(x,y,'.',color='green')
if jpeg_file is not None:
plt.show()
return noise_markings,final_clusters,to_split_further_clusters
def fit(self, markings,user_ids,jpeg_file=None):
#start by creating the initial "super" cluster
end_clusters = []
clusters_to_go = [(markings[:],user_ids[:],self.starting_epsilon),]
while True:
#if we have run out of clusters to process, break (hopefully done :) )
if clusters_to_go == []:
break
m_,u_,e_ = clusters_to_go.pop(0)
noise,final,to_split = self.binary_search_DBSCAN(m_,u_,e_,jpeg_file)
end_clusters.extend(final[:])
clusters_to_go.extend(to_split[:])
#break
cluster_centers = []
for cluster in end_clusters:
x,y = zip(*cluster)
cluster_centers.append((np.mean(x),np.mean(y)))
return cluster_centers
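# --- Usage sketch (an addition, not part of the original script) ---
# DivisiveDBSCAN repeatedly runs DBSCAN, binary-searching epsilon until each
# resulting cluster contains at most one marking per user; accepted clusters
# are reduced to their centroids. The coordinates and user ids below are
# made up solely to illustrate the expected call signature.
if __name__ == "__main__":
    example_markings = [(10.0, 12.0), (11.0, 12.5), (10.5, 11.8),
                        (40.0, 41.0), (40.5, 40.2), (39.8, 41.3)]
    example_users = ["u1", "u2", "u3", "u1", "u2", "u3"]
    centers = DivisiveDBSCAN(min_samples=3).fit(example_markings, example_users)
    print(centers)  # one (x, y) centroid per accepted cluster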
| apache-2.0 |
mjudsp/Tsallis | sklearn/gaussian_process/kernels.py | 24 | 66334 | """Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower and upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import math
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..externals import six
from ..base import clone
from sklearn.externals.funcsigs import signature
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
'n_elements', 'fixed'))):
"""A kernel hyperparameter's specification in form of a namedtuple.
Attributes
----------
name : string
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : string
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
The lower and upper bound on the parameter. If n_elements>1, a pair
of 1d array with n_elements each may be given alternatively. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default: None
Whether the value of this hyperparameter is fixed, i.e., cannot be
changed during hyperparameter tuning. If None is passed, the "fixed" is
derived based on the given bounds.
"""
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __init__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if not isinstance(bounds, six.string_types) or bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = isinstance(bounds, six.string_types) and bounds == "fixed"
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
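# --- Minimal illustration (an addition, not part of the original module) ---
# ``Hyperparameter`` is a plain namedtuple whose __new__ normalizes the
# bounds: scalar (low, high) pairs become a 2d array, are repeated for
# vector-valued parameters, and ``fixed`` defaults to whether the bounds are
# the string "fixed". The values below are arbitrary.
def _example_hyperparameter():
    h = Hyperparameter("length_scale", "numeric", (1e-5, 1e5), n_elements=2)
    assert h.bounds.shape == (2, 2)   # one (low, high) pair per element
    assert h.fixed is False
    h_fixed = Hyperparameter("noise_level", "numeric", "fixed")
    assert h_fixed.fixed is True      # derived from the "fixed" bounds string
    return h, h_fixed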
class Kernel(six.with_metaclass(ABCMeta)):
"""Base class for all kernels."""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if (parameter.kind != parameter.VAR_KEYWORD and
parameter.name != 'self'):
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
for arg in args:
params[arg] = getattr(self, arg, None)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta. """
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = []
for attr, value in sorted(self.__dict__.items()):
if attr.startswith("hyperparameter_"):
r.append(value)
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(getattr(self, hyperparameter.name))
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
setattr(self, hyperparameter.name,
np.exp(theta[i:i + hyperparameter.n_elements]))
i += hyperparameter.n_elements
else:
setattr(self, hyperparameter.name, np.exp(theta[i]))
i += 1
if i != len(theta):
raise ValueError("theta has not the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
bounds.append(hyperparameter.bounds)
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
class NormalizedKernelMixin(object):
"""Mixin for kernels which are normalized: k(X, X)=1."""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin(object):
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y)."""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels."""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k_dims = self.k1.n_dims
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i * k_dims:(i + 1) * k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Note that this compound kernel returns the results of all simple kernels
stacked along an additional axis.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class KernelOperator(Kernel):
"""Base class for all kernel operators. """
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.k1.hyperparameters:
r.append(Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
class Sum(KernelOperator):
"""Sum-kernel k1 + k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_sum(X, Y) = k1(X, Y) + k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the sum-kernel
k2 : Kernel object
The second base-kernel of the sum-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""Product-kernel k1 * k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_prod(X, Y) = k1(X, Y) * k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the product-kernel
k2 : Kernel object
The second base-kernel of the product-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""Exponentiate kernel by given exponent.
The resulting kernel is defined as
k_exp(X, Y) = k(X, Y) ** exponent
Parameters
----------
kernel : Kernel object
The base kernel
exponent : float
The exponent for the base kernel
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
class ConstantKernel(StationaryKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
k(x_1, x_2) = constant_value for all x_1, x_2
Parameters
----------
constant_value : float, default: 1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on constant_value
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
self.hyperparameter_constant_value = \
Hyperparameter("constant_value", "numeric", constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = self.constant_value * np.ones((X.shape[0], Y.shape[0]))
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, self.constant_value
* np.ones((X.shape[0], X.shape[0], 1)))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.constant_value * np.ones(X.shape[0])
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise-component of the signal. Tuning its parameter
corresponds to estimating the noise-level.
k(x_1, x_2) = noise_level if x_1 == x_2 else 0
Parameters
----------
noise_level : float, default: 1.0
Parameter controlling the noise level
noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on noise_level
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
self.hyperparameter_noise_level = \
Hyperparameter("noise_level", "numeric", noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(X.shape[0])
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level
* np.eye(X.shape[0])[:, :, np.newaxis])
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
else:
return np.zeros((X.shape[0], Y.shape[0]))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.noise_level * np.ones(X.shape[0])
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
self.noise_level)
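# --- Usage sketch (an addition, not part of the original module) ---
# WhiteKernel models independent noise: k(X, X) is noise_level times the
# identity, while the cross-covariance k(X, Y) is identically zero. Toy data
# below is made up for illustration only.
def _example_white_kernel():
    X = np.array([[0.0], [1.0], [2.0]])
    Y = np.array([[0.5], [1.5]])
    k = WhiteKernel(noise_level=0.25)
    assert np.allclose(k(X), 0.25 * np.eye(3))   # noise on the diagonal only
    assert np.allclose(k(X, Y), 0.0)             # no cross-covariance
    assert np.allclose(k.diag(X), 0.25)
    return k(X)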
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length-scale
parameter length_scale>0, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
k(x_i, x_j) = exp(-1 / 2 d(x_i / length_scale, x_j / length_scale)^2)
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
if np.iterable(length_scale):
if len(length_scale) > 1:
self.anisotropic = True
self.length_scale = np.asarray(length_scale, dtype=np.float)
else:
self.anisotropic = False
self.length_scale = float(length_scale[0])
else:
self.anisotropic = False
self.length_scale = float(length_scale)
self.length_scale_bounds = length_scale_bounds
if self.anisotropic: # anisotropic length_scale
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds,
len(length_scale))
else:
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or self.length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (self.length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
raise Exception("Anisotropic kernels require that the number "
"of length scales and features match.")
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, self.length_scale)
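# --- Usage sketch (an addition, not part of the original module) ---
# RBF is normalized (k(x, x) = 1) and decays with the squared Euclidean
# distance scaled by length_scale; eval_gradient returns the derivative with
# respect to the log length-scale. Because all kernels overload "+", "*" and
# "**" (with scalars promoted to ConstantKernel), a typical GP covariance can
# be written as a short expression. Data and hyperparameters are made up.
def _example_rbf_kernel():
    X = np.array([[0.0], [1.0], [3.0]])
    k = RBF(length_scale=2.0)
    K, K_grad = k(X, eval_gradient=True)
    assert np.allclose(np.diag(K), 1.0)                  # normalized kernel
    assert np.allclose(K[0, 1], np.exp(-0.5 * (1.0 / 2.0) ** 2))
    assert K_grad.shape == (3, 3, 1)                     # one non-fixed parameter
    assert np.allclose(k.theta, np.log([2.0]))           # log length-scale
    composed = 2.0 * RBF(length_scale=1.0) + WhiteKernel(noise_level=0.1)
    assert isinstance(composed, Sum)                     # "+" builds a Sum kernel
    assert composed.theta.shape == (3,)                  # constant, length, noise
    return K, composed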
class Matern(RBF):
""" Matern kernel.
The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
See Rasmussen and Williams 2006, pp84 for details regarding the
different variants of the Matern kernel.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
nu: float, default: 1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
(twice differentiable functions). Note that values of nu not in
[0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
(appr. 10 times higher) since they require to evaluate the modified
Bessel function. Furthermore, in contrast to l, nu is kept fixed to
its initial value and not optimized.
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
nu=1.5):
super(Matern, self).__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
/ (self.length_scale ** 2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
K_gradient = K[..., np.newaxis] * D \
/ np.sqrt(D.sum(2))[:, :, np.newaxis]
K_gradient[~np.isfinite(K_gradient)] = 0
elif self.nu == 1.5:
K_gradient = \
3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu)
else: # isotropic
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.nu)
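# --- Usage sketch (an addition, not part of the original module) ---
# For nu = 0.5 the Matern kernel reduces to the absolute-exponential kernel
# exp(-d) and for nu = 1.5 to (1 + sqrt(3) d) exp(-sqrt(3) d); these are the
# cheap closed forms handled above. Inputs below are made up.
def _example_matern_kernel():
    X = np.array([[0.0], [1.0], [2.5]])
    d = np.abs(X - X.T)                                  # pairwise |x_i - x_j|
    K_05 = Matern(length_scale=1.0, nu=0.5)(X)
    assert np.allclose(K_05, np.exp(-d))                 # absolute exponential
    K_15 = Matern(length_scale=1.0, nu=1.5)(X)
    expected = (1.0 + np.sqrt(3) * d) * np.exp(-np.sqrt(3) * d)
    assert np.allclose(K_15, expected)
    return K_05, K_15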
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length-scales. It is
parameterized by a length-scale parameter length_scale>0 and a scale
mixture parameter alpha>0. Only the isotropic variant where length_scale is
a scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
alpha : float > 0, default: 1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on alpha
"""
def __init__(self, length_scale=1.0, alpha=1.0,
length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_alpha = \
Hyperparameter("alpha", "numeric", alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = dists / (2 * self.alpha * self.length_scale ** 2)
base = (1 + tmp)
K = base ** -self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='sqeuclidean')
K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
dists * K / (self.length_scale ** 2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = \
K * (-self.alpha * np.log(base)
+ dists / (2 * self.length_scale ** 2 * base))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale)
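# --- Usage sketch (an addition, not part of the original module) ---
# RationalQuadratic is a scale mixture of RBF kernels; as alpha grows it
# approaches an RBF with the same length_scale. The comparison below uses a
# large alpha and made-up inputs, with a loose tolerance for the limit.
def _example_rational_quadratic_kernel():
    X = np.array([[0.0], [0.7], [2.0]])
    rq = RationalQuadratic(length_scale=1.5, alpha=1e6)
    assert np.allclose(rq(X), RBF(length_scale=1.5)(X), atol=1e-4)
    # explicit formula for one pair, matching the docstring above
    d2 = (0.7 - 0.0) ** 2
    expected = (1 + d2 / (2 * 1e6 * 1.5 ** 2)) ** -1e6
    assert np.allclose(rq(X)[0, 1], expected)
    return rq(X)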
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Exp-Sine-Squared kernel.
The ExpSineSquared kernel allows modeling periodic functions. It is
parameterized by a length-scale parameter length_scale>0 and a periodicity
parameter periodicity>0. Only the isotropic variant where length_scale is a
scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = exp(-2 (sin(\pi / periodicity * d(x_i, x_j)) / length_scale)^2)
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
periodicity : float > 0, default: 1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on periodicity
"""
def __init__(self, length_scale=1.0, periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_periodicity = \
Hyperparameter("periodicity", "numeric", periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='euclidean'))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
/ self.length_scale) ** 2)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = \
4 * arg / self.length_scale**2 * cos_of_arg \
* sin_of_arg * K
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity)
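# --- Usage sketch (an addition, not part of the original module) ---
# ExpSineSquared is exactly periodic: inputs separated by an integer multiple
# of ``periodicity`` are perfectly correlated (k = 1), while off-period
# shifts give values below 1. Inputs below are made up.
def _example_exp_sine_squared_kernel():
    k = ExpSineSquared(length_scale=1.0, periodicity=3.0)
    X = np.array([[0.0], [3.0], [6.0], [1.0]])
    K = k(X)
    assert np.allclose(K[0, 1], 1.0)    # shift by one period
    assert np.allclose(K[0, 2], 1.0)    # shift by two periods
    assert K[0, 3] < 1.0                # off-period shift decorrelates
    return K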
class DotProduct(Kernel):
"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
regression by putting N(0, 1) priors on the coefficients of x_d (d = 1, ..., D)
and a prior of N(0, \sigma_0^2) on the bias. The DotProduct kernel
is invariant to a rotation of the coordinates about the origin, but not
translations. It is parameterized by a parameter sigma_0^2. For
sigma_0^2 =0, the kernel is called the homogeneous linear kernel, otherwise
it is inhomogeneous. The kernel is given by
k(x_i, x_j) = sigma_0 ^ 2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
Parameters
----------
sigma_0 : float >= 0, default: 1.0
Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
the kernel is homogeneous.
sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on sigma_0
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
self.hyperparameter_sigma_0 = \
Hyperparameter("sigma_0", "numeric", sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0 ** 2
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0 ** 2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0 ** 2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(
self.__class__.__name__, self.sigma_0)
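# Editor's worked sketch (not part of the original module): for x_i = [1, 2],
# x_j = [3, 4] and sigma_0 = 1, the kernel value is
# sigma_0 ** 2 + x_i . x_j = 1 + (1*3 + 2*4) = 12. Equivalently:
#
#     >>> import numpy as np
#     >>> DotProduct(sigma_0=1.0)(np.array([[1., 2.]]), np.array([[3., 4.]]))
#     array([[ 12.]])    # exact repr formatting depends on the numpy version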
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
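# Editor's worked sketch of _approx_fprime: it forward-differences a function
# with 2-D (matrix) output, one hyperparameter at a time. For example, with
# f = lambda t: t[0] * np.ones((2, 2)), _approx_fprime(np.array([3.0]), f, 1e-8)
# is approximately np.ones((2, 2, 1)): every matrix entry changes at rate 1
# with respect to the single parameter t[0].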
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
Parameters
----------
gamma: float >= 0, default: 1.0
Parameter gamma of the pairwise kernel specified by metric
gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on gamma
metric : string, or callable, default: "linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default: None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
"""
def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
pairwise_kernels_kwargs=None):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.hyperparameter_gamma = \
Hyperparameter("gamma", "numeric", gamma_bounds)
self.metric = metric
if pairwise_kernels_kwargs is not None:
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
else:
self.pairwise_kernels_kwargs = {}
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
filter_params=True,
**self.pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X, Y, metric=self.metric, gamma=np.exp(gamma),
filter_params=True, **self.pairwise_kernels_kwargs)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X)[:, 0]
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric)
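# Editor's usage sketch (hedged; `X_train` below is a hypothetical array of
# shape (n_samples, n_features)): PairwiseKernel exposes only gamma to the
# optimizer, everything else is fixed at construction time.
#
#     k = PairwiseKernel(gamma=1.0, metric="rbf")
#     K = k(X_train)                                # kernel matrix, (n, n)
#     K, K_grad = k(X_train, eval_gradient=True)    # numeric gradient w.r.t. log(gamma)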
| bsd-3-clause |
equialgo/scikit-learn | sklearn/metrics/cluster/supervised.py | 25 | 31477 | """Utilities to evaluate the clustering performance of models.
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# Arnaud Fouchet <[email protected]>
# Thierry Guillemot <[email protected]>
# Gregory Stupp <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
from math import log
import numpy as np
from scipy.misc import comb
from scipy import sparse as sp
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
from ...utils.validation import check_array
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays."""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None, sparse=False):
"""Build a contingency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps : None or float, optional.
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
sparse : boolean, optional.
        If True, return a sparse CSR contingency matrix. If ``eps is not None``,
and ``sparse is True``, will throw ValueError.
.. versionadded:: 0.18
Returns
-------
contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
Will be a ``scipy.sparse.csr_matrix`` if ``sparse=True``.
"""
if eps is not None and sparse:
raise ValueError("Cannot set 'eps' when sparse=True")
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = sp.coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int)
if sparse:
contingency = contingency.tocsr()
contingency.sum_duplicates()
else:
contingency = contingency.toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
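# Editor's worked sketch: for labels_true = [0, 0, 1, 1] and
# labels_pred = [0, 1, 1, 1] the contingency matrix is
#
#     contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])
#     # -> array([[1, 1],
#     #           [0, 2]])
#
# i.e. one sample of true class 0 falls in cluster 0 and one in cluster 1,
# while both samples of true class 1 fall in cluster 1.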
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance.
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
    Perfectly matching labelings have a score of 1 even when the label values
    are permuted
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
    If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://link.springer.com/article/10.1007%2FBF01908075
.. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
n_classes = np.unique(labels_true).shape[0]
n_clusters = np.unique(labels_pred).shape[0]
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (n_classes == n_clusters == 1 or
n_classes == n_clusters == 0 or
n_classes == n_clusters == n_samples):
return 1.0
# Compute the ARI using the contingency data
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
sum_comb_c = sum(comb2(n_c) for n_c in np.ravel(contingency.sum(axis=1)))
sum_comb_k = sum(comb2(n_k) for n_k in np.ravel(contingency.sum(axis=0)))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.data)
prod_comb = (sum_comb_c * sum_comb_k) / comb(n_samples, 2)
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return (sum_comb - prod_comb) / (mean_comb - prod_comb)
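# Editor's worked sketch of the pair counts above for the 0.57... doctest case
# labels_true = [0, 0, 1, 2], labels_pred = [0, 0, 1, 1] (contingency rows
# [2, 0], [0, 1], [0, 1]):
#     sum_comb   = C(2, 2)                     = 1
#     sum_comb_c = C(2, 2) + C(1, 2) + C(1, 2) = 1      (row sums 2, 1, 1)
#     sum_comb_k = C(2, 2) + C(2, 2)           = 2      (column sums 2, 2)
#     prod_comb  = 1 * 2 / C(4, 2)             = 1/3
#     mean_comb  = (1 + 2) / 2                 = 1.5
#     ARI = (1 - 1/3) / (1.5 - 1/3)            ~ 0.571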
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once.
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity : float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure : float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
MI = mutual_info_score(None, None, contingency=contingency)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness /
(homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity : float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
    If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    Labelings that assign all class members to the same clusters
    are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
    Labelings that have pure clusters with members coming from the same
    classes are homogeneous, but unnecessary splits harm completeness
    and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
    If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings.
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency : {None, array, sparse matrix},
shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi : float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
else:
contingency = check_array(contingency,
accept_sparse=['csr', 'csc', 'coo'],
dtype=[int, np.int32, np.int64])
if isinstance(contingency, np.ndarray):
# For an array
nzx, nzy = np.nonzero(contingency)
nz_val = contingency[nzx, nzy]
elif sp.issparse(contingency):
# For a sparse matrix
nzx, nzy, nz_val = sp.find(contingency)
else:
raise ValueError("Unsupported type for 'contingency': %s" %
type(contingency))
contingency_sum = contingency.sum()
pi = np.ravel(contingency.sum(axis=1))
pj = np.ravel(contingency.sum(axis=0))
log_contingency_nm = np.log(nz_val)
contingency_nm = nz_val / contingency_sum
# Don't need to calculate the full outer product, just for non-zeroes
outer = pi.take(nzx) * pj.take(nzy)
log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
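# Editor's worked sketch: two identical two-cluster labelings of four samples
# share log(2) nats of information, while (nearly) independent labelings give
# an MI close to 0.
#
#     mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])   # -> 0.6931... (= ln 2)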
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings.
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami : float (upper bound of 1.0)
        The AMI returns a value of 1 when the two partitions are identical
        (i.e. perfectly matched). Random partitions (independent labellings)
        have an expected AMI of around 0 on average and can therefore be
        negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_information_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
contingency = contingency.astype(np.float64)
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings.
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
    This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
contingency = contingency.astype(np.float64)
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def fowlkes_mallows_score(labels_true, labels_pred, sparse=False):
"""Measure the similarity of two clusterings of a set of points.
    The Fowlkes-Mallows index (FMI) is defined as the geometric mean of
    the precision and recall::
FMI = TP / sqrt((TP + FP) * (TP + FN))
    Where ``TP`` is the number of **True Positives** (i.e. the number of pairs
    of points that belong to the same cluster in both ``labels_true`` and
    ``labels_pred``), ``FP`` is the number of **False Positives** (i.e. the
    number of pairs of points that belong to the same cluster in
    ``labels_true`` but not in ``labels_pred``) and ``FN`` is the number of
    **False Negatives** (i.e. the number of pairs of points that belong to the
    same cluster in ``labels_pred`` but not in ``labels_true``).
The score ranges from 0 to 1. A high value indicates a good similarity
between two clusters.
Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.
Parameters
----------
labels_true : int array, shape = (``n_samples``,)
A clustering of the data into disjoint subsets.
labels_pred : array, shape = (``n_samples``, )
A clustering of the data into disjoint subsets.
Returns
-------
score : float
The resulting Fowlkes-Mallows score.
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import fowlkes_mallows_score
>>> fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
the assignment is totally random, hence the FMI is null::
>>> fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
    .. [1] `E. B. Fowlkes and C. L. Mallows, 1983. "A method for comparing two
hierarchical clusterings". Journal of the American Statistical
Association
<http://wildfire.stat.ucla.edu/pdflibrary/fowlkes.pdf>`_
.. [2] `Wikipedia entry for the Fowlkes-Mallows Index
<https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples, = labels_true.shape
c = contingency_matrix(labels_true, labels_pred, sparse=True)
tk = np.dot(c.data, c.data) - n_samples
pk = np.sum(np.asarray(c.sum(axis=0)).ravel() ** 2) - n_samples
qk = np.sum(np.asarray(c.sum(axis=1)).ravel() ** 2) - n_samples
return tk / np.sqrt(pk * qk) if tk != 0. else 0.
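# Editor's worked sketch of the pair counting above for
# labels_true = [0, 0, 1, 1], labels_pred = [0, 1, 1, 1]: only the pair (2, 3)
# is together in both labelings, so TP = 1; labels_true puts 2 pairs together
# (TP + FP = 2) and labels_pred puts 3 pairs together (TP + FN = 3), giving
# FMI = 1 / sqrt(2 * 3) ~ 0.408. The tk/pk/qk variables count the same
# quantities as ordered pairs (tk = 2, qk = 4, pk = 6) and give the same score.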
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float64)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
    # possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
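# Editor's worked sketch: a balanced two-class labeling has entropy ln(2),
# while a single-class labeling carries no information.
#
#     entropy([0, 0, 1, 1])   # -> 0.6931... (= ln 2)
#     entropy([0, 0, 0, 0])   # -> 0.0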
| bsd-3-clause |
noelevans/sandpit | kaggle/washington_bike_share/learner.py | 1 | 1109 | import csv
import numpy as np
import pandas as pd
INPUT_FIELDS = set(['season', 'holiday', 'workingday', 'weather',
'temp', 'atemp', 'humidity', 'windspeed'])
CONTINUOUS_FIELDS = set(['temp', 'atemp', 'humidity', 'windspeed'])
MANUAL_FIELDS = set(['month', 'hour'])
DISCRETE_FIELDS = (INPUT_FIELDS - CONTINUOUS_FIELDS).union(MANUAL_FIELDS)
OUTCOME = 'count'
def load_and_munge_training_data(filename):
ds = pd.read_csv(open(filename))
parse_month = lambda dt: int(dt.split('-')[1])
parse_hour = lambda dt: int(dt.split()[1].split(':')[0])
ds['month'] = map(parse_month, ds['datetime'])
ds['hour'] = map(parse_hour, ds['datetime'])
return ds
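# Illustrative note (editor's sketch): the two lambdas above pull the month and
# hour out of the Kaggle datetime strings, e.g. a row with
# datetime == '2011-01-20 16:00:00' yields month == 1 and hour == 16.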
def fields_influence(training):
def sum_std_deviations(field):
return training.groupby(field)['count'].apply(np.mean).sum()
return reversed(sorted((sum_std_deviations(f), f) for f in DISCRETE_FIELDS))
def main():
training = load_and_munge_training_data('train.csv')
print [el[1] for el in fields_influence(training)]
if __name__ == '__main__':
main()
| mit |
metaml/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/fontconfig_pattern.py | 72 | 6429 | """
A module for parsing and generating fontconfig patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
# Author : Michael Droettboom <[email protected]>
# License : matplotlib license (PSF compatible)
# This class is defined here because it must be available in:
# - The old-style config framework (:file:`rcsetup.py`)
# - The traits-based config framework (:file:`mpltraits.py`)
# - The font manager (:file:`font_manager.py`)
# It probably logically belongs in :file:`font_manager.py`, but
# placing it in any of these places would have created cyclical
# dependency problems, or an undesired dependency on traits even
# when the traits-based config framework is not used.
import re
from matplotlib.pyparsing import Literal, ZeroOrMore, \
Optional, Regex, StringEnd, ParseException, Suppress
family_punc = r'\\\-:,'
family_unescape = re.compile(r'\\([%s])' % family_punc).sub
family_escape = re.compile(r'([%s])' % family_punc).sub
value_punc = r'\\=_:,'
value_unescape = re.compile(r'\\([%s])' % value_punc).sub
value_escape = re.compile(r'([%s])' % value_punc).sub
class FontconfigPatternParser:
"""A simple pyparsing-based parser for fontconfig-style patterns.
See the `fontconfig pattern specification
<http://www.fontconfig.org/fontconfig-user.html>`_ for more
information.
"""
_constants = {
'thin' : ('weight', 'light'),
'extralight' : ('weight', 'light'),
'ultralight' : ('weight', 'light'),
'light' : ('weight', 'light'),
'book' : ('weight', 'book'),
'regular' : ('weight', 'regular'),
'normal' : ('weight', 'normal'),
'medium' : ('weight', 'medium'),
'demibold' : ('weight', 'demibold'),
'semibold' : ('weight', 'semibold'),
'bold' : ('weight', 'bold'),
'extrabold' : ('weight', 'extra bold'),
'black' : ('weight', 'black'),
'heavy' : ('weight', 'heavy'),
'roman' : ('slant', 'normal'),
'italic' : ('slant', 'italic'),
'oblique' : ('slant', 'oblique'),
'ultracondensed' : ('width', 'ultra-condensed'),
'extracondensed' : ('width', 'extra-condensed'),
'condensed' : ('width', 'condensed'),
'semicondensed' : ('width', 'semi-condensed'),
'expanded' : ('width', 'expanded'),
'extraexpanded' : ('width', 'extra-expanded'),
'ultraexpanded' : ('width', 'ultra-expanded')
}
def __init__(self):
family = Regex(r'([^%s]|(\\[%s]))*' %
(family_punc, family_punc)) \
.setParseAction(self._family)
size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)") \
.setParseAction(self._size)
name = Regex(r'[a-z]+') \
.setParseAction(self._name)
value = Regex(r'([^%s]|(\\[%s]))*' %
(value_punc, value_punc)) \
.setParseAction(self._value)
families =(family
+ ZeroOrMore(
Literal(',')
+ family)
).setParseAction(self._families)
point_sizes =(size
+ ZeroOrMore(
Literal(',')
+ size)
).setParseAction(self._point_sizes)
property =( (name
+ Suppress(Literal('='))
+ value
+ ZeroOrMore(
Suppress(Literal(','))
+ value)
)
| name
).setParseAction(self._property)
pattern =(Optional(
families)
+ Optional(
Literal('-')
+ point_sizes)
+ ZeroOrMore(
Literal(':')
+ property)
+ StringEnd()
)
self._parser = pattern
self.ParseException = ParseException
def parse(self, pattern):
"""
Parse the given fontconfig *pattern* and return a dictionary
of key/value pairs useful for initializing a
:class:`font_manager.FontProperties` object.
"""
props = self._properties = {}
try:
self._parser.parseString(pattern)
except self.ParseException, e:
raise ValueError("Could not parse font string: '%s'\n%s" % (pattern, e))
self._properties = None
return props
def _family(self, s, loc, tokens):
return [family_unescape(r'\1', str(tokens[0]))]
def _size(self, s, loc, tokens):
return [float(tokens[0])]
def _name(self, s, loc, tokens):
return [str(tokens[0])]
def _value(self, s, loc, tokens):
return [value_unescape(r'\1', str(tokens[0]))]
def _families(self, s, loc, tokens):
self._properties['family'] = [str(x) for x in tokens]
return []
def _point_sizes(self, s, loc, tokens):
self._properties['size'] = [str(x) for x in tokens]
return []
def _property(self, s, loc, tokens):
if len(tokens) == 1:
if tokens[0] in self._constants:
key, val = self._constants[tokens[0]]
self._properties.setdefault(key, []).append(val)
else:
key = tokens[0]
val = tokens[1:]
self._properties.setdefault(key, []).extend(val)
return []
parse_fontconfig_pattern = FontconfigPatternParser().parse
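# Editor's usage sketch (hedged; the exact dict contents depend on the pattern
# and on the constants table above):
#
#     parse_fontconfig_pattern("serif-12:bold:italic")
#     # -> {'family': ['serif'], 'size': ['12'],
#     #     'weight': ['bold'], 'slant': ['italic']}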
def generate_fontconfig_pattern(d):
"""
Given a dictionary of key/value pairs, generates a fontconfig
pattern string.
"""
props = []
families = ''
size = ''
for key in 'family style variant weight stretch file size'.split():
val = getattr(d, 'get_' + key)()
if val is not None and val != []:
if type(val) == list:
val = [value_escape(r'\\\1', str(x)) for x in val if x is not None]
if val != []:
val = ','.join(val)
props.append(":%s=%s" % (key, val))
return ''.join(props)
| agpl-3.0 |
armbrustlab/seaflowpy | src/seaflowpy/fileio.py | 1 | 16455 | from contextlib import contextmanager
import gzip
import io
import os
import zlib
import numpy as np
import pandas as pd
from . import errors
from . import particleops
from .seaflowfile import SeaFlowFile
from . import util
@contextmanager
def file_open_r(path, fileobj=None):
"""
Open path or fileobj for reading as a context manager.
Data read from the return value of this function will come from path or
preferentially fileobj if provided. If path is provided and ends with '.gz'
data will be considered gzip compressed even if read from fileobj. All
resources opened by this function (input file handles) or open resources
passed to this function (fileobj) will be cleaned up by context managers.
The return value of this function should always be used within a 'with'
block.
Parameters
-----------
path: str
File path.
fileobj: io.BytesIO, optional
Open file object.
Returns
-------
Context manager for file-like object of Bytes.
"""
# zlib is faster than gzip for decompression of EVT data on MacOS, and
# comparable on Linux.
if fileobj:
if path.endswith('.gz'):
gzdata = fileobj.read()
zobj = zlib.decompressobj(wbits=zlib.MAX_WBITS|32)
data = zobj.decompress(gzdata)
yield io.BytesIO(data)
else:
yield fileobj
else:
if path.endswith('.gz'):
with io.open(path, 'rb') as fileobj:
gzdata = fileobj.read()
zobj = zlib.decompressobj(wbits=zlib.MAX_WBITS|32)
data = zobj.decompress(gzdata)
yield io.BytesIO(data)
else:
with io.open(path, 'rb') as fh:
yield fh
@contextmanager
def file_open_w(path):
"""
Open path for writing as a context manager.
If path ends with '.gz' data will gzip compressed. Only the write method of
the returned object should be used. All resources opened in this function
(output file handles, gzipping child processes) will be cleaned up by
context managers. The return value of this function should always be used
within a 'with' block.
Parameters
-----------
path: str
File path.
Returns
-------
Context manager for writable file-like object.
"""
if path.endswith('.gz'):
with gzip.open(path, mode='wb', compresslevel=9) as fh:
yield fh
else:
with io.open(path, 'wb') as fh:
yield fh
def read_labview(path, columns, fileobj=None):
"""
Read a labview binary SeaFlow data file.
Data will be read from the file at the provided path or preferentially from
fileobj if provided. If path is provided and ends with '.gz' data will be
considered gzip compressed even if read from fileobj.
Parameters
-----------
path: str
File path.
columns: list of str
Names of columns. Also represents how many columns there are.
fileobj: io.BytesIO, optional
Open file object.
Returns
-------
pandas.DataFrame
SeaFlow event DataFrame as numpy.float64 values.
"""
    colcnt = len(columns) + 2  # 2 leading columns per row
try:
with file_open_r(path, fileobj) as fh:
# Particle count (rows of data) is stored in an initial 32-bit
# unsigned int
buff = fh.read(4)
if len(buff) == 0:
raise errors.FileError("File is empty")
if len(buff) != 4:
raise errors.FileError("File has invalid particle count header")
rowcnt = np.frombuffer(buff, dtype="uint32", count=1)[0]
if rowcnt == 0:
raise errors.FileError("File has no particle data")
# Read the rest of the data. Each particle has colcnt unsigned
# 16-bit ints in a row.
expected_bytes = rowcnt * colcnt * 2 # rowcnt * colcnt columns * 2 bytes
# must cast to int here because while BufferedIOReader objects
# returned from io.open(path, "rb") will accept a numpy.int64 type,
# io.BytesIO objects will not accept this type and will only accept
# vanilla int types. This is true for Python 3, not for Python 2.
buff = fh.read(int(expected_bytes))
# Read any extra data at the end of the file for error checking. There
# shouldn't be any extra data, btw.
extra_bytes = 0
while True:
new_bytes = len(fh.read(8192))
extra_bytes += new_bytes
if new_bytes == 0: # end of file
break
except (IOError, EOFError, zlib.error) as e:
raise errors.FileError("File could not be read: {}".format(str(e)))
# Check that file has the expected number of data bytes.
found_bytes = len(buff) + extra_bytes
if found_bytes != expected_bytes:
raise errors.FileError(
"File has incorrect number of data bytes. Expected %i, saw %i" %
(expected_bytes, found_bytes)
)
events = np.frombuffer(buff, dtype="uint16", count=rowcnt*colcnt)
# Reshape into a matrix of colcnt columns and one row per particle
events = np.reshape(events, [rowcnt, colcnt])
# Create a Pandas DataFrame with descriptive column names.
#
# The first two uint16s [0,10] from start of each row are left out.
# These ints are an idiosyncrasy of LabVIEW's binary output format.
# I believe they're supposed to serve as EOL signals (NULL,
# linefeed in ASCII), but because the last line doesn't have them
# it's easier to treat them as leading ints on each line after the
# header.
df = pd.DataFrame(np.delete(events, [0, 1], 1), columns=columns)
return df
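# Editor's worked sketch of the expected layout (assuming the 10 standard
# SeaFlow channel columns): a file with 100 particles occupies
# 4 + 100 * (10 + 2) * 2 = 2404 bytes -- a uint32 row count followed by one
# row per particle of 12 uint16s (2 LabVIEW padding ints + 10 channel values).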
def read_labview_row_count(path, fileobj=None):
"""
Get the row count of a labview binary SeaFlow data file.
Only a small amount of data from the beginning of the file will be read to
get the reported row count from the file header. This should be a much
faster method of getting row count than reading the whole file. Data will
be read from the file at the provided path or preferentially from fileobj
if provided. If path is provided and ends with '.gz' data will be
considered gzip compressed even if read from fileobj.
Parameters
-----------
path: str
File path.
fileobj: io.BytesIO, optional
Open file object.
Returns
-------
int
Number of rows reported in the labview file header (first uint32).
"""
if path.endswith('.gz'):
with io.open(path, 'rb') as fh:
fileobj = io.BytesIO(fh.read(512)) # read enough to get first 4 uncompressed bytes
with file_open_r(path, fileobj) as fh:
# Particle count (rows of data) is stored in an initial 32-bit
# unsigned int
try:
buff = fh.read(4)
except (IOError, EOFError) as e:
raise errors.FileError("File could not be read: {}".format(str(e)))
if len(buff) == 0:
raise errors.FileError("File is empty")
if len(buff) != 4:
raise errors.FileError("File has invalid particle count header")
rowcnt = np.frombuffer(buff, dtype="uint32", count=1)[0]
return rowcnt
def read_evt_labview(path, fileobj=None):
"""
Read a raw labview binary SeaFlow data file.
Data will be read from the file at the provided path or preferentially from
fileobj if provided. If path is provided and ends with '.gz' data will be
considered gzip compressed even if read from fileobj.
Parameters
-----------
path: str
File path.
fileobj: io.BytesIO, optional
Open file object.
Returns
-------
pandas.DataFrame
SeaFlow event DataFrame as numpy.float64 values.
"""
return read_labview(path, particleops.COLUMNS, fileobj).astype(np.float64)
def read_opp_labview(path, fileobj=None):
"""
Read an OPP labview binary SeaFlow data file.
Data will be read from the file at the provided path or preferentially from
fileobj if provided. If path is provided and ends with '.gz' data will be
considered gzip compressed even if read from fileobj.
Parameters
-----------
path: str
File path.
fileobj: io.BytesIO, optional
Open file object.
Returns
-------
pandas.DataFrame
SeaFlow OPP DataFrame as numpy.float64 values with quantile flag
columns.
"""
df = read_labview(path, particleops.COLUMNS + ["bitflags"], fileobj)
df[particleops.COLUMNS] = df[particleops.COLUMNS].astype(np.float64)
df["noise"] = False # we know there are no noise events in OPP data
df["saturated"] = False # we know there are no saturated events in OPP data
df = particleops.decode_bit_flags(df)
return df
def read_vct_csv(path, fileobj=None):
"""
Read a VCT space-separated CSV SeaFlow data file for one quantile.
Data will be read from the file at the provided path or preferentially from
fileobj if provided. If path is provided and ends with '.gz' data will be
considered gzip compressed even if read from fileobj.
Parameters
-----------
path: str
File path.
fileobj: io.BytesIO, optional
Open file object.
Returns
-------
pandas.DataFrame
SeaFlow VCT DataFrame for one quantile as numpy.float64 values plus
one text column for population labels.
"""
with file_open_r(path, fileobj) as fh:
df = pd.read_csv(fh, sep=" ", names=["diam_lwr", "Qc_lwr", "diam_mid", "Qc_mid", "diam_upr", "Qc_upr", "pop"])
return df
def read_filter_params_csv(path):
"""
Read a filter parameters csv file.
Parameters
----------
path: str
Path to filter parameters csv file.
Returns
-------
pandas.DataFrame
Contents of csv file with "." in column headers replaced with "_".
"""
defaults = {
"sep": str(','),
"na_filter": True,
"encoding": "utf-8"
}
try:
df = pd.read_csv(path, **defaults)
except pd.errors.ParserError:
raise errors.FileError("could not parse {} as csv filter paramater file".format(path))
# Fix column names
df.columns = [c.replace('.', '_') for c in df.columns]
# Make sure serial numbers are treated as strings
df = df.astype({"instrument": "str"})
return df
def write_labview(df, path):
"""
Write SeaFlow event DataFrame as LabView binary file.
Parameters
-----------
df: pandas.DataFrame
SeaFlow event DataFrame.
path: str
Output file path. If this ends with '.gz' data will be gzip compressed.
"""
    # Make sure the necessary directory tree exists
util.mkdir_p(os.path.dirname(path))
# Open output file
with file_open_w(path) as fh:
# Write 32-bit uint particle count header
header = np.array([len(df.index)], np.uint32)
fh.write(header.tobytes())
if len(df.index) > 0:
# Convert to uint16 before saving
df = df.astype(np.uint16)
            # Add leading 4 bytes to match LabVIEW's binary format
zeros = np.zeros([len(df.index), 1], dtype=np.uint16)
tens = np.copy(zeros)
tens.fill(10)
df.insert(0, "tens", tens)
df.insert(1, "zeros", zeros)
# Write particle data
fh.write(df.values.tobytes())
def write_evt_labview(df, path, outdir, gz=True):
"""
Write a raw SeaFlow event DataFrame as LabView binary file.
Parameters
-----------
df: pandas.DataFrame
SeaFlow raw event DataFrame.
path: str
File name. This will be converted into a standard SeaFlow file ID and
will be used to construct the final output file path within outdir. The
        final file name will have a ".gz" extension if gz is True.
outdir: str
Output directory. This function will create day of year subdirectories
for EVT binary files.
gz: bool, default True
Gzip compress?
"""
if df is None:
return
sfile = SeaFlowFile(path)
outpath = os.path.join(outdir, sfile.file_id)
if gz:
outpath = outpath + ".gz"
# Only keep columns we intend to write to file
write_labview(df[particleops.COLUMNS], outpath)
def write_opp_labview(df, path, outdir, gz=True, require_all=True):
"""
Write an OPP SeaFlow event DataFrame as LabView binary file.
Quantile flags will be encoded as a final bit flag column. The noise column
will be dropped.
Parameters
-----------
df: pandas.DataFrame
SeaFlow focused particle DataFrame.
path: str
File name. This will be converted into a standard SeaFlow file ID and
will be used to construct the final output file path within outdir. The
final file name will also have an ".opp" file extension, plus ".gz"
extension if gz is True.
outdir: str
Output directory. This function will create day of year subdirectories
for EVT binary files.
gz: bool, default True
Gzip compress?
require_all: bool, default True
If true all an output file will only be created if there is focused
particle data for all quantiles.
"""
if df is None:
return
# Return early if any quantiles got completely filtered out
write_flag = True
if require_all:
for q_col, _q, _q_str, q_df in particleops.quantiles_in_df(df):
write_flag = write_flag & q_df[q_col].any()
if write_flag:
# Attach a bit flag column to encode all the per-quantile focused
# particle flags.
df = particleops.encode_bit_flags(df.copy())
sfile = SeaFlowFile(path)
outpath = os.path.join(outdir, sfile.file_id + ".opp")
if gz:
outpath = outpath + ".gz"
# Only keep columns we intend to write to file
write_labview(df[particleops.COLUMNS + ["bitflags"]], outpath)
def write_opp_parquet(opp_dfs, date, window_size, outdir):
"""
Write an OPP Parquet file.
Use snappy compression.
Parameters
-----------
opp_dfs: pandas.DataFrame
SeaFlow focused particle DataFrames with file_id, date, and index reflecting
positions in original EVT DataFrames.
date: pandas.Timestamp or datetime.datetime object
Start timestamp for data in df.
    window_size: pandas offset alias for the time window covered by this file.
        Time covered by this file is date + window_size.
outdir: str
Output directory.
"""
if not opp_dfs:
return
    # Make sure the necessary directory tree exists
util.mkdir_p(outdir)
outpath = os.path.join(outdir, date.isoformat().replace(":", "-")) + f".{window_size}.opp.parquet"
df = pd.concat(opp_dfs, ignore_index=True)
# Linearize data columns
df = particleops.linearize_particles(df, columns=["D1", "D2", "fsc_small", "pe", "chl_small"])
# Only keep columns we intend to write to file, reorder
columns = [
"date",
"file_id",
"D1",
"D2",
"fsc_small",
"pe",
"chl_small",
"q2.5",
"q50",
"q97.5",
"filter_id"
]
df = df[columns]
# Check for an existing file. Merge, overwriting matching existing entries.
try:
old_df = pd.read_parquet(outpath)
except FileNotFoundError:
pass
else:
if not all(old_df.columns == df.columns):
raise ValueError("existing OPP parquet file has incompatible column names")
new_files = list(df["file_id"].unique())
old_df = old_df[~old_df["file_id"].isin(new_files)] # drop rows in old_df that are in new data
df = pd.concat([old_df, df], ignore_index=True)
df.sort_values(by="file_id", kind="mergesort", inplace=True) # mergesort is stable
# Make sure file_id and filter_id are categorical columns
if df["file_id"].dtype.name != "category":
df["file_id"] = df["file_id"].astype("category")
if df["filter_id"].dtype.name != "category":
df["filter_id"] = df["filter_id"].astype("category")
# Write parquet
df.to_parquet(outpath, compression="snappy", index=False, engine="pyarrow")
| gpl-3.0 |
khkaminska/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 83 | 17276 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
    # Check regression on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LogisticRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
    AdaBoostRegressor should work without sample_weights in the base estimator.
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
| bsd-3-clause |
jetuk/pywr | pywr/timestepper.py | 3 | 3423 | import pandas
from pywr import _core
class Timestepper(object):
def __init__(self, start="2015-01-01", end="2015-12-31", delta=1):
self.start = start
self.end = end
self.delta = delta
self._last_length = None
self.reset()
def __iter__(self, ):
return self
def __len__(self, ):
return int((self.end-self.start)/self.delta) + 1
def reset(self, start=None):
""" Reset the timestepper
If start is None it resets to the original self.start, otherwise
start is used as the new starting point.
"""
self._current = None
current_length = len(self)
if start is None:
self._next = _core.Timestep(self.start, 0, self.delta.days)
else:
# Calculate actual index from new position
diff = start - self.start
if diff.days % self.delta.days != 0:
raise ValueError('New starting position is not compatible with the existing starting position and timestep.')
index = diff.days / self.delta.days
self._next = _core.Timestep(start, index, self.delta.days)
length_changed = self._last_length != current_length
self._last_length = current_length
return length_changed
def __next__(self, ):
return self.next()
def next(self, ):
self._current = current = self._next
if current.datetime > self.end:
raise StopIteration()
# Increment to next timestep
self._next = _core.Timestep(current.datetime + self.delta, current.index + 1, self.delta.days)
# Return this timestep
return current
def start():
def fget(self):
return self._start
def fset(self, value):
if isinstance(value, pandas.Timestamp):
self._start = value
else:
self._start = pandas.to_datetime(value)
return locals()
start = property(**start())
def end():
def fget(self):
return self._end
def fset(self, value):
if isinstance(value, pandas.Timestamp):
self._end = value
else:
self._end = pandas.to_datetime(value)
return locals()
end = property(**end())
def delta():
def fget(self):
return self._delta
def fset(self, value):
try:
self._delta = pandas.Timedelta(days=value)
except TypeError:
self._delta = pandas.to_timedelta(value)
return locals()
delta = property(**delta())
@property
def current(self):
"""The current timestep
If iteration has not begun this will return None.
"""
return self._current
@property
def datetime_index(self):
""" Return a `pandas.DatetimeIndex` using the start, end and delta of this object
This is useful for creating `pandas.DataFrame` objects from Model results
"""
freq = '{}D'.format(self.delta.days)
return pandas.date_range(self.start, self.end, freq=freq)
def __repr__(self):
start = self.start.strftime("%Y-%m-%d")
end = self.end.strftime("%Y-%m-%d")
delta = self.delta.days
return "<Timestepper start=\"{}\" end=\"{}\" delta=\"{}\">".format(
start, end, delta
)
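# Illustrative usage sketch added for clarity; it is not part of the original
# module and assumes the compiled pywr._core extension is importable. It runs
# only when this file is executed directly.
if __name__ == "__main__":
    ts = Timestepper(start="2015-01-01", end="2015-01-31", delta=1)
    print(len(ts))            # 31 daily timesteps
    print(ts.datetime_index)  # pandas.DatetimeIndex with freq "1D"
    for timestep in ts:
        # each item is a _core.Timestep exposing .index and .datetime
        print(timestep.index, timestep.datetime)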
| gpl-3.0 |
ran5515/DeepDecision | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 88 | 31139 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert (isinstance(n_classes, dict))
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
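# Illustrative worked example added for clarity (not part of the original
# module): for a dense (100, 4) feature matrix with integer labels and three
# classes, the helper returns batch-shaped input/output shapes.
def _example_get_in_out_shape():
  input_shape, output_shape, batch_size = _get_in_out_shape(
      x_shape=(100, 4), y_shape=(100,), n_classes=3, batch_size=32)
  assert input_shape == [32, 4]
  assert output_shape == [32, 3]
  assert batch_size == 32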
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
y: numpy, pandas or Dask array or dictionary of aforementioned. Also
supports
iterables.
    n_classes: number of classes. Must be None or the same type as y. If `y`
      is a `dict` (or an iterable which returns dicts), `n_classes` must be a
      `dict` such that `n_classes[key]` is the number of classes for `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
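# Illustrative sketch added for clarity (not part of the original module):
# building a training feeder from in-memory numpy arrays. Because plain
# ndarrays are not iterators, a DataFeeder instance is returned.
def _example_setup_train_data_feeder():
  x = np.random.rand(100, 4).astype(np.float32)
  y = np.random.randint(0, 3, size=100)
  feeder = setup_train_data_feeder(x, y, n_classes=3, batch_size=32)
  # returns {'epoch': 0, 'offset': 0, 'batch_size': 32} before training starts
  return feeder.get_feed_params()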
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
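# Illustrative sketch added for clarity (not part of the original module):
# batching a generator of single feature rows into matrices of at most
# `batch_size` rows; a final partial batch is yielded at the end.
def _example_batch_data():
  rows = (np.array([float(i), float(i + 1)]) for i in range(5))
  # yields matrices of shape (2, 2), (2, 2) and a final partial (1, 2)
  return [batch.shape for batch in _batch_data(rows, batch_size=2)]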
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
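# Illustrative sketch added for clarity (not part of the original module):
# a 2-D numpy array is split into batch_size-sized chunks for prediction.
def _example_setup_predict_data_feeder():
  x = np.arange(10, dtype=np.float32).reshape(5, 2)
  # three chunks of shapes (2, 2), (2, 2) and (1, 2)
  return [chunk.shape for chunk in setup_predict_data_feeder(x, batch_size=2)]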
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `a` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
x: One feature sample which can either Nd numpy matrix of shape
`[n_samples, n_features, ...]` or dictionary of Nd numpy matrix.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else \
        dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())]) if y_is_dict else check_array(y, y.dtype)
# self.n_classes is not None means we're converting raw target indices to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict \
else _check_dtype(self._x.dtype)
# note: self._output_dtype = np.float32 when y is None
self._output_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict \
else _check_dtype(self._y.dtype) if y is not None else np.float32
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
num_samples = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
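# Illustrative wiring sketch added for clarity (not part of the original
# module). It shows the typical pairing of `input_builder` with
# `get_feed_dict_fn`; `session` and `train_op` are assumed to be provided by
# the caller's TensorFlow graph.
def _example_data_feeder_wiring(session, train_op, x, y):
  feeder = DataFeeder(x, y, n_classes=None, batch_size=32)
  feeder.input_builder()               # creates the input/output placeholders
  feed_fn = feeder.get_feed_dict_fn()
  return session.run(train_op, feed_dict=feed_fn())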
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
  Streaming data feeder allows reading data as it comes in from disk or
  somewhere else. It is common to have these iterators rotate infinitely over
  the dataset, to allow control on the trainer side of how much to learn.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator each element of which returns one feature sample. Sample can
be a Nd numpy matrix or dictionary of Nd numpy matrices.
y: iterator each element of which returns one label sample. Sample can be
a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
classes regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
      batch_size: Mini batch size to accumulate samples in one batch. If set
        to `None`, the iterator is assumed to return already batched elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
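# Illustrative sketch added for clarity (not part of the original module):
# feeding StreamingDataFeeder from Python generators, e.g. when the dataset
# does not fit in memory. The generators here produce random toy data.
def _example_streaming_feeder():
  def x_iter():
    while True:
      yield np.random.rand(4).astype(np.float32)
  def y_iter():
    while True:
      yield np.random.randint(0, 3, size=1)
  return StreamingDataFeeder(x_iter(), y_iter(), n_classes=3, batch_size=32)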
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns 1 or many classes /
        regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
      random_state: random state for RNG. Note that it will mutate, so use an
        int value for this if you want consistent sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
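# Illustrative sketch added for clarity (not part of the original module):
# wrapping pandas frames as dask frames before handing them to
# DaskDataFeeder. Assumes dask and pandas are installed.
def _example_dask_feeder():
  import pandas as pd
  import dask.dataframe as dd
  x = dd.from_pandas(pd.DataFrame(np.random.rand(100, 4)), npartitions=2)
  y = dd.from_pandas(pd.DataFrame(np.random.randint(0, 3, size=100)),
                     npartitions=2)
  return DaskDataFeeder(x, y, n_classes=3, batch_size=32)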
| apache-2.0 |
scottpurdy/NAB | tests/integration/false_negative_test.py | 8 | 3465 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import datetime
import pandas
import unittest
from nab.scorer import Scorer
from nab.test_helpers import generateTimestamps, generateWindows, generateLabels
class FalseNegativeTests(unittest.TestCase):
def _checkCounts(self, counts, tn, tp, fp, fn):
"""Ensure the metric counts are correct."""
self.assertEqual(counts['tn'], tn, "Incorrect tn count")
self.assertEqual(counts['tp'], tp, "Incorrect tp count")
self.assertEqual(counts['fp'], fp, "Incorrect fp count")
self.assertEqual(counts['fn'], fn, "Incorrect fn count")
def setUp(self):
self.costMatrix = {"tpWeight": 1.0,
"fnWeight": 1.0,
"fpWeight": 1.0,
"tnWeight": 1.0}
def testFalseNegativeCausesNegativeScore(self):
"""
A false negative with only one window should have exactly the negative
of the false negative score.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 1000
numWindows = 1
windowSize = 10
timestamps = generateTimestamps(start, increment, length)
windows = generateWindows(timestamps, numWindows, windowSize)
labels = generateLabels(timestamps, windows)
predictions = pandas.Series([0]*length)
scorer = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score) = scorer.getScore()
self.assertTrue(abs(score + self.costMatrix['fnWeight']) < 0.1)
self._checkCounts(scorer.counts, length-windowSize*numWindows, 0, 0,
windowSize*numWindows)
def testFourFalseNegatives(self):
"""
A false negative with four windows should have exactly four times
the negative of the false negative score.
"""
start = datetime.datetime.now()
increment = datetime.timedelta(minutes=5)
length = 2000
numWindows = 4
windowSize = 10
timestamps = generateTimestamps(start, increment, length)
windows = generateWindows(timestamps, numWindows, windowSize)
labels = generateLabels(timestamps, windows)
predictions = pandas.Series([0]*length)
scorer = Scorer(timestamps, predictions, labels, windows, self.costMatrix,
probationaryPeriod=0)
(_, score) = scorer.getScore()
self.assertTrue(abs(score + 4*self.costMatrix['fnWeight']) < 0.01)
self._checkCounts(scorer.counts, length-windowSize*numWindows, 0, 0,
windowSize*numWindows)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
JsNoNo/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space. Unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
dlmacedo/SVM-CNN | elm.py | 13 | 20319 | # -*- coding: utf8
# Author: David C. Lambert [dcl -at- panix -dot- com]
# Copyright(c) 2013
# License: Simple BSD
"""
The :mod:`elm` module implements the
Extreme Learning Machine Classifiers and Regressors (ELMClassifier,
ELMRegressor, SimpleELMRegressor, SimpleELMClassifier).
An Extreme Learning Machine (ELM) is a single layer feedforward
network with random hidden layer components and ordinary linear
least squares fitting of the hidden->output weights by default.
[1][2]
References
----------
.. [1] http://www.extreme-learning-machines.org
.. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning Machine:
Theory and Applications", Neurocomputing, vol. 70, pp. 489-501,
2006.
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.linalg import pinv2
from sklearn.utils import as_float_array
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.preprocessing import LabelBinarizer
from random_layer import RandomLayer, MLPRandomLayer
__all__ = ["ELMRegressor",
"ELMClassifier",
"GenELMRegressor",
"GenELMClassifier"]
# BaseELM class, regressor and hidden_layer attributes
# and provides defaults for docstrings
class BaseELM(BaseEstimator):
"""
Base class for ELMs.
Warning: This class should not be used directly.
Use derived classes instead.
"""
__metaclass__ = ABCMeta
def __init__(self, hidden_layer, regressor):
self.regressor = regressor
self.hidden_layer = hidden_layer
@abstractmethod
def fit(self, X, y):
"""
Fit the model using X, y as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape [n_samples, n_outputs]
Target values (class labels in classification, real numbers in
regression)
Returns
-------
self : object
Returns an instance of self.
"""
@abstractmethod
def predict(self, X):
"""
Predict values using the model
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Returns
-------
C : numpy array of shape [n_samples, n_outputs]
Predicted values.
"""
class GenELMRegressor(BaseELM, RegressorMixin):
"""
    GenELMRegressor is a regressor based on the Extreme Learning Machine.
An Extreme Learning Machine (ELM) is a single layer feedforward
    network with random hidden layer components and ordinary linear
least squares fitting of the hidden->output weights by default.
[1][2]
Parameters
----------
`hidden_layer` : random_layer instance, optional
(default=MLPRandomLayer(random_state=0))
`regressor` : regressor instance, optional (default=None)
If provided, this object is used to perform the regression from hidden
unit activations to the outputs and subsequent predictions. If not
present, an ordinary linear least squares fit is performed
Attributes
----------
`coefs_` : numpy array
Fitted regression coefficients if no regressor supplied.
`fitted_` : bool
Flag set when fit has been called already.
`hidden_activations_` : numpy array of shape [n_samples, n_hidden]
Hidden layer activations for last input.
See Also
--------
RBFRandomLayer, MLPRandomLayer, ELMRegressor, ELMClassifier
References
----------
.. [1] http://www.extreme-learning-machines.org
.. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning Machine:
Theory and Applications", Neurocomputing, vol. 70, pp. 489-501,
2006.
"""
def __init__(self,
hidden_layer=MLPRandomLayer(random_state=0),
regressor=None):
super(GenELMRegressor, self).__init__(hidden_layer, regressor)
self.coefs_ = None
self.fitted_ = False
self.hidden_activations_ = None
def _fit_regression(self, y):
"""
fit regression using pseudo-inverse
or supplied regressor
"""
if (self.regressor is None):
self.coefs_ = safe_sparse_dot(pinv2(self.hidden_activations_), y)
else:
self.regressor.fit(self.hidden_activations_, y)
self.fitted_ = True
def fit(self, X, y):
"""
Fit the model using X, y as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape [n_samples, n_outputs]
Target values (class labels in classification, real numbers in
regression)
Returns
-------
self : object
Returns an instance of self.
"""
# fit random hidden layer and compute the hidden layer activations
self.hidden_activations_ = self.hidden_layer.fit_transform(X)
# solve the regression from hidden activations to outputs
self._fit_regression(as_float_array(y, copy=True))
return self
def _get_predictions(self):
"""get predictions using internal least squares/supplied regressor"""
if (self.regressor is None):
preds = safe_sparse_dot(self.hidden_activations_, self.coefs_)
else:
preds = self.regressor.predict(self.hidden_activations_)
return preds
def predict(self, X):
"""
Predict values using the model
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Returns
-------
C : numpy array of shape [n_samples, n_outputs]
Predicted values.
"""
if (not self.fitted_):
raise ValueError("ELMRegressor not fitted")
# compute hidden layer activations
self.hidden_activations_ = self.hidden_layer.transform(X)
# compute output predictions for new hidden activations
predictions = self._get_predictions()
return predictions
class GenELMClassifier(BaseELM, ClassifierMixin):
"""
GenELMClassifier is a classifier based on the Extreme Learning Machine.
An Extreme Learning Machine (ELM) is a single layer feedforward
    network with random hidden layer components and ordinary linear
least squares fitting of the hidden->output weights by default.
[1][2]
Parameters
----------
`hidden_layer` : random_layer instance, optional
(default=MLPRandomLayer(random_state=0))
`binarizer` : LabelBinarizer, optional
(default=LabelBinarizer(-1, 1))
`regressor` : regressor instance, optional (default=None)
If provided, this object is used to perform the regression from hidden
unit activations to the outputs and subsequent predictions. If not
present, an ordinary linear least squares fit is performed
Attributes
----------
`classes_` : numpy array of shape [n_classes]
Array of class labels
`genelm_regressor_` : ELMRegressor instance
Performs actual fit of binarized values
See Also
--------
RBFRandomLayer, MLPRandomLayer, ELMRegressor, ELMClassifier
References
----------
.. [1] http://www.extreme-learning-machines.org
.. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning Machine:
Theory and Applications", Neurocomputing, vol. 70, pp. 489-501,
2006.
"""
def __init__(self,
hidden_layer=MLPRandomLayer(random_state=0),
binarizer=LabelBinarizer(-1, 1),
regressor=None):
super(GenELMClassifier, self).__init__(hidden_layer, regressor)
self.binarizer = binarizer
self.classes_ = None
self.genelm_regressor_ = GenELMRegressor(hidden_layer, regressor)
def decision_function(self, X):
"""
This function return the decision function values related to each
class on an array of test vectors X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
Returns
-------
C : array of shape [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,]
"""
return self.genelm_regressor_.predict(X)
def fit(self, X, y):
"""
Fit the model using X, y as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape [n_samples, n_outputs]
Target values (class labels in classification, real numbers in
regression)
Returns
-------
self : object
Returns an instance of self.
"""
self.classes_ = np.unique(y)
y_bin = self.binarizer.fit_transform(y)
self.genelm_regressor_.fit(X, y_bin)
return self
def predict(self, X):
"""Predict values using the model
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Returns
-------
C : numpy array of shape [n_samples, n_outputs]
Predicted values.
"""
raw_predictions = self.decision_function(X)
class_predictions = self.binarizer.inverse_transform(raw_predictions)
return class_predictions
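# Illustrative usage sketch added for clarity (not part of the original
# module): a GenELMClassifier with the default-style MLPRandomLayer hidden
# layer on a toy binary problem.
def _example_gen_elm_classifier():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 4)
    y = (X[:, 0] > 0.5).astype(int)
    clf = GenELMClassifier(hidden_layer=MLPRandomLayer(random_state=0))
    return clf.fit(X, y).predict(X[:5])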
# ELMRegressor with default RandomLayer
class ELMRegressor(BaseEstimator, RegressorMixin):
"""
ELMRegressor is a regressor based on the Extreme Learning Machine.
An Extreme Learning Machine (ELM) is a single layer feedforward
    network with random hidden layer components and ordinary linear
least squares fitting of the hidden->output weights by default.
[1][2]
ELMRegressor is a wrapper for an GenELMRegressor that uses a
RandomLayer and passes the __init__ parameters through
to the hidden layer generated by the fit() method.
Parameters
----------
`n_hidden` : int, optional (default=20)
Number of units to generate in the SimpleRandomLayer
`alpha` : float, optional (default=0.5)
Mixing coefficient for distance and dot product input activations:
activation = alpha*mlp_activation + (1-alpha)*rbf_width*rbf_activation
`rbf_width` : float, optional (default=1.0)
multiplier on rbf_activation
`activation_func` : {callable, string} optional (default='tanh')
Function used to transform input activation
It must be one of 'tanh', 'sine', 'tribas', 'inv_tribase', 'sigmoid',
'hardlim', 'softlim', 'gaussian', 'multiquadric', 'inv_multiquadric' or
a callable. If none is given, 'tanh' will be used. If a callable
is given, it will be used to compute the hidden unit activations.
`activation_args` : dictionary, optional (default=None)
Supplies keyword arguments for a callable activation_func
`user_components`: dictionary, optional (default=None)
        dictionary containing values for components that would otherwise be
randomly generated. Valid key/value pairs are as follows:
'radii' : array-like of shape [n_hidden]
'centers': array-like of shape [n_hidden, n_features]
'biases' : array-like of shape [n_hidden]
'weights': array-like of shape [n_hidden, n_features]
`regressor` : regressor instance, optional (default=None)
If provided, this object is used to perform the regression from hidden
unit activations to the outputs and subsequent predictions. If not
present, an ordinary linear least squares fit is performed
`random_state` : int, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
hidden unit weights at fit time.
Attributes
----------
`genelm_regressor_` : GenELMRegressor object
Wrapped object that actually performs the fit.
See Also
--------
RandomLayer, RBFRandomLayer, MLPRandomLayer,
GenELMRegressor, GenELMClassifier, ELMClassifier
References
----------
.. [1] http://www.extreme-learning-machines.org
.. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning Machine:
Theory and Applications", Neurocomputing, vol. 70, pp. 489-501,
2006.
"""
def __init__(self, n_hidden=20, alpha=0.5, rbf_width=1.0,
activation_func='tanh', activation_args=None,
user_components=None, regressor=None, random_state=None):
self.n_hidden = n_hidden
self.alpha = alpha
self.random_state = random_state
self.activation_func = activation_func
self.activation_args = activation_args
self.user_components = user_components
self.rbf_width = rbf_width
self.regressor = regressor
self._genelm_regressor = None
def _create_random_layer(self):
"""Pass init params to RandomLayer"""
return RandomLayer(n_hidden=self.n_hidden,
alpha=self.alpha, random_state=self.random_state,
activation_func=self.activation_func,
activation_args=self.activation_args,
user_components=self.user_components,
rbf_width=self.rbf_width)
def fit(self, X, y):
"""
Fit the model using X, y as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape [n_samples, n_outputs]
Target values (class labels in classification, real numbers in
regression)
Returns
-------
self : object
Returns an instance of self.
"""
rhl = self._create_random_layer()
self._genelm_regressor = GenELMRegressor(hidden_layer=rhl,
regressor=self.regressor)
self._genelm_regressor.fit(X, y)
return self
def predict(self, X):
"""
Predict values using the model
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Returns
-------
C : numpy array of shape [n_samples, n_outputs]
Predicted values.
"""
if (self._genelm_regressor is None):
raise ValueError("SimpleELMRegressor not fitted")
return self._genelm_regressor.predict(X)
class ELMClassifier(ELMRegressor):
"""
ELMClassifier is a classifier based on the Extreme Learning Machine.
An Extreme Learning Machine (ELM) is a single layer feedforward
    network with random hidden layer components and ordinary linear
least squares fitting of the hidden->output weights by default.
[1][2]
ELMClassifier is an ELMRegressor subclass that first binarizes the
data, then uses the superclass to compute the decision function that
is then unbinarized to yield the prediction.
The params for the RandomLayer used in the input transform are
exposed in the ELMClassifier constructor.
Parameters
----------
`n_hidden` : int, optional (default=20)
Number of units to generate in the SimpleRandomLayer
`activation_func` : {callable, string} optional (default='tanh')
Function used to transform input activation
It must be one of 'tanh', 'sine', 'tribas', 'inv_tribase', 'sigmoid',
'hardlim', 'softlim', 'gaussian', 'multiquadric', 'inv_multiquadric' or
a callable. If none is given, 'tanh' will be used. If a callable
is given, it will be used to compute the hidden unit activations.
`activation_args` : dictionary, optional (default=None)
Supplies keyword arguments for a callable activation_func
`random_state` : int, RandomState instance or None (default=None)
Control the pseudo random number generator used to generate the
hidden unit weights at fit time.
Attributes
----------
`classes_` : numpy array of shape [n_classes]
Array of class labels
See Also
--------
RandomLayer, RBFRandomLayer, MLPRandomLayer,
GenELMRegressor, GenELMClassifier, ELMClassifier
References
----------
.. [1] http://www.extreme-learning-machines.org
.. [2] G.-B. Huang, Q.-Y. Zhu and C.-K. Siew, "Extreme Learning Machine:
Theory and Applications", Neurocomputing, vol. 70, pp. 489-501,
2006.
"""
def __init__(self, n_hidden=20, alpha=0.5, rbf_width=1.0,
activation_func='tanh', activation_args=None,
user_components=None, regressor=None,
binarizer=LabelBinarizer(-1, 1),
random_state=None):
super(ELMClassifier, self).__init__(n_hidden=n_hidden,
alpha=alpha,
random_state=random_state,
activation_func=activation_func,
activation_args=activation_args,
user_components=user_components,
rbf_width=rbf_width,
regressor=regressor)
self.classes_ = None
self.binarizer = binarizer
def decision_function(self, X):
"""
        This function returns the decision function values related to each
class on an array of test vectors X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
Returns
-------
C : array of shape [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,]
"""
return super(ELMClassifier, self).predict(X)
def fit(self, X, y):
"""
Fit the model using X, y as training data.
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape [n_samples, n_outputs]
Target values (class labels in classification, real numbers in
regression)
Returns
-------
self : object
Returns an instance of self.
"""
self.classes_ = np.unique(y)
y_bin = self.binarizer.fit_transform(y)
super(ELMClassifier, self).fit(X, y_bin)
return self
def predict(self, X):
"""
Predict values using the model
Parameters
----------
X : {array-like, sparse matrix} of shape [n_samples, n_features]
Returns
-------
C : numpy array of shape [n_samples, n_outputs]
Predicted values.
"""
raw_predictions = self.decision_function(X)
class_predictions = self.binarizer.inverse_transform(raw_predictions)
return class_predictions
def score(self, X, y):
"""Force use of accuracy score since we don't inherit
from ClassifierMixin"""
from sklearn.metrics import accuracy_score
return accuracy_score(y, self.predict(X))
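# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): drives the wrappers above on the
# scikit-learn digits data.  It assumes scikit-learn is installed, which the
# module already relies on, and the settings below are arbitrary.
if __name__ == "__main__":
    from sklearn.datasets import load_digits

    digits = load_digits()
    X, y = digits.data, digits.target
    n_train = 1200

    # 200 random tanh hidden units, ordinary least-squares read-out by default
    clf = ELMClassifier(n_hidden=200, activation_func='tanh', random_state=0)
    clf.fit(X[:n_train], y[:n_train])
    print("ELMClassifier hold-out accuracy: %.3f" % clf.score(X[n_train:], y[n_train:]))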
| apache-2.0 |
jhmatthews/cobra | source/disk_sub.py | 1 | 3164 | import sys
import numpy as np
import pylab
import matplotlib.pyplot as plt
import scipy.integrate
import scipy.optimize
from collections import namedtuple
import geo
import astro_help as ah
import disk_sub as disk
RADIAN=57.29598
C=2.997925e10
MSOL=1.979e33
G=6.670e-8
YR=3.1556925e7
EPSILON=1e-6
PI=3.1416
STEFAN_BOLTZMANN=5.669e-5
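# tdisk() returns the characteristic temperature of a steady, optically thick
# accretion disc, T* = [3 G m mdot / (8 pi sigma r^3)]^(1/4); teff() scales it
# to the local effective temperature of the standard (Shakura-Sunyaev-type)
# profile, T_eff(x) = T* * [(1 - x**-0.5) / x**3]**0.25 with x = R / R_in.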
def tdisk (m, mdot, r):
t = 3. * G / (8. * PI * STEFAN_BOLTZMANN) * m * mdot / (r * r * r)
t = pow (t, 0.25)
return (t)
def teff (t, x):
q = (1.e0 - (x ** -0.5e0)) / (x * x * x);
q = t * (q ** 0.25e0);
return (q)
def spec_disk (f1,f2,m,mdot,rmin,rmax):
tref=tdisk(m, mdot, rmin)
  nfreq=int((f2/f1)*100)   # np.linspace needs an integer number of points
freq=np.linspace(f1,f2,nfreq)
  spec=np.zeros(nfreq)   # zero-initialise; fluxes are accumulated in the loop below
dfreq=freq[1]-freq[0]
rtemp=np.logspace(np.log10(rmin),np.log10(rmax),num=100)
rdisk=[]
for j in range(len(rtemp)-1):
rdisk.append((rtemp[j]+rtemp[j+1])/2.0)
r=rdisk[j]/rmin
area=PI*(rtemp[j+1]*rtemp[j+1]-rtemp[j]*rtemp[j])
t=(disk.teff(tref,r))
for i in range(len(freq)):
spec[i]=spec[i]+(ah.planck_nu(t,freq[i])*area*PI*2)
return (freq,spec)
def spec_disk1 (f1,f2,m,mdot,rmin,rmax):
tref=tdisk(m, mdot, rmin)
nfreq=1000
freq=np.logspace(np.log10(f1),np.log10(f2),nfreq)
  spec=np.zeros(nfreq)   # zero-initialise; fluxes are accumulated in the loop below
dfreq=freq[1]-freq[0]
rtemp=np.logspace(np.log10(rmin),np.log10(rmax),num=100)
rdisk=[]
for j in range(len(rtemp)-1):
rdisk.append((rtemp[j]+rtemp[j+1])/2.0)
r=rdisk[j]/rmin
area=PI*(rtemp[j+1]*rtemp[j+1]-rtemp[j]*rtemp[j])
t=(disk.teff(tref,r))
for i in range(len(freq)-1):
spec[i]=spec[i]+(ah.planck_nu(t,(freq[i+1]+freq[i])/2.0)*area*PI*2*(freq[i+1]-freq[i]))
return (freq,spec)
def lnu_disk (f,m,mdot,rmin,rmax):
tref=tdisk(m, mdot, rmin)
rtemp=np.logspace(np.log10(rmin),np.log10(rmax),num=100)
rdisk=[]
lnu=0.0
for j in range(len(rtemp)-1):
rdisk.append((rtemp[j]+rtemp[j+1])/2.0)
r=rdisk[j]/rmin
area=PI*(rtemp[j+1]*rtemp[j+1]-rtemp[j]*rtemp[j])
t=(disk.teff(tref,r))
lnu=lnu+(ah.planck_nu(t,f)*area*PI*2.0)
return (lnu)
def llamb_disk (lamb,m,mdot,rmin,rmax):
tref=tdisk(m, mdot, rmin)
rtemp=np.logspace(np.log10(rmin),np.log10(rmax),num=100)
rdisk=[]
llamb=0.0
for j in range(len(rtemp)-1):
rdisk.append((rtemp[j]+rtemp[j+1])/2.0)
r=rdisk[j]/rmin
area=PI*(rtemp[j+1]*rtemp[j+1]-rtemp[j]*rtemp[j])
t=(disk.teff(tref,r))
llamb=llamb+(ah.planck_lamb(t,lamb)*area*PI*2.0)
return (llamb)
def spec_disk2 (f1,f2,m,mdot,rmin,rmax):
tref=tdisk(m, mdot, rmin)
nfreq=10
f1a=10**float(int(np.log10(f1)))
f2a=10**float(int(np.log10(f2))+1)
nrange=int(np.log10((f2a/f1a)))
freq=[]
dfreq=[]
ftemp=f1a
df=f1a/nfreq
for i in range(nrange):
for j in range(nfreq*9):
ftemp=ftemp+df
if ftemp > f2:
break
if ftemp >= f1:
freq.append(ftemp)
df=df*10.0
#print freq[0],freq[len(freq)-1]
spec=np.zeros(len(freq))
rtemp=np.logspace(np.log10(rmin),np.log10(rmax),num=100)
rdisk=[]
for j in range(len(rtemp)-1):
rdisk.append((rtemp[j]+rtemp[j+1])/2.0)
r=rdisk[j]/rmin
area=PI*(rtemp[j+1]*rtemp[j+1]-rtemp[j]*rtemp[j])
t=(disk.teff(tref,r))
for i in range(len(freq)-1):
spec[i]=spec[i]+(ah.planck_nu(t,freq[i])*area*PI*2)
return (freq,spec)
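# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; the values are placeholders and cgs units
# are assumed, as in the routines above): a quick check of the temperature
# helpers for a white-dwarf-like accretor.
if __name__ == "__main__":
    m = 0.8 * MSOL              # accretor mass
    mdot = 1.0e-8 * MSOL / YR   # accretion rate
    rstar = 7.0e8               # inner disc radius (cm)
    tref = tdisk(m, mdot, rstar)
    print('reference disc temperature T* = %.3e K' % tref)
    for x in (1.5, 3.0, 10.0):
        print('T_eff(R = %.1f R_in) = %.3e K' % (x, teff(tref, x)))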
| gpl-2.0 |
miaecle/deepchem | examples/toxcast/processing/tox.py | 9 | 1668 |
#Processing of ToxCast data
#Author - Aneesh Pappu
import pandas as pd
import numpy as np
#Loading dataframes and editing indices
path_to_casn_smiles = "./casn_to_smiles.csv.gz"
path_to_code_casn = "./code_to_casn.csv.gz"
path_to_hitc_code = "./code_to_hitc.csv.gz"
casn_smiles_df = pd.read_csv(path_to_casn_smiles)
code_casn_df = pd.read_csv(path_to_code_casn)
hitc_code_df = pd.read_csv(path_to_hitc_code)
casn_smiles_df = casn_smiles_df[['Substance_CASRN', 'Structure_SMILES']]
code_casn_df = code_casn_df[['casn', 'code']]
hitc_code_df.rename(columns = {'Unnamed: 0': 'code'}, inplace = True)
casn_smiles_df.rename(columns = {'Substance_CASRN': 'casn', 'Structure_SMILES': 'smiles'}, inplace = True)
code_casn_df.set_index('code', inplace = True)
casn_smiles_df.set_index('casn', inplace= True)
#Loop through rows of hitc matrix and replace codes with smiles strings
badCounter = 0 #keep track of rows with no corresponding smiles strings
for index, data in hitc_code_df.iterrows():
rowList = data.values.tolist()
code = rowList[0]
  #get corresponding casn
  try:
    casn = code_casn_df.loc[code, 'casn']
  except KeyError:
    badCounter+=1
    hitc_code_df.loc[index, 'code'] = np.nan  #no CASRN for this code; row is dropped below
    continue
  #get corresponding smiles
  try:
    smiles = casn_smiles_df.loc[casn, 'smiles']
  except KeyError:
    badCounter+=1
    hitc_code_df.loc[index, 'code'] = np.nan  #no SMILES for this CASRN; row is dropped below
    continue
  #write to cell
  hitc_code_df.loc[index, 'code'] = smiles
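# Note: a vectorised sketch of the same code -> CASRN -> SMILES mapping (kept
# as a comment; Series.map is assumed to behave as usual and duplicate CASRNs
# are not handled):
#   code_to_smiles = code_casn_df['casn'].map(casn_smiles_df['smiles'])
#   hitc_code_df['code'] = hitc_code_df['code'].map(code_to_smiles)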
#Tidy up and write to csv
hitc_code_df.rename(columns = {'code': 'smiles'}, inplace = True)
hitc_code_df.dropna(subset = ['smiles'], inplace = True)
hitc_code_df.reset_index(inplace = True, drop = True)
hitc_code_df.to_csv("./reprocessed_tox_cast.csv", index=False)
| mit |
scgmlz/BornAgain-tutorial | ba-school-2018/day_2/reflectometry_D/python_tutorial/task_script.py | 2 | 2261 | import numpy as np
from matplotlib import pyplot as plt
import bornagain as ba
from bornagain import deg, angstrom, nm
from plotter import PlotterSpecular
data_1 = np.loadtxt("python_exp_data_1.txt") # [deg, intensity], angle range [0, 3] deg
data_2 = np.loadtxt("python_exp_data_2.txt") # [deg, intensity], angle range [3, 5] deg
def get_sample(params):
# Task: adjust the sample to using params dictionary
# Defining Materials
material_1 = ba.MaterialBySLD("Air", 0.0, 0.0)
material_2 = ba.MaterialBySLD("AgNano", 1.322e-06, 4.85e-07)
material_3 = ba.MaterialBySLD("SiO2", 3.681e-06, 5.43e-07)
material_4 = ba.MaterialBySLD("Si", 2.074e-06, 6.3e-08)
# Defining Layers
layer_1 = ba.Layer(material_1)
layer_2 = ba.Layer(material_2, 20*nm)
layer_3 = ba.Layer(material_3, 5*nm)
layer_4 = ba.Layer(material_4)
# Defining Multilayers
multiLayer_1 = ba.MultiLayer()
multiLayer_1.addLayer(layer_1)
multiLayer_1.addLayer(layer_2)
multiLayer_1.addLayer(layer_3)
multiLayer_1.addLayer(layer_4)
return multiLayer_1
def get_simulation_1(params):
simulation = ba.SpecularSimulation()
footprint = ba.FootprintFactorSquare(0.001)
simulation.setBeamParameters(1.798*angstrom, data_1[:, 0]*deg, footprint)
simulation.setBeamIntensity(1.0e+8)
sample = get_sample(params)
simulation.setSample(sample)
return simulation
def get_simulation_2(params):
pass
# Task: write your simulation getter here
# simulation should comply with data_2
# set beam intensity equal to 5.0e+8
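    # One possible completion (sketch only, mirroring get_simulation_1; the
    # BornAgain calls are assumed to work exactly as they do above):
    #   simulation = ba.SpecularSimulation()
    #   footprint = ba.FootprintFactorSquare(0.001)
    #   simulation.setBeamParameters(1.798*angstrom, data_2[:, 0]*deg, footprint)
    #   simulation.setBeamIntensity(5.0e+8)
    #   simulation.setSample(get_sample(params))
    #   return simulation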
def run_fitting():
"""
main function to run fitting
"""
fit_objective = ba.FitObjective()
fit_objective.addSimulationAndData(get_simulation_1, data_1[:, 1], 1.0)
# Task: add second simulation-data pair
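    # e.g. (sketch): fit_objective.addSimulationAndData(get_simulation_2, data_2[:, 1], 1.0)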
# visualizing fit progress
fit_objective.initPrint(10)
plotter = PlotterSpecular()
fit_objective.initPlot(10, plotter)
params = ba.Parameters()
# Task: initialize fit parameters
# use params.add(name, start, min, max) function
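    # e.g. (sketch; the parameter names are placeholders that get_sample()
    # would have to read from `params`, and the keyword form of min/max is
    # assumed):
    #   params.add("thickness_AgNano", 20.0*nm, min=10.0*nm, max=30.0*nm)
    #   params.add("thickness_SiO2", 5.0*nm, min=1.0*nm, max=10.0*nm)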
minimizer = ba.Minimizer()
result = minimizer.minimize(fit_objective.evaluate, params)
fit_objective.finalize(result)
if __name__ == '__main__':
run_fitting()
plt.show()
| gpl-3.0 |
rueckstiess/dopamine | agents/valuebased/mdfaestimator.py | 1 | 2463 | from numpy import *
from random import choice
from dopamine.agents.valuebased.estimator import Estimator
from dopamine.fapprox import Linear, LWPRFA
from matplotlib import pyplot as plt
class MDFAEstimator(Estimator):
conditions = {'discreteStates':False, 'discreteActions':True}
def __init__(self, stateDim, actionNum, faClass=Linear, ordered=False):
""" initialize with the state dimension and number of actions. """
self.stateDim = stateDim
self.actionNum = actionNum
self.faClass = faClass
self.ordered = ordered
self.fa = None
# create memory for ordered estimator
if self.ordered:
self.memory = []
# define training and target array
self.reset()
def getBestAction(self, state):
""" returns the action with maximal value in the given state. if several
actions have the same value, pick one at random.
"""
state = state.flatten()
values = self.fa.predict(state)
maxvalues = where(values == values.max())[0]
if len(maxvalues) > 0:
action = array([choice(maxvalues)])
else:
# this should not happen, but it does in rare cases, return the first action
action = array([0])
return action
def getValue(self, state, action):
""" returns the value of the given (state,action) pair as float. """
action = int(action.item())
if self.ordered and (action in self.memory):
return -inf
state = state.flatten()
return self.fa.predict(state)[action]
def updateValue(self, state, action, value):
action = int(action.item())
state = state.flatten()
output = self.fa.predict(state)
output[action] = value
self.fa.update(state, output)
def reset(self):
""" clear collected training set. """
# special case to clean up lwpr models that were pickled
        if self.faClass == LWPRFA and self.fa is not None:
            self.fa._cleanup()
self.fa = self.faClass(self.stateDim, self.actionNum)
def train(self):
""" train individual models for each actions seperately. """
self.fa.train()
def rememberAction(self, action):
if self.ordered:
self.memory.append(int(action.item()))
def resetMemory(self):
if self.ordered:
self.memory = []
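# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): exercises the public interface above with
# a toy 2-dimensional state and 3 discrete actions.  It assumes the default
# Linear approximator from dopamine.fapprox behaves as used by the class
# (predict/update/train).
if __name__ == "__main__":
    estimator = MDFAEstimator(stateDim=2, actionNum=3)
    state = array([0.5, -0.2])
    # pretend action 1 earned a return of 1.0 in this state
    estimator.updateValue(state, array([1]), 1.0)
    estimator.train()
    print("Q(s, a=1) =", estimator.getValue(state, array([1])))
    print("greedy action:", estimator.getBestAction(state))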
| gpl-3.0 |
potash/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
voxlol/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
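# ---------------------------------------------------------------------------
# Usage sketch (comment only; illustrative):
#   from sklearn.manifold import Isomap
#   from sklearn.datasets import load_digits
#   X = load_digits().data[:100]
#   X_2d = Isomap(n_neighbors=5, n_components=2).fit_transform(X)  # (100, 2)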
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/reshape/test_cut.py | 1 | 19806 | import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
Series,
TimedeltaIndex,
Timestamp,
cut,
date_range,
isna,
qcut,
timedelta_range,
to_datetime,
)
import pandas._testing as tm
from pandas.api.types import CategoricalDtype as CDT
import pandas.core.reshape.tile as tmod
def test_simple():
data = np.ones(5, dtype="int64")
result = cut(data, 4, labels=False)
expected = np.array([1, 1, 1, 1, 1])
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
def test_bins():
data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1])
result, bins = cut(data, 3, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
intervals = intervals.take([0, 0, 0, 1, 2, 0])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667, 6.53333333, 9.7]))
def test_right():
data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=True, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
expected = Categorical(intervals, ordered=True)
expected = expected.take([0, 0, 0, 2, 3, 0, 0])
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 2.575, 4.95, 7.325, 9.7]))
def test_no_right():
data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575])
result, bins = cut(data, 4, right=False, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3), closed="left")
intervals = intervals.take([0, 0, 0, 2, 3, 0, 1])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.2, 2.575, 4.95, 7.325, 9.7095]))
def test_array_like():
data = [0.2, 1.4, 2.5, 6.2, 9.7, 2.1]
result, bins = cut(data, 3, retbins=True)
intervals = IntervalIndex.from_breaks(bins.round(3))
intervals = intervals.take([0, 0, 0, 1, 2, 0])
expected = Categorical(intervals, ordered=True)
tm.assert_categorical_equal(result, expected)
tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667, 6.53333333, 9.7]))
def test_bins_from_interval_index():
c = cut(range(5), 3)
expected = c
result = cut(range(5), bins=expected.categories)
tm.assert_categorical_equal(result, expected)
expected = Categorical.from_codes(
np.append(c.codes, -1), categories=c.categories, ordered=True
)
result = cut(range(6), bins=expected.categories)
tm.assert_categorical_equal(result, expected)
def test_bins_from_interval_index_doc_example():
# Make sure we preserve the bins.
ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60])
c = cut(ages, bins=[0, 18, 35, 70])
expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)])
tm.assert_index_equal(c.categories, expected)
result = cut([25, 20, 50], bins=c.categories)
tm.assert_index_equal(result.categories, expected)
tm.assert_numpy_array_equal(result.codes, np.array([1, 1, 2], dtype="int8"))
def test_bins_not_overlapping_from_interval_index():
# see gh-23980
msg = "Overlapping IntervalIndex is not accepted"
ii = IntervalIndex.from_tuples([(0, 10), (2, 12), (4, 14)])
with pytest.raises(ValueError, match=msg):
cut([5, 6], bins=ii)
def test_bins_not_monotonic():
msg = "bins must increase monotonically"
data = [0.2, 1.4, 2.5, 6.2, 9.7, 2.1]
with pytest.raises(ValueError, match=msg):
cut(data, [0.1, 1.5, 1, 10])
@pytest.mark.parametrize(
"x, bins, expected",
[
(
date_range("2017-12-31", periods=3),
[Timestamp.min, Timestamp("2018-01-01"), Timestamp.max],
IntervalIndex.from_tuples(
[
(Timestamp.min, Timestamp("2018-01-01")),
(Timestamp("2018-01-01"), Timestamp.max),
]
),
),
(
[-1, 0, 1],
np.array(
[np.iinfo(np.int64).min, 0, np.iinfo(np.int64).max], dtype="int64"
),
IntervalIndex.from_tuples(
[(np.iinfo(np.int64).min, 0), (0, np.iinfo(np.int64).max)]
),
),
(
[np.timedelta64(-1), np.timedelta64(0), np.timedelta64(1)],
np.array(
[
np.timedelta64(-np.iinfo(np.int64).max),
np.timedelta64(0),
np.timedelta64(np.iinfo(np.int64).max),
]
),
IntervalIndex.from_tuples(
[
(np.timedelta64(-np.iinfo(np.int64).max), np.timedelta64(0)),
(np.timedelta64(0), np.timedelta64(np.iinfo(np.int64).max)),
]
),
),
],
)
def test_bins_monotonic_not_overflowing(x, bins, expected):
# GH 26045
result = cut(x, bins)
tm.assert_index_equal(result.categories, expected)
def test_wrong_num_labels():
msg = "Bin labels must be one fewer than the number of bin edges"
data = [0.2, 1.4, 2.5, 6.2, 9.7, 2.1]
with pytest.raises(ValueError, match=msg):
cut(data, [0, 1, 10], labels=["foo", "bar", "baz"])
@pytest.mark.parametrize(
"x,bins,msg",
[
([], 2, "Cannot cut empty array"),
([1, 2, 3], 0.5, "`bins` should be a positive integer"),
],
)
def test_cut_corner(x, bins, msg):
with pytest.raises(ValueError, match=msg):
cut(x, bins)
@pytest.mark.parametrize("arg", [2, np.eye(2), DataFrame(np.eye(2))])
@pytest.mark.parametrize("cut_func", [cut, qcut])
def test_cut_not_1d_arg(arg, cut_func):
msg = "Input array must be 1 dimensional"
with pytest.raises(ValueError, match=msg):
cut_func(arg, 2)
@pytest.mark.parametrize(
"data",
[
[0, 1, 2, 3, 4, np.inf],
[-np.inf, 0, 1, 2, 3, 4],
[-np.inf, 0, 1, 2, 3, 4, np.inf],
],
)
def test_int_bins_with_inf(data):
# GH 24314
msg = "cannot specify integer `bins` when input data contains infinity"
with pytest.raises(ValueError, match=msg):
cut(data, bins=3)
def test_cut_out_of_range_more():
# see gh-1511
name = "x"
ser = Series([0, -1, 0, 1, -3], name=name)
ind = cut(ser, [0, 1], labels=False)
exp = Series([np.nan, np.nan, np.nan, 0, np.nan], name=name)
tm.assert_series_equal(ind, exp)
@pytest.mark.parametrize(
"right,breaks,closed",
[
(True, [-1e-3, 0.25, 0.5, 0.75, 1], "right"),
(False, [0, 0.25, 0.5, 0.75, 1 + 1e-3], "left"),
],
)
def test_labels(right, breaks, closed):
arr = np.tile(np.arange(0, 1.01, 0.1), 4)
result, bins = cut(arr, 4, retbins=True, right=right)
ex_levels = IntervalIndex.from_breaks(breaks, closed=closed)
tm.assert_index_equal(result.categories, ex_levels)
def test_cut_pass_series_name_to_factor():
name = "foo"
ser = Series(np.random.randn(100), name=name)
factor = cut(ser, 4)
assert factor.name == name
def test_label_precision():
arr = np.arange(0, 0.73, 0.01)
result = cut(arr, 4, precision=2)
ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36, 0.54, 0.72])
tm.assert_index_equal(result.categories, ex_levels)
@pytest.mark.parametrize("labels", [None, False])
def test_na_handling(labels):
arr = np.arange(0, 0.75, 0.01)
arr[::3] = np.nan
result = cut(arr, 4, labels=labels)
result = np.asarray(result)
expected = np.where(isna(arr), np.nan, result)
tm.assert_almost_equal(result, expected)
def test_inf_handling():
data = np.arange(6)
data_ser = Series(data, dtype="int64")
bins = [-np.inf, 2, 4, np.inf]
result = cut(data, bins)
result_ser = cut(data_ser, bins)
ex_uniques = IntervalIndex.from_breaks(bins)
tm.assert_index_equal(result.categories, ex_uniques)
assert result[5] == Interval(4, np.inf)
assert result[0] == Interval(-np.inf, 2)
assert result_ser[5] == Interval(4, np.inf)
assert result_ser[0] == Interval(-np.inf, 2)
def test_cut_out_of_bounds():
arr = np.random.randn(100)
result = cut(arr, [-1, 0, 1])
mask = isna(result)
ex_mask = (arr < -1) | (arr > 1)
tm.assert_numpy_array_equal(mask, ex_mask)
@pytest.mark.parametrize(
"get_labels,get_expected",
[
(
lambda labels: labels,
lambda labels: Categorical(
["Medium"] + 4 * ["Small"] + ["Medium", "Large"],
categories=labels,
ordered=True,
),
),
(
lambda labels: Categorical.from_codes([0, 1, 2], labels),
lambda labels: Categorical.from_codes([1] + 4 * [0] + [1, 2], labels),
),
],
)
def test_cut_pass_labels(get_labels, get_expected):
bins = [0, 25, 50, 100]
arr = [50, 5, 10, 15, 20, 30, 70]
labels = ["Small", "Medium", "Large"]
result = cut(arr, bins, labels=get_labels(labels))
tm.assert_categorical_equal(result, get_expected(labels))
def test_cut_pass_labels_compat():
# see gh-16459
arr = [50, 5, 10, 15, 20, 30, 70]
labels = ["Good", "Medium", "Bad"]
result = cut(arr, 3, labels=labels)
exp = cut(arr, 3, labels=Categorical(labels, categories=labels, ordered=True))
tm.assert_categorical_equal(result, exp)
@pytest.mark.parametrize("x", [np.arange(11.0), np.arange(11.0) / 1e10])
def test_round_frac_just_works(x):
# It works.
cut(x, 2)
@pytest.mark.parametrize(
"val,precision,expected",
[
(-117.9998, 3, -118),
(117.9998, 3, 118),
(117.9998, 2, 118),
(0.000123456, 2, 0.00012),
],
)
def test_round_frac(val, precision, expected):
# see gh-1979
result = tmod._round_frac(val, precision=precision)
assert result == expected
def test_cut_return_intervals():
ser = Series([0, 1, 2, 3, 4, 5, 6, 7, 8])
result = cut(ser, 3)
exp_bins = np.linspace(0, 8, num=4).round(3)
exp_bins[0] -= 0.008
expected = Series(
IntervalIndex.from_breaks(exp_bins, closed="right").take(
[0, 0, 0, 1, 1, 1, 2, 2, 2]
)
).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
def test_series_ret_bins():
# see gh-8589
ser = Series(np.arange(4))
result, bins = cut(ser, 2, retbins=True)
expected = Series(
IntervalIndex.from_breaks([-0.003, 1.5, 3], closed="right").repeat(2)
).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,msg",
[
(dict(duplicates="drop"), None),
(dict(), "Bin edges must be unique"),
(dict(duplicates="raise"), "Bin edges must be unique"),
(dict(duplicates="foo"), "invalid value for 'duplicates' parameter"),
],
)
def test_cut_duplicates_bin(kwargs, msg):
# see gh-20947
bins = [0, 2, 4, 6, 10, 10]
values = Series(np.array([1, 3, 5, 7, 9]), index=["a", "b", "c", "d", "e"])
if msg is not None:
with pytest.raises(ValueError, match=msg):
cut(values, bins, **kwargs)
else:
result = cut(values, bins, **kwargs)
expected = cut(values, pd.unique(bins))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("data", [9.0, -9.0, 0.0])
@pytest.mark.parametrize("length", [1, 2])
def test_single_bin(data, length):
# see gh-14652, gh-15428
ser = Series([data] * length)
result = cut(ser, 1, labels=False)
expected = Series([0] * length)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"array_1_writeable,array_2_writeable", [(True, True), (True, False), (False, False)]
)
def test_cut_read_only(array_1_writeable, array_2_writeable):
# issue 18773
array_1 = np.arange(0, 100, 10)
array_1.flags.writeable = array_1_writeable
array_2 = np.arange(0, 100, 10)
array_2.flags.writeable = array_2_writeable
hundred_elements = np.arange(100)
tm.assert_categorical_equal(
cut(hundred_elements, array_1), cut(hundred_elements, array_2)
)
@pytest.mark.parametrize(
"conv",
[
lambda v: Timestamp(v),
lambda v: to_datetime(v),
lambda v: np.datetime64(v),
lambda v: Timestamp(v).to_pydatetime(),
],
)
def test_datetime_bin(conv):
data = [np.datetime64("2012-12-13"), np.datetime64("2012-12-15")]
bin_data = ["2012-12-12", "2012-12-14", "2012-12-16"]
expected = Series(
IntervalIndex(
[
Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])),
Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2])),
]
)
).astype(CDT(ordered=True))
bins = [conv(v) for v in bin_data]
result = Series(cut(data, bins=bins))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
to_datetime(Series(["2013-01-01", "2013-01-02", "2013-01-03"])),
[
np.datetime64("2013-01-01"),
np.datetime64("2013-01-02"),
np.datetime64("2013-01-03"),
],
np.array(
[
np.datetime64("2013-01-01"),
np.datetime64("2013-01-02"),
np.datetime64("2013-01-03"),
]
),
DatetimeIndex(["2013-01-01", "2013-01-02", "2013-01-03"]),
],
)
def test_datetime_cut(data):
# see gh-14714
#
# Testing time data when it comes in various collection types.
result, _ = cut(data, 3, retbins=True)
expected = Series(
IntervalIndex(
[
Interval(
Timestamp("2012-12-31 23:57:07.200000"),
Timestamp("2013-01-01 16:00:00"),
),
Interval(
Timestamp("2013-01-01 16:00:00"), Timestamp("2013-01-02 08:00:00")
),
Interval(
Timestamp("2013-01-02 08:00:00"), Timestamp("2013-01-03 00:00:00")
),
]
)
).astype(CDT(ordered=True))
tm.assert_series_equal(Series(result), expected)
@pytest.mark.parametrize(
"bins",
[
3,
[
Timestamp("2013-01-01 04:57:07.200000"),
Timestamp("2013-01-01 21:00:00"),
Timestamp("2013-01-02 13:00:00"),
Timestamp("2013-01-03 05:00:00"),
],
],
)
@pytest.mark.parametrize("box", [list, np.array, Index, Series])
def test_datetime_tz_cut(bins, box):
# see gh-19872
tz = "US/Eastern"
s = Series(date_range("20130101", periods=3, tz=tz))
if not isinstance(bins, int):
bins = box(bins)
result = cut(s, bins)
expected = Series(
IntervalIndex(
[
Interval(
Timestamp("2012-12-31 23:57:07.200000", tz=tz),
Timestamp("2013-01-01 16:00:00", tz=tz),
),
Interval(
Timestamp("2013-01-01 16:00:00", tz=tz),
Timestamp("2013-01-02 08:00:00", tz=tz),
),
Interval(
Timestamp("2013-01-02 08:00:00", tz=tz),
Timestamp("2013-01-03 00:00:00", tz=tz),
),
]
)
).astype(CDT(ordered=True))
tm.assert_series_equal(result, expected)
def test_datetime_nan_error():
msg = "bins must be of datetime64 dtype"
with pytest.raises(ValueError, match=msg):
cut(date_range("20130101", periods=3), bins=[0, 2, 4])
def test_datetime_nan_mask():
result = cut(
date_range("20130102", periods=5), bins=date_range("20130101", periods=2)
)
mask = result.categories.isna()
tm.assert_numpy_array_equal(mask, np.array([False]))
mask = result.isna()
tm.assert_numpy_array_equal(mask, np.array([False, True, True, True, True]))
@pytest.mark.parametrize("tz", [None, "UTC", "US/Pacific"])
def test_datetime_cut_roundtrip(tz):
# see gh-19891
ser = Series(date_range("20180101", periods=3, tz=tz))
result, result_bins = cut(ser, 2, retbins=True)
expected = cut(ser, result_bins)
tm.assert_series_equal(result, expected)
expected_bins = DatetimeIndex(
["2017-12-31 23:57:07.200000", "2018-01-02 00:00:00", "2018-01-03 00:00:00"]
)
expected_bins = expected_bins.tz_localize(tz)
tm.assert_index_equal(result_bins, expected_bins)
def test_timedelta_cut_roundtrip():
# see gh-19891
ser = Series(timedelta_range("1day", periods=3))
result, result_bins = cut(ser, 2, retbins=True)
expected = cut(ser, result_bins)
tm.assert_series_equal(result, expected)
expected_bins = TimedeltaIndex(
["0 days 23:57:07.200000", "2 days 00:00:00", "3 days 00:00:00"]
)
tm.assert_index_equal(result_bins, expected_bins)
@pytest.mark.parametrize("bins", [6, 7])
@pytest.mark.parametrize(
"box, compare",
[
(Series, tm.assert_series_equal),
(np.array, tm.assert_categorical_equal),
(list, tm.assert_equal),
],
)
def test_cut_bool_coercion_to_int(bins, box, compare):
# issue 20303
data_expected = box([0, 1, 1, 0, 1] * 10)
data_result = box([False, True, True, False, True] * 10)
expected = cut(data_expected, bins, duplicates="drop")
result = cut(data_result, bins, duplicates="drop")
compare(result, expected)
@pytest.mark.parametrize("labels", ["foo", 1, True])
def test_cut_incorrect_labels(labels):
# GH 13318
values = range(5)
msg = "Bin labels must either be False, None or passed in as a list-like argument"
with pytest.raises(ValueError, match=msg):
cut(values, 4, labels=labels)
@pytest.mark.parametrize("bins", [3, [0, 5, 15]])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("include_lowest", [True, False])
def test_cut_nullable_integer(bins, right, include_lowest):
a = np.random.randint(0, 10, size=50).astype(float)
a[::2] = np.nan
result = cut(
pd.array(a, dtype="Int64"), bins, right=right, include_lowest=include_lowest
)
expected = cut(a, bins, right=right, include_lowest=include_lowest)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize(
"data, bins, labels, expected_codes, expected_labels",
[
([15, 17, 19], [14, 16, 18, 20], ["A", "B", "A"], [0, 1, 0], ["A", "B"]),
([1, 3, 5], [0, 2, 4, 6, 8], [2, 0, 1, 2], [2, 0, 1], [0, 1, 2]),
],
)
def test_cut_non_unique_labels(data, bins, labels, expected_codes, expected_labels):
# GH 33141
result = cut(data, bins=bins, labels=labels, ordered=False)
expected = Categorical.from_codes(
expected_codes, categories=expected_labels, ordered=False
)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize(
"data, bins, labels, expected_codes, expected_labels",
[
([15, 17, 19], [14, 16, 18, 20], ["C", "B", "A"], [0, 1, 2], ["C", "B", "A"]),
([1, 3, 5], [0, 2, 4, 6, 8], [3, 0, 1, 2], [0, 1, 2], [3, 0, 1, 2]),
],
)
def test_cut_unordered_labels(data, bins, labels, expected_codes, expected_labels):
# GH 33141
result = cut(data, bins=bins, labels=labels, ordered=False)
expected = Categorical.from_codes(
expected_codes, categories=expected_labels, ordered=False
)
tm.assert_categorical_equal(result, expected)
def test_cut_unordered_with_missing_labels_raises_error():
# GH 33141
msg = "'labels' must be provided if 'ordered = False'"
with pytest.raises(ValueError, match=msg):
cut([0.5, 3], bins=[0, 1, 2], ordered=False)
| bsd-3-clause |
mc-hammertimeseries/cs207project | docs/conf.py | 1 | 8736 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- Hack for ReadTheDocs ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
import inspect
from sphinx import apidoc
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
output_dir = os.path.join(__location__, "../docs/api")
module_dir = os.path.join(__location__, "../cs207project")
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
apidoc.main(cmd_line.split(" "))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cs207project'
copyright = u'2016, Jonathan Friedman'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from cs207project import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cs207project-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'cs207project Documentation',
u'Jonathan Friedman', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://sphinx.pocoo.org', None),
'python': ('http://docs.python.org/' + python_version, None),
'matplotlib': ('http://matplotlib.sourceforge.net', None),
'numpy': ('http://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
| mit |
jlegendary/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
| bsd-3-clause |
yanlend/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
gweidner/systemml | src/main/python/systemml/classloader.py | 7 | 8270 | # -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
__all__ = [
'createJavaObject',
'jvm_stdout',
'default_jvm_stdout',
'default_jvm_stdout_parallel_flush',
'set_default_jvm_stdout',
'get_spark_context']
import os
import numpy as np
import pandas as pd
import threading
import time
try:
import py4j.java_gateway
from py4j.java_gateway import JavaObject
from pyspark import SparkContext
from pyspark.sql import SparkSession
except ImportError:
raise ImportError(
'Unable to import `pyspark`. Hint: Make sure you are running with PySpark.')
_loadedSystemML = False
def get_spark_context():
"""
Internal method to get already initialized SparkContext. Developers should always use
    get_spark_context() instead of SparkContext._active_spark_context to ensure SystemML is loaded.
Returns
-------
sc: SparkContext
SparkContext
"""
if SparkContext._active_spark_context is not None:
sc = SparkContext._active_spark_context
global _loadedSystemML
if not _loadedSystemML:
createJavaObject(sc, 'dummy')
_loadedSystemML = True
return sc
else:
raise Exception('Expected spark context to be created.')
_in_jvm_stdout = False
default_jvm_stdout = True
default_jvm_stdout_parallel_flush = True
def set_default_jvm_stdout(enable, parallel_flush=True):
"""
    This is a useful utility method to get the output of the driver JVM from within a Jupyter notebook
Parameters
----------
enable: boolean
Should flush the stdout by default when mlcontext.execute is invoked
parallel_flush: boolean
Should flush the stdout in parallel
"""
global default_jvm_stdout, default_jvm_stdout_parallel_flush
default_jvm_stdout = enable
default_jvm_stdout_parallel_flush = parallel_flush
# This is a useful utility class to get the output of the driver JVM from within a Jupyter notebook
# Example usage:
# with jvm_stdout():
# ml.execute(script)
class jvm_stdout(object):
"""
    This is a useful utility class to get the output of the driver JVM from within a Jupyter notebook
Parameters
----------
parallel_flush: boolean
Should flush the stdout in parallel
"""
def __init__(self, parallel_flush=False):
self.util = get_spark_context()._jvm.org.apache.sysml.api.ml.Utils()
self.parallel_flush = parallel_flush
self.t = threading.Thread(target=self.flush_stdout)
self.stop = False
def flush_stdout(self):
while not self.stop:
time.sleep(1) # flush stdout every 1 second
str = self.util.flushStdOut()
if str != '':
str = str[:-1] if str.endswith('\n') else str
print(str)
def __enter__(self):
global _in_jvm_stdout
if _in_jvm_stdout:
# Allow for nested jvm_stdout
self.donotRedirect = True
else:
self.donotRedirect = False
self.util.startRedirectStdOut()
if self.parallel_flush:
self.t.start()
_in_jvm_stdout = True
def __exit__(self, *args):
global _in_jvm_stdout
if not self.donotRedirect:
if self.parallel_flush:
self.stop = True
self.t.join()
print(self.util.stopRedirectStdOut())
_in_jvm_stdout = False
_initializedSparkSession = False
def _createJavaObject(sc, obj_type):
# -----------------------------------------------------------------------------------
# Avoids race condition between locking of metastore_db of Scala SparkSession and PySpark SparkSession.
# This is done at toDF() rather than import level to avoid creation of
# SparkSession in worker processes.
global _initializedSparkSession
if not _initializedSparkSession:
_initializedSparkSession = True
SparkSession.builder.getOrCreate().createDataFrame(
pd.DataFrame(np.array([[1, 2], [3, 4]])))
# -----------------------------------------------------------------------------------
if obj_type == 'mlcontext':
return sc._jvm.org.apache.sysml.api.mlcontext.MLContext(sc._jsc)
elif obj_type == 'dummy':
return sc._jvm.org.apache.sysml.utils.SystemMLLoaderUtils()
else:
raise ValueError(
'Incorrect usage: supported values: mlcontext or dummy')
def _getJarFileNames(sc):
import imp
import fnmatch
jar_file_name = '_ignore.jar'
java_dir = os.path.join(imp.find_module("systemml")[1], "systemml-java")
jar_file_names = []
for file in os.listdir(java_dir):
if fnmatch.fnmatch(
file, 'systemml-*-SNAPSHOT.jar') or fnmatch.fnmatch(file, 'systemml-*.jar'):
jar_file_names = jar_file_names + [os.path.join(java_dir, file)]
return jar_file_names
def _getLoaderInstance(sc, jar_file_name, className, hint):
err_msg = 'Unable to load systemml-*.jar into current pyspark session.'
if os.path.isfile(jar_file_name):
sc._jsc.addJar(jar_file_name)
jar_file_url = sc._jvm.java.io.File(jar_file_name).toURI().toURL()
url_class = sc._jvm.java.net.URL
jar_file_url_arr = sc._gateway.new_array(url_class, 1)
jar_file_url_arr[0] = jar_file_url
url_class_loader = sc._jvm.java.net.URLClassLoader(
jar_file_url_arr, sc._jsc.getClass().getClassLoader())
c1 = sc._jvm.java.lang.Class.forName(className, True, url_class_loader)
return c1.newInstance()
else:
raise ImportError(
err_msg +
' Hint: Download the jar from http://systemml.apache.org/download and ' +
hint)
def createJavaObject(sc, obj_type):
"""
Performs appropriate check if SystemML.jar is available and returns the handle to MLContext object on JVM
Parameters
----------
sc: SparkContext
SparkContext
obj_type: Type of object to create ('mlcontext' or 'dummy')
"""
try:
return _createJavaObject(sc, obj_type)
except (py4j.protocol.Py4JError, TypeError):
ret = None
err_msg = 'Unable to load systemml-*.jar into current pyspark session.'
hint = 'Provide the following argument to pyspark: --driver-class-path '
jar_file_names = _getJarFileNames(sc)
if len(jar_file_names) != 2:
raise ImportError(
'Expected only systemml and systemml-extra jars, but found ' +
str(jar_file_names))
for jar_file_name in jar_file_names:
if 'extra' in jar_file_name:
x = _getLoaderInstance(
sc,
jar_file_name,
'org.apache.sysml.api.dl.Caffe2DMLLoader',
hint + 'systemml-*-extra.jar')
x.loadCaffe2DML(jar_file_name)
else:
x = _getLoaderInstance(
sc,
jar_file_name,
'org.apache.sysml.utils.SystemMLLoaderUtils',
hint + 'systemml-*.jar')
x.loadSystemML(jar_file_name)
try:
ret = _createJavaObject(sc, obj_type)
except (py4j.protocol.Py4JError, TypeError):
raise ImportError(err_msg + ' Hint: ' + hint + jar_file_name)
return ret
| apache-2.0 |
zakahmad/kNN | kNN.py | 1 | 2966 | ###
###
# This is an implementation of the kNN algorithm. The code is written with
# readability in mind, and most importantly, learning how the algorithm works.
# I recommend using kNN from a generic machine learning library (sci-kit learn)
# as those are usually optimized for performance.
###
### By: Zak Ahmad ([email protected])
###
# this code will run only if numpy and pandas are installed
# if not, please download numpy from http://www.scipy.org/scipylib/download.html
# and pandas from http://pandas.pydata.org/getpandas.html
import numpy as np
import pandas as pd
import random
# For more information about the 1974 Motor Trend Car Road Test dataset
# see http://stat.ethz.ch/R-manual/R-devel/library/datasets/html/mtcars.html
# -------------------- read the data --------------------
# specify file name containing the data
filename = "cardata.csv"
# create a pandas dataframe containing the data in the csv file
cardata = pd.read_csv(filename)
# find the length (number of rows) of data
data_length = len(cardata)
# we will use only 60% of the data to construct our training set
train_length = int(0.6 * data_length)
# --------------------- prepare data --------------------
# generate numpy array from zero to length of data used for indices
indices = np.arange(data_length)
# randomize the order of the indices
random.shuffle(indices)
# create indices for training set from the randomized indices
train_indices = indices[0:train_length]
# output feature we are interested in predicting
yfeature = ["mpg"]
# input feature(s) we will use to construct our prediction model
xfeatures = ["cyl","disp","wt"]
# this creates the training set which constructs our prediction model
X_train_data = cardata.ix[train_indices,xfeatures]
Y_train_data = cardata.ix[train_indices,yfeature]
# ------------------ predict, using data -----------------
# numpy array containing the features of the vehicle we want to predict
# note the order of the array elements has to be the same as xfeatures
my_vehicle = np.array([4,80,1.5]) # 4 cylinders, 80 cubic inches displacement, weight 1.5 (in 1000 lbs)
# k is the number of elements we will be averaging for our prediction
k = 3
if k < 1:
# k should atleast be 1
k = 1
elif k > len(Y_train_data):
# k cannot be more than the training set's length
k = len(Y_train_data)
# this is where the "prediction" begins: first compute the squared
# differences between my_vehicle and every row of the x training set (input features)
sqdiff = (X_train_data - my_vehicle)**2
# now compute the Euclidean distance for each row (axis = 1)
dist = np.sqrt(sqdiff.sum(axis=1))
# store the Euclidean distance into a new column in the y training set (output)
Y_train_data["dist"] = dist
# sort the y training set by the Euclidean distance
sorted_output = Y_train_data.sort("dist")
# get the yfeature as a numpy array and compute mean of only k elements
prediction = np.mean(sorted_output.as_matrix(yfeature)[0:k])
print(prediction)
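# ------------- optional cross-check with scikit-learn -------------
# The header comments recommend using kNN from a generic machine learning
# library; the block below is a minimal, optional sketch of the equivalent
# call, assuming scikit-learn is installed. It is skipped if the import fails.
try:
    from sklearn.neighbors import KNeighborsRegressor
    knn = KNeighborsRegressor(n_neighbors=k)
    # fit on the same training split; Y_train_data gained a "dist" column
    # above, so select only the original output feature here
    knn.fit(X_train_data, Y_train_data[yfeature].values.ravel())
    sk_prediction = knn.predict(my_vehicle.reshape(1, -1))[0]
    print(sk_prediction)  # should be close to the manual prediction above
except ImportError:
    pass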
| gpl-2.0 |
shunliz/test | python/stock/stock2.py | 1 | 1525 | import tushare as ts
import csv
import time
import pandas as pd
pro = ts.pro_api('1dbda79ce58d052196b7ddec1663d53e4ea20571195a1a6055aab0c7')
stock_basic = pro.stock_basic(list_status='L', fields='ts_code, symbol, name, industry')
# 重命名行,便于后面导入neo4j
basic_rename = {'ts_code': 'TS代码', 'symbol': '股票代码', 'name': '股票名称', 'industry': '行业'}
stock_basic.rename(columns=basic_rename, inplace=True)
# Save as stock.csv
stock_basic.to_csv('stock.csv', encoding='gbk')
# Fetch top10_holders
holders = pd.DataFrame(columns=('ts_code', 'ann_date', 'end_date', 'holder_name', 'hold_amount', 'hold_ratio'))
# Fetch top-10 shareholder info for all listed stocks over one year (a single reporting period can also be fetched)
for i in range(3610):
code = stock_basic['TS代码'].values[i]
top10_holders = pro.top10_holders(ts_code=code, start_date='20180101', end_date='20181231')
holders = holders.append(top10_holders)
    time.sleep(0.3)  # data API rate limit
# Save as holders.csv
holders.to_csv('holders.csv', encoding='gbk')
# Fetch concepts and check the number of concept categories
concept = pro.concept()
concept.to_csv('concept_num.csv', encoding='gbk')
# Fetch concept_detail
concept_details = pd.DataFrame(columns=('id', 'concept_name', 'ts_code', 'name'))
for i in range(358):
id = 'TS' + str(i)
concept_detail = pro.concept_detail(id=id)
concept_details = concept_details.append(concept_detail)
time.sleep(0.3)
# Save as concept.csv
concept_details.to_csv('concept.csv', encoding='gbk') | apache-2.0 |
jmcq89/megaman | megaman/geometry/laplacian.py | 3 | 5335 | # LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
import numpy as np
from scipy.sparse import isspmatrix
from sklearn.utils.validation import check_array
from .utils import RegisterSubclasses
def compute_laplacian_matrix(affinity_matrix, method='auto', **kwargs):
"""Compute the laplacian matrix with the given method"""
if method == 'auto':
method = 'geometric'
return Laplacian.init(method, **kwargs).laplacian_matrix(affinity_matrix)
def laplacian_methods():
"""Return the list of valid laplacian methods"""
return ['auto'] + list(Laplacian.methods())
class Laplacian(RegisterSubclasses):
"""Base class for computing laplacian matrices
Notes
-----
The methods here all return the negative of the standard
Laplacian definition.
"""
symmetric = False
def __init__(self, symmetrize_input=True,
scaling_epps=None, full_output=False):
self.symmetrize_input = symmetrize_input
self.scaling_epps = scaling_epps
self.full_output = full_output
@staticmethod
def _symmetrize(A):
# TODO: make this more efficient?
return 0.5 * (A + A.T)
@classmethod
def symmetric_methods(cls):
for method in cls.methods():
if cls.get_method(method).symmetric:
yield method
@classmethod
def asymmetric_methods(cls):
for method in cls.methods():
if not cls.get_method(method).symmetric:
yield method
def laplacian_matrix(self, affinity_matrix):
affinity_matrix = check_array(affinity_matrix, copy=False, dtype=float,
accept_sparse=['csr', 'csc', 'coo'])
if self.symmetrize_input:
affinity_matrix = self._symmetrize(affinity_matrix)
if isspmatrix(affinity_matrix):
affinity_matrix = affinity_matrix.tocoo()
else:
affinity_matrix = affinity_matrix.copy()
lap, lapsym, w = self._compute_laplacian(affinity_matrix)
if self.scaling_epps is not None and self.scaling_epps > 0.:
if isspmatrix(lap):
lap.data *= 4 / (self.scaling_epps ** 2)
else:
lap *= 4 / (self.scaling_epps ** 2)
if self.full_output:
return lap, lapsym, w
else:
return lap
def _compute_laplacian(self, lap):
raise NotImplementedError()
class UnNormalizedLaplacian(Laplacian):
name = 'unnormalized'
symmetric = True
def _compute_laplacian(self, lap):
w = _degree(lap)
_subtract_from_diagonal(lap, w)
return lap, lap, w
class GeometricLaplacian(Laplacian):
name = 'geometric'
symmetric = False
def _compute_laplacian(self, lap):
_normalize_laplacian(lap, symmetric=True)
lapsym = lap.copy()
w, nonzero = _normalize_laplacian(lap, symmetric=False)
_subtract_from_diagonal(lap, nonzero)
return lap, lapsym, w
class RandomWalkLaplacian(Laplacian):
name = 'randomwalk'
symmetric = False
def _compute_laplacian(self, lap):
lapsym = lap.copy()
w, nonzero = _normalize_laplacian(lap, symmetric=False)
_subtract_from_diagonal(lap, nonzero)
return lap, lapsym, w
class SymmetricNormalizedLaplacian(Laplacian):
name = 'symmetricnormalized'
symmetric = True
def _compute_laplacian(self, lap):
w, nonzero = _normalize_laplacian(lap, symmetric=True, degree_exp=0.5)
_subtract_from_diagonal(lap, nonzero)
return lap, lap, w
class RenormalizedLaplacian(Laplacian):
name = 'renormalized'
symmetric = False
def __init__(self, symmetrize_input=True,
scaling_epps=None,
full_output=False,
renormalization_exponent=1):
self.symmetrize_input = symmetrize_input
self.scaling_epps = scaling_epps
self.full_output = full_output
self.renormalization_exponent = renormalization_exponent
def _compute_laplacian(self, lap):
_normalize_laplacian(lap, symmetric=True,
degree_exp=self.renormalization_exponent)
lapsym = lap.copy()
w, nonzero = _normalize_laplacian(lap, symmetric=False)
_subtract_from_diagonal(lap, nonzero)
return lap, lapsym, w
# Utility routines: these operate in-place and assume either coo matrix or
# dense array
def _degree(lap):
return np.asarray(lap.sum(1)).squeeze()
def _divide_along_rows(lap, vals):
if isspmatrix(lap):
lap.data /= vals[lap.row]
else:
lap /= vals[:, np.newaxis]
def _divide_along_cols(lap, vals):
if isspmatrix(lap):
lap.data /= vals[lap.col]
else:
lap /= vals
def _normalize_laplacian(lap, symmetric=False, degree_exp=None):
w = _degree(lap)
w_nonzero = (w != 0)
w[~w_nonzero] = 1
if degree_exp is not None:
w **= degree_exp
if symmetric:
_divide_along_rows(lap, w)
_divide_along_cols(lap, w)
else:
_divide_along_rows(lap, w)
return w, w_nonzero
def _subtract_from_diagonal(lap, vals):
if isspmatrix(lap):
lap.data[lap.row == lap.col] -= vals
else:
lap.flat[::lap.shape[0] + 1] -= vals
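if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; not part of the original
    # module). The 3x3 affinity values below are arbitrary example data.
    affinity = np.array([[0.0, 1.0, 0.5],
                         [1.0, 0.0, 0.2],
                         [0.5, 0.2, 0.0]])
    print("available methods: %s" % laplacian_methods())
    print("geometric laplacian:")
    print(compute_laplacian_matrix(affinity, method='geometric'))
    print("symmetric normalized laplacian:")
    print(compute_laplacian_matrix(affinity, method='symmetricnormalized'))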
| bsd-2-clause |
wechselstrom/GNG_Reservoir | GNGReservoir.py | 1 | 4895 | #!/usr/bin/python3.4
import numpy as np
import scipy as sp
from numpy.linalg import norm
from sklearn.preprocessing import normalize
import sklearn.linear_model
sigmaSq = 6
def genInitValues(numInp, numHidd):
N = numInp+numHidd
M = numHidd
density = 1/sp.sqrt(N)
W = sp.sparse.rand(N, M, density)
negValDensity = 0.3
nM = sp.sign(sp.rand(N, M)-1+negValDensity)
W = W.multiply(nM)
W = normalize(W, axis=0, copy=False).T
return (sp.sparse.csr_matrix(W))
def oneOfKtoStr(ook):
if sp.sparse.issparse(ook):
ook = ook.toarray()
indices = np.argmax(ook,axis=1)
s = ''
for i in indices:
if i==0:
s+='.'
elif i==1:
s+=','
elif i==2:
s+=' '
elif i==3:
s+='\''
else: s+=chr(i+ord('a')-4)
return(s)
def oneOfK(string):
i = 0
row = []
col = []
data = []
d = 0
for x in string:
if x.islower():
d = ord(x)-ord('a')+4
elif x.isupper():
d = ord(x)-ord('A')+4
elif x=='.':
d = 0
elif x == ',':
d = 1
elif x == ' ':
d = 2
elif x == '\'':
d = 3
else:
continue
row.append(i)
col.append(d)
data.append(1.0)
i+=1
return sp.sparse.csr_matrix((data, (row,col)), shape=(i,30))
def step(inp, states, W):
S = np.concatenate([np.squeeze(inp), states])
Xcurr = generateSparseActivationMatrix(S, W)
Diff = (Xcurr-W)
Diff.data = Diff.data**2
Xnorm = np.array(Diff.sum(axis=1))
statesNext = np.squeeze(sp.exp(-(1/2*sigmaSq)*Xnorm))
return statesNext
def computeOutputs(states,RR):
return sp.squeeze(RR.predict(states))
def runWithStimuli(inp, W, RR=None, states=None):
if states is None:
states = sp.zeros(W.shape[0])
for i in range(inp.shape[0]):
states = step(inp[i,:].toarray().T, states, W)
if RR is None:
yield states
else:
out = computeOutputs(states, RR)
yield out,states
def trainWeights(inp, outp, W, a):
states = runStimulitoArray(inp,W)
RR = sklearn.linear_model.Ridge(alpha=a)
if sp.sparse.issparse(outp):
outp=outp.toarray()
RR.fit(states, outp)
return RR
def runStimulitoArray(stim, W, RR=None):
if RR is None:
return sp.array([val for val in runWithStimuli(stim, W)])
else:
x = [val for val in runWithStimuli(stim, W, RR)]
out, states = zip(*x)
return sp.array(out),sp.array(states)
def generateSparseActivationMatrix(S, W):
Wc = W.copy()
Wc.data = S[Wc.indices]
Wc = normalize(Wc,axis=1, norm='l2')
return Wc
def testcase2(filename, trainsamples, numStates, lastChars, alpha):
with open(filename,'r') as f:
cont = f.read()
inp1 = oneOfK(cont[:trainsamples])
*x1, y1 = genlistOfShiftedStrings(inp1)
x1 = sp.sparse.hstack(x1).tocsr()
W = genInitValues(x1.shape[1], numStates)
RR = trainWeights(x1, y1, W, alpha)
states = sp.zeros(W.shape[0])
x = oneOfK('lala').toarray().reshape(1,120)
s = ''
for i in range(1000):
states = step(x,states,W)
out = computeOutputs(states,RR)
x[0,0:90] = x[0,30:120]
x[0,90:120] = out
s += oneOfKtoStr([out])
return s
genlistOfShiftedStrings = lambda inp: [inp[i:-(lastChars-i)] for i in range(0,lastChars)]
def testcase1(filename, trainsamples, testsamples, numStates, lastChars, alpha):
with open(filename,'r') as f:
cont = f.read()
inp1 = oneOfK(cont[:trainsamples])
*x1, y1 = genlistOfShiftedStrings(inp1)
x1 = sp.sparse.hstack(x1).tocsr()
inp2 = oneOfK(cont[trainsamples:trainsamples+testsamples])
*x2, y2 = genlistOfShiftedStrings(inp2)
x2 = sp.sparse.hstack(x2).tocsr()
print('finished generation of testsamples')
W = genInitValues(x1.shape[1], numStates)
print('initialized network randomly')
RR = trainWeights(x1, y1, W, alpha)
print('finished training')
out1, _ = runStimulitoArray(x1, W, RR)
out2, _ = runStimulitoArray(x2, W, RR)
print('ran on testset')
return oneOfKtoStr(y1),oneOfKtoStr(out1), oneOfKtoStr(y2), oneOfKtoStr(out2)
if __name__ == '__main__':
filename = 'shakespeare.txt'
trainsamples = 15000
lastChars = 5
numStates = 3000
testsamples = 2000
alpha=1e-4
y1,out1,y2,out2 = testcase1(filename, trainsamples, testsamples, numStates, lastChars, alpha)
    print('\noriginal traindata:\n%s\nreconstructed traindata:\n%s\noriginal testdata:\n%s\nreconstructed testdata:\n%s\n' % (y1,out1,y2,out2))
s = testcase2(filename, trainsamples, numStates, lastChars, alpha)
print(s)
| gpl-2.0 |
liyu1990/sklearn | sklearn/ensemble/gradient_boosting.py | 4 | 70898 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly, Jacob Schreiber
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta
from abc import abstractmethod
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
import numbers
import numpy as np
from scipy import stats
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from time import time
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE
from ..tree._tree import TREE_LEAF
from ..utils import check_random_state
from ..utils import check_array
from ..utils import check_X_y
from ..utils import column_or_1d
from ..utils import check_consistent_length
from ..utils import deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit
from ..utils.fixes import bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight,
self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float64)
class_counts = bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
---------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
        learning_rate : float, default=1.0
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
Terminal regions need not to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
Quantile regression allows to estimate the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        Losses that do not support probabilities keep this default, which raises a TypeError.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = expit(2.0 * score.ravel())
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
    verbose_mod is zero); if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.presort = presort
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
random_state, X_idx_sorted, X_csc=None, X_csr=None):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion='friedman_mse',
splitter='best',
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state,
presort=self.presort)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
if X_csc is not None:
tree.fit(X_csc, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
else:
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False, X_idx_sorted=X_idx_sorted)
# update tree leaves
if X_csr is not None:
loss.update_terminal_regions(tree.tree_, X_csr, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
else:
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
(total_n_estimators, self.estimators_[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def _check_initialized(self):
"""Check that the estimator is initialized, raising an error if not."""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection,
            and snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
X_idx_sorted = None
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if presort == 'auto' and issparse(X):
presort = False
elif presort == 'auto':
presort = True
        if presort == True:
if issparse(X):
raise ValueError("Presorting is not supported for sparse matrices.")
else:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor, X_idx_sorted)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None, X_idx_sorted=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = csc_matrix(X) if issparse(X) else None
X_csr = csr_matrix(X) if issparse(X) else None
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, random_state, X_idx_sorted,
X_csc, X_csr)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (
old_oob_score - loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
self._check_initialized()
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators, n_classes]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in in each estimator.
In the case of binary classification n_classes is 1.
"""
self._check_initialized()
X = self.estimators_[0, 0]._validate_X_predict(X, check_input=True)
# n_classes will be equal to 1 in the binary classification or the
# regression case.
n_estimators, n_classes = self.estimators_.shape
leaves = np.zeros((X.shape[0], n_estimators, n_classes))
for i in range(n_estimators):
for j in range(n_classes):
estimator = self.estimators_[i, j]
leaves[:, i, j] = estimator.apply(X, check_input=False)
return leaves
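# Illustrative sketch (not part of the original scikit-learn source): a
# ``monitor`` callable with the signature expected by ``fit`` above.  It
# requests early stopping once the out-of-bag improvement becomes negligible;
# the threshold and the minimum number of stages are arbitrary values chosen
# for the example and require ``subsample < 1.0`` so that oob estimates exist.
def _example_early_stopping_monitor(i, gradient_boosting, local_vars):
    """Return True to stop boosting after stage ``i``."""
    return (i > 10
            and hasattr(gradient_boosting, 'oob_improvement_')
            and gradient_boosting.oob_improvement_[i] < 1e-7)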
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
init : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, ``loss_.K``]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False,
presort='auto'):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
presort=presort)
def _validate_y(self, y):
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
for dec in self._staged_decision_function(X):
# no yield from in Python2.X
yield dec
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self._staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
return np.log(proba)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
try:
for score in self._staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
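# Illustrative usage sketch (not part of the original module): fitting the
# classifier on a small synthetic problem and inspecting the staged
# probabilities and leaf indices.  Dataset shape and hyper-parameter values
# are arbitrary choices made for the example.
def _example_gradient_boosting_classifier_usage():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, n_features=10, random_state=0)
    clf = GradientBoostingClassifier(n_estimators=20, learning_rate=0.1,
                                     max_depth=3, random_state=0)
    clf.fit(X, y)
    # staged_predict_proba yields one probability array per boosting stage
    staged = [proba[:5] for proba in clf.staged_predict_proba(X)]
    # apply() returns leaf indices of shape [n_samples, n_estimators, 1]
    leaves = clf.apply(X)
    return clf.score(X, y), len(staged), leaves.shape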
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Read more in the :ref:`User Guide <gradient_boosting>`.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool or 'auto', optional (default='auto')
Whether to presort the data to speed up the finding of best splits in
fitting. Auto mode by default will use presorting on dense data and
default to normal sorting on sparse data. Setting presort to true on
sparse data will raise an error.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False, presort='auto'):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start,
            presort=presort)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
X = check_array(X, dtype=DTYPE, order="C")
return self._decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
This method allows monitoring (i.e. determine error on testing set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self._staged_decision_function(X):
yield y.ravel()
def apply(self, X):
"""Apply trees in the ensemble to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the ensemble,
return the index of the leaf x ends up in in each estimator.
"""
leaves = super(GradientBoostingRegressor, self).apply(X)
leaves = leaves.reshape(X.shape[0], self.estimators_.shape[0])
return leaves
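# Illustrative usage sketch (not part of the original module): tracking the
# test error after each boosting stage with ``staged_predict``, as described
# in the docstrings above.  The synthetic data and parameter values are
# arbitrary choices made for the example.
def _example_gradient_boosting_regressor_usage():
    from sklearn.datasets import make_regression
    from sklearn.metrics import mean_squared_error
    X, y = make_regression(n_samples=200, n_features=10, random_state=0)
    X_train, y_train = X[:150], y[:150]
    X_test, y_test = X[150:], y[150:]
    est = GradientBoostingRegressor(n_estimators=50, learning_rate=0.1,
                                    max_depth=3, loss='ls', random_state=0)
    est.fit(X_train, y_train)
    # one error value per stage; useful to choose the number of stages
    test_errors = [mean_squared_error(y_test, y_pred)
                   for y_pred in est.staged_predict(X_test)]
    return test_errors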
| bsd-3-clause |
ryanfobel/dmf_control_board | docs/rename.py | 3 | 2578 | import sys
import pandas as pd
from path_helpers import path
def main(root, old_name, new_name):
names = pd.Series([old_name, new_name], index=['old', 'new'])
underscore_names = names.map(lambda v: v.replace('-', '_'))
camel_names = names.str.split('-').map(lambda x: ''.join([y.title()
for y in x]))
# Replace all occurrences of provided original name with new name, and all
# occurrences where dashes (i.e., '-') are replaced with underscores.
#
# Dashes are used in Python package names, but underscores are used in
# Python module names.
for p in path(root).walkfiles():
data = p.bytes()
if '.git' not in p and (names.old in data or
underscore_names.old in data or
camel_names.old in data):
p.write_bytes(data.replace(names.old, names.new)
.replace(underscore_names.old, underscore_names.new)
.replace(camel_names.old, camel_names.new))
def rename_path(p):
if '.git' in p:
return
if underscore_names.old in p.name:
p.rename(p.parent.joinpath(p.name.replace(underscore_names.old,
underscore_names.new)))
if camel_names.old in p.name:
p.rename(p.parent.joinpath(p.name.replace(camel_names.old,
camel_names.new)))
# Rename all files/directories containing original name with new name, and
# all occurrences where dashes (i.e., '-') are replaced with underscores.
#
# Process list of paths in *reverse order* to avoid renaming parent
# directories before children.
for p in sorted(list(path(root).walkdirs()))[-1::-1]:
rename_path(p)
for p in path(root).walkfiles():
rename_path(p)
def parse_args(args=None):
"""Parses arguments, returns (options, args)."""
from argparse import ArgumentParser
if args is None:
args = sys.argv
    parser = ArgumentParser(description='Rename template project with '
'hyphen-separated <new name> (path names and in '
'files).')
parser.add_argument('new_name', help='New project name (e.g., '
' `my-new-project`)')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
main('.', 'dmf-control-board-firmware', args.new_name)
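# Illustrative invocation (hypothetical new project name):
#     python docs/rename.py my-new-project
# This rewrites 'dmf-control-board-firmware', its underscore variant
# 'dmf_control_board_firmware' and its camel-case variant
# 'DmfControlBoardFirmware' both in file contents and in file/directory names.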
| gpl-3.0 |
tdhopper/scikit-learn | sklearn/datasets/svmlight_format.py | 79 | 15976 | """This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text based source can be expensive. When working
repeatedly on the same dataset, it is recommended to wrap this
loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
    y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
and the samples vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
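# Illustrative sketch (hypothetical file names): loading a train/test pair in
# one call so that both matrices are guaranteed to share the same number of
# columns, as discussed in the Notes section above.
def _example_load_train_test(train_path="train.svm", test_path="test.svm"):
    X_train, y_train, X_test, y_test = load_svmlight_files(
        (train_path, test_path))
    assert X_train.shape[1] == X_test.shape[1]
    return X_train, y_train, X_test, y_test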
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
is_sp = int(hasattr(X, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
not store zero valued features hence is suitable for sparse dataset.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_labels]
Target values. Class labels must be an integer or float, or array-like
objects of integer or float for multilabel classifications.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
y = np.asarray(y)
if y.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (y.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != y.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], y.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
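# Illustrative round-trip sketch (not part of the original module): dump a
# small dense array to a temporary file and read it back.  Passing
# ``zero_based=True`` and ``n_features`` explicitly on reload keeps the
# column indexing and the shape identical to the original array.
def _example_svmlight_roundtrip():
    import tempfile
    X = np.array([[0., 1., 2.], [3., 0., 4.]])
    y = np.array([0., 1.])
    with tempfile.NamedTemporaryFile(mode="w+b", suffix=".svmlight") as tmp:
        dump_svmlight_file(X, y, tmp, zero_based=True)
        tmp.flush()
        tmp.seek(0)
        X_loaded, y_loaded = load_svmlight_file(tmp, n_features=3,
                                                zero_based=True)
    return np.allclose(X_loaded.toarray(), X) and np.allclose(y_loaded, y)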
| bsd-3-clause |
BDonnot/TensorflowHelpers | TensorflowHelpers/DataHandler.py | 1 | 45772 | import random
import datetime
import os
import pdb
import numpy as np
import tensorflow as tf
from .Layers import DTYPE_USED, DTYPE_NPY
# TODO READER: make them behave equally well, for now only TFRecordReader can preprocess for example
# TODO: READERS: correct the but when encountering nan's or infinite value in all reader classes
# TODO: have a better implementation of preprocessing function
class ExpDataReader:
ms_tensor=True # is the 'ms' field a tensor or a numpy array
def __init__(self, train, batch_size, fun_preprocess=lambda x: x):
"""Read the usefull data for the Experience to run
Store both input and outputs
:param train: if True this data reader concerns the training set
:param batch_size: number of data to read each time the iterator is called
"""
self.dataX = np.zeros(
(0, 0), dtype=DTYPE_NPY) # main data set (eg the training set)
# main data set (eg the training set)
self.dataY = np.zeros((0, 0), dtype=DTYPE_NPY)
self.dataset = tf.data.Dataset.from_tensor_slices(
(self.dataX, self.dataY))
self.ms = {"input": tf.constant(0., dtype=DTYPE_USED), "output": tf.constant(0., dtype=DTYPE_USED)}
self.sds = {"input": tf.constant(1., dtype=DTYPE_USED), "output": tf.constant(1., dtype=DTYPE_USED)}
def nrowsX(self):
"""
:return: The number of rows / examples of the input data set
"""
return self._nrows(self.dataX)
def _nrowsY(self):
"""
:return: The number of rows / examples of the output data set
"""
return self._nrows(self.dataY)
def _nrows(self, array):
"""
:param array: the concerned "array"
:return: The number of rows / examples of the data set "array"
"""
return array.shape[0]
def init(self, sess):
"""
Initialize the datasets (if need to be done)
:return:
"""
pass
def _shape_properly(self, ms, name):
"""
Transform a dictionnary of numpy array in a dictionnary of tensorflow tensor
:param ms:
:param name:
:return:
"""
return {k: tf.convert_to_tensor(v, name="{}_{}".format(name, k), dtype=DTYPE_USED) for k, v in ms.items()}
# TODO refactor ExpCSVDataReader and ExpTFrecordDataReader
# TODO at least _nrows, _ncols and _shape_properly are copy paste.
# TODO beside the logic is exactly the same!
class ExpCSVDataReader(ExpDataReader):
def __init__(self, train, batch_size, pathdata=".",
filename={"input": "X.csv", "output": "Y.csv"},
sizes={"input":1, "output":1},
num_thread=4, donnotcenter={},
fun_preprocess=lambda x: x,
ms=None, sds=None):
"""
:param train: if true concern the training set
:param batch_size: number of data to unpack each time
:param pathdata: path where the data are stored
        :param filename: names of the files with input data and output data [should be a dictionnary with the same keys as sizes]
:param sizes: number of columns of the data in X and Y [should be a 2 keys dictionnaries with keys "input" and "output"]
        :param fun_preprocess: function used to preprocess the data (kept for API consistency, not used by this reader)
:param num_thread: number of thread to read the data
:param donnotcenter: iterable: variable that won't be centered/reduced
:param ms: means of X data set (used for validation instead of recomputing the mean)
:param sds: standard deviation of Y data set (used for validation instead of recomputing the std)
"""
ExpDataReader.__init__(self, train=train, batch_size=batch_size)
self.sizes = sizes
# TODO optimization: do not parse the file if you nrows (training set parsed 2 times)
# count the number of lines
if (ms is None) or (sds is None):
fun_process = self._normalize
else:
fun_process = self._countlines
ms__, sds__, self.nrows = fun_process(path=pathdata, fns=filename, sizes=sizes)
ms_ = {}
sds_ = {}
for k in ms__.keys():
if k in donnotcenter:
ms_[k] = np.zeros(ms__[k].shape, dtype=DTYPE_NPY)
sds_[k] = np.ones(sds__[k].shape, dtype=DTYPE_NPY)
else:
ms_[k] = ms__[k]
sds_[k] = sds__[k]
self.ms = self._shape_properly(ms_, name="means") if ms is None else ms
self.sds = self._shape_properly(sds_, name="stds") if sds is None else sds
self.datasets = {}
for el in sizes.keys():
self.datasets[el] = tf.data.TextLineDataset(
[os.path.join(pathdata, filename[el])]).skip(1).map(
lambda line: self._parse_function(line, size=sizes[el], m=self.ms[el], std=self.sds[el]),
num_parallel_calls=num_thread
).prefetch(num_thread * 5)
self.dataset = tf.data.Dataset.zip(self.datasets)
if train:
self.dataset = self.dataset.repeat(-1)
self.dataset = self.dataset.shuffle(buffer_size=10000)
else:
self.dataset = self.dataset.repeat(1)
self.dataset = self.dataset.batch(batch_size=batch_size)
def _normalize(self, path, sizes, fns):
"""
Compute some statistics of the files fns.
:param path: the path where data are located
:param sizes: a dictionnary (with keys ["input", "output"]) containing the number of rows of the datasets
:param fns: a dictionnary (with keys ["input", "output"]) containing the names of the datasets
:return:
"""
ms = {}
sds = {}
nrows = None
prev_files = set()
for key in sizes.keys():
m, sd, nrows_tmp = self._normalize_aux(path, size=sizes[key], fn=fns[key])
ms[key] = m
sds[key] = sd
if nrows is not None:
if nrows_tmp != nrows:
error_str = "ExpCSVDataReader._normalize: The files {} and {} (located at {}) "
error_str += "does not count the same number of lines."
raise RuntimeError(error_str.format(fns["input"], prev_files, path))
prev_files.add(fns[key])
# mX, sdX, nrowsX = self._normalize_aux(path, size=sizes["input"], fn=fns["input"])
# mY, sdY, nrowsY = self._normalize_aux(path, size=sizes["output"], fn=fns["output"])
# if nrowsX != nrowsY:
# error_str = "ExpCSVDataReader._normalize: The files {} and {} (located at {}) "
# error_str += "does not count the same number of lines."
# raise RuntimeError(error_str.format(fns["input"], fns["output"], path))
#
# ms = {"input": mX, "output": mY}
# sds = {"input": sdX, "output": sdY}
return ms, sds, nrows
def _normalize_aux(self, path, size, fn):
"""
Compute some statistics of the file fn.
        fn should be a csv file with a semi-colon separator, containing only float objects, with a single header line.
:param path: the path where the file is
:param size: dimension of the file (number of columns)
:param fn: the file name
:return: the mean, the standard deviation, and the number of rows
"""
count = 0
acc = np.zeros(shape=(size))
acc2 = np.zeros(shape=(size))
with open(os.path.join(path, fn)) as fp:
fp.readline() # do not parse the header
for (count, li) in enumerate(fp, 1):
spl = li.split(";")
acc += [float(el) for el in spl]
acc2 += [float(el) * float(el) for el in spl]
count += 1
acc /= count
acc2 /= count
m = acc
std = np.sqrt(acc2 - acc * acc)
std[std <= 1e-3] = 1.
        # m and std are plain numpy arrays here, simply cast them to the working dtype
        m = m.astype(DTYPE_NPY)
        std = std.astype(DTYPE_NPY)
return m, std, count
def _countlines(self, path, sizes, fns):
"""
        Compute the number of lines of the files in fns (and check they all match).
        Each file in fns should be a csv file with a semi-colon separator, containing only float objects, with a single header line.
        :param path: the path where the file is
        :param sizes: dimensions of the files (number of columns), one entry per key
        :param fns: the file names, one entry per key
        :return: placeholder means and standard deviations (zeros and ones) and the number of rows
"""
nrows = None
prev_files = set()
for key in sizes.keys():
nrows_tmp = self._countlines_aux(path, fn=fns[key])
if nrows is not None:
if nrows_tmp != nrows:
error_str = "ExpCSVDataReader._normalize: The files {} and {} (located at {}) "
error_str += "does not count the same number of lines."
raise RuntimeError(error_str.format(fns[key], prev_files, path))
nrows = nrows_tmp
prev_files.add(fns[key])
ms = {k: np.zeros(v, dtype=DTYPE_NPY) for k, v in sizes.items()}
sds = {k: np.ones(v, dtype=DTYPE_NPY) for k, v in sizes.items()}
return ms, sds, nrows
def _countlines_aux(self, path, fn):
"""
Compute the number of rows of the file fn.
        fn should be a csv file with a semi-colon separator, containing only float objects, with a single header line.
:param path: the path where the file is located
:param fn: the file name
        :return: the number of rows of the file (excluding the header)
"""
count = 0
with open(os.path.join(path, fn)) as fp:
fp.readline() # do not count the header
for (count, li) in enumerate(fp, 1):
pass
return count
def _parse_function(self, csv_row, size, m, std):
"""
Read the data in the csv format
:param csv_row: the lines to parse
:param size: the number of columns
:param m: the mean over the whole data set
:param std: the standar deviation over the whole data set
:return:
"""
# TODO make a cleaner version of preprocessing!
record_defaults = [[0.0] for _ in range(size)]
row = tf.decode_csv(csv_row,
record_defaults=record_defaults,
field_delim=";")
row = row - m
row = row / std
return row
def _nrows(self, array):
"""
:param array: unused
:return: the number of rows in the training set
"""
return self.nrows
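# Illustrative sketch (assumed file names, sizes and location): building a
# training-set reader from two semi-colon separated csv files.  The returned
# object exposes ``dataset`` (a batched tf.data.Dataset) together with the
# per-variable means ``ms`` and standard deviations ``sds``.
def _example_csv_reader(pathdata="./data"):
    return ExpCSVDataReader(train=True, batch_size=32,
                            pathdata=pathdata,
                            filename={"input": "X.csv", "output": "Y.csv"},
                            sizes={"input": 10, "output": 5})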
class ExpTFrecordsDataReader(ExpDataReader):
def __init__(self, train, batch_size, donnotcenter={},
pathdata=".",
filename="data.tfrecord",
sizes={"input":1, "output":1},
num_thread=4,
ms=None, sds=None,
fun_preprocess=None,
add_noise={},
dtypes={}):
"""
ms (and sds) should be None or dictionnaries with at least the keys in vars, and tensorflow float32 tensors as values
:param train: if true concern the training set
:param batch_size: number of data to unpack each time
:param donnotcenter: iterable: variable that will not be centered / reduced
:param pathdata: path where the data are stored
:param filenames: names of the files with input data and output data
:param sizes: number of columns of the data in X and Y
:param num_thread: number of thread to read the data
        :param ms: means of the data set (used for validation instead of recomputing the means)
        :param sds: standard deviations of the data set (used for validation instead of recomputing the stds)
        :param fun_preprocess: functions used to preprocess data (before centering / reducing), as a dictionnary with variable names as keys
:param add_noise: iterable: in which data do you add noise
"""
if type(filename) == type(""):
filename = {filename}
ExpDataReader.__init__(self, train=train, batch_size=batch_size)
self.sizes = sizes
self.num_thread = num_thread
self.batch_size = batch_size
self.dtypes = dtypes
# pdb.set_trace()
self.features = {k: tf.FixedLenFeature((val,), tf.float32 if k not in dtypes else dtypes[k]
# ,default_value=[0.0 for _ in range(val)]
)
for k, val in sizes.items()}
self.funs_preprocess = {k: tf.identity for k in sizes.keys()}
if fun_preprocess is not None:
for k, fun in fun_preprocess.items():
self.funs_preprocess[k] = fun
self.donnotcenter = donnotcenter
# TODO optimization: do not parse the file if you know nrows (training set parsed 2 times)
# add noise when training, with customizable variance
if len(add_noise) and train:
self.sigma_noise = tf.get_variable(name="noise_std", trainable=False)
self.amount_noise_ph = tf.placeholder(dtype=DTYPE_USED, shape=(), name="skip_conn")
self.assign_noise = tf.assign(self.sigma_noise, self.amount_noise_ph, name="assign_noise_std")
else:
self.amount_noise_ph = tf.placeholder(dtype=DTYPE_USED, shape=(), name="skip_conn")
self.assign_noise = tf.no_op(name="donothing_noise_std")
# count the number of lines
if (ms is None) or (sds is None):
fun_process = self._normalize
else:
fun_process = self._countlines
ms__, sds__, self.nrows = fun_process(
path=pathdata, fn=filename, sizes=sizes)
ms_ = {}
sds_ = {}
for k in ms__.keys():
if k in donnotcenter:
ms_[k] = np.zeros(ms__[k].shape, dtype=DTYPE_NPY)
sds_[k] = np.ones(sds__[k].shape, dtype=DTYPE_NPY)
else:
ms_[k] = ms__[k]
sds_[k] = sds__[k]
# if "cali_tempo.tfrecord" in filename :# or 'val.tfrecord' in filename:
# self._normalize(path=pathdata, fn=filename, sizes=sizes)
# pdb.set_trace()
self.ms = self._shape_properly(ms_, name="means") if ms is None else ms
self.sds = self._shape_properly(sds_, name="stds") if sds is None else sds
self.dataset = tf.data.TFRecordDataset(
[os.path.join(pathdata, fn) for fn in filename]).map(
lambda line: self._parse_function(example_proto=line, sizes=sizes, ms=self.ms, stds=self.sds),
num_parallel_calls=num_thread,
# output_buffer_size=num_thread * 5
).prefetch(num_thread * 5)
# self.dataset = self.dataset.shard(10, 2)
if train:
self.dataset = self.dataset.repeat(-1)
self.dataset = self.dataset.shuffle(buffer_size=10000)
else:
self.dataset = self.dataset.repeat(1)
self.dataset = self.dataset.batch(batch_size=batch_size)
def _countlines(self, path, fn, sizes):
"""
:param path: the path where data are located
:param fn: the file names
:return: the number of lines of the files (must iterate through it line by line)
"""
ms = {el: np.zeros(1, dtype=DTYPE_NPY) for el in sizes}
sds = {el: np.ones(1, dtype=DTYPE_NPY) for el in sizes}
nb_total = 0
for fn_ in [os.path.join(path, el) for el in fn]:
            # count the records directly: an empty file contributes 0 and a
            # file with a single record contributes 1
            nb = sum(1 for _ in tf.python_io.tf_record_iterator(fn_))
            nb_total += nb
return ms, sds, nb_total
def _parse_function(self, example_proto, sizes, ms, stds):
"""
:param example_proto:
:param sizes:
:param ms:
:param stds:
:return:
"""
parsed_features = tf.parse_single_example(example_proto, self.features)
# TODO faster if I batch first! (use tf.pase_example instead)
for k in sizes.keys():
parsed_features[k] = self.funs_preprocess[k](parsed_features[k])
parsed_features[k] = tf.cast(parsed_features[k], dtype=DTYPE_USED)
parsed_features[k] = parsed_features[k] - ms[k]
parsed_features[k] = parsed_features[k]/stds[k]
return parsed_features
def _normalize(self, path, fn, sizes):
"""
Compute some statistics of the file fn.
        fn should be an iterable of tfrecord file names whose examples contain the features described by ``sizes``.
        side effect: will create some nodes in the tensorflow graph just to compute these statistics
:param path: the path where the file is
:param sizes: dimension of the file (number of columns) per type of data (dictionnary)
:param fn: the file name
:return: the mean, the standard deviation, and the number of rows
"""
acc = { k:np.zeros(shape=(v), dtype=np.float64) for k,v in sizes.items() }
acc2 = { k:np.zeros(shape=(v), dtype=np.float64) for k,v in sizes.items() }
msg_displayed = {k: 0 for k, _ in sizes.items()}
with tf.variable_scope("datareader_compute_means_vars"):
ms = {k: tf.constant(0.0, name="fake_means", dtype=DTYPE_USED) for k,_ in sizes.items()}
sds = {k: tf.constant(1.0, name="fake_stds", dtype=DTYPE_USED) for k,_ in sizes.items()}
dataset = tf.data.TFRecordDataset(
[os.path.join(path, el) for el in fn]).map(
lambda line: self._parse_function(example_proto=line, sizes=sizes, ms=ms, stds=sds),
num_parallel_calls=self.num_thread
).prefetch(self.num_thread * 5).batch(self.num_thread).repeat(1)
iterator = dataset.make_one_shot_iterator()
parsed_features = iterator.get_next(name="fake_iterator")
# I need a session to parse the features
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# from tqdm import tqdm
count = 0
with tf.Session(config=config) as sess:
while True:
count += 1
try:
pf = sess.run(parsed_features)
# print(np.sum(np.abs(pf["conso_X"])))
# pdb.set_trace()
for k in sizes.keys():
vect = pf[k].astype(np.float64)
if np.any(~np.isfinite(vect)):
if msg_displayed[k] == 0:
msg = "W Datareader : there are infinite or nan values in the dataset named {}"
msg += ". We replaced it with the current average (by column)"
print(msg.format(k))
msg_displayed[k] += 1
# pdb.set_trace()
# import numpy.ma as ma
vect = np.where(np.isfinite(vect), vect, acc[k]/(count*self.num_thread))
# pdb.set_trace()
# vect[~np.isfinite(vect)] = acc[k]/(count*self.num_thread)
acc[k] += np.nansum(vect, axis=0)
acc2[k] += np.nansum(vect*vect, axis=0)
# if np.any(~np.isfinite(acc2[k])):
# pdb.set_trace()
except tf.errors.OutOfRangeError:
break
for k in sizes.keys():
if msg_displayed[k] != 0:
msg = "W Datareader : there are at least {} lines where infinite or nan values "
msg += " were encounteredin the dataset named {}"
msg += ". They were replaced with the current average (by column) at the time of computation"
print(msg.format(msg_displayed[k], k))
acc = {k: v/(count*self.num_thread) for k,v in acc.items()}
acc2 = {k: v/(count*self.num_thread) for k,v in acc2.items()}
ms = acc
# pdb.set_trace()
stds = {k: np.sqrt(acc2[k] - v * v) for k,v in acc.items()}
for k,v in stds.items():
stds[k][stds[k] <= 1e-3] = 1.0
ms = {k: v.astype(DTYPE_NPY) for k, v in ms.items()}
stds = {k: v.astype(DTYPE_NPY) for k, v in stds.items()}
return ms, stds, count
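    # Sketch of the moment accumulation performed in _normalize above (illustrative only):
    # with acc = sum(x) and acc2 = sum(x * x) accumulated over roughly N = count * num_thread rows,
    #   mean = acc / N
    #   std  = sqrt(acc2 / N - mean ** 2)
    # e.g. for a toy numpy array X of shape (N, d):
    #   mean = X.sum(axis=0) / N
    #   std = np.sqrt((X * X).sum(axis=0) / N - mean ** 2)
    # Columns with std <= 1e-3 are then clamped to 1.0 to avoid division by (almost) zero.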
def _nrows(self, array):
"""
:param array: unused
:return: the number of rows in the training set
"""
return self.nrows
class ExpData:
def __init__(self, batch_size=50, sizemax=int(1e4),
pathdata=".", path_exp=".",
classData=ExpDataReader,
sizes={"input":1, "output":1},
argsTdata=(), kwargsTdata={},
argsVdata=(), kwargsVdata={},
otherdsinfo = {},
donnotcenter={},
fun_preprocess=None,
dtypes={}
):
""" The base class for every 'data' subclasses, depending on the problem
:param batch_size: the size of the minibatch
:param pathdata: the path where data are stored
:param path_exp: the path where the experiment is saved
:param sizemax: maximum size of data chunk that will be "fed" to the computation graph
:param classData: the class of data (should derive from 'ExpDataReader')
:param sizes: the sizes (number of columns) of each dataset
:param argsTdata: default arguments to build an instance of class 'expDataReader' (build the training data set)
:param kwargsTdata: keywords arguments to build an instance of class 'expDataReader' (build the training data set)
:param argsVdata: default arguments to build an instance of class 'expDataReader' (build the validation data set)
:param kwargsVdata: keywords arguments to build an instance of class 'expDataReader' (build the validation data set)
        :param otherdsinfo: dictionary with dataset names as keys and, as values, dictionaries with keys "argsdata" (tuple) and "kwargsdata" (dictionary)
        :param donnotcenter: set of data names that won't be centered/reduced
        :param fun_preprocess: functions used to preprocess the data (before centering / reducing), not applied to variables in donnotcenter (pairs: function to preprocess, function to undo the preprocessing)
"""
# subdirectory name of the experiment where means and std will be stored
self.means_vars_directory = "means_vars"
self.path_exp = path_exp
self.sizes = sizes
self.donnotcenter = donnotcenter
ms, sds = self._load_npy_means_stds(classData)
self.classData = classData
self.funs_preprocess = {varname: (tf.identity, lambda x :x ) for varname in self.sizes.keys()}
if fun_preprocess is not None:
for varname, fun in fun_preprocess.items():
self.funs_preprocess[varname]= fun
fun_preprocess = {k: v[0] for k, v in self.funs_preprocess.items()}
# pdb.set_trace()
# the data for training (fitting the models parameters)
self.trainingData = classData(*argsTdata,
donnotcenter=donnotcenter,
pathdata=pathdata,
sizes=sizes,
train=True,
batch_size=batch_size,
ms=ms,
sds=sds,
fun_preprocess=fun_preprocess,
dtypes=dtypes,
**kwargsTdata)
self.sizes = self.trainingData.sizes
# get the values of means and standard deviation of the training set,
# to be use in the others sets
self.ms = self.trainingData.ms
self.sds = self.trainingData.sds
# the data for training (only used when reporting error on the whole
# set)
self.trainData = classData(*argsTdata,
donnotcenter=donnotcenter,
pathdata=pathdata,
sizes=self.sizes,
train=False,
batch_size=sizemax,
ms=self.ms,
sds=self.sds,
fun_preprocess=fun_preprocess,
dtypes=dtypes,
**kwargsTdata)
# the data for validation set (fitting the models hyper parameters --
# only used when reporting error on the whole set)
self.valData = classData(*argsVdata,
donnotcenter=donnotcenter,
pathdata=pathdata,
sizes=self.sizes,
train=False,
batch_size=sizemax,
ms=self.ms,
sds=self.sds,
fun_preprocess=fun_preprocess,
dtypes=dtypes,
**kwargsVdata)
self.sizemax = sizemax # size maximum of a "minibatch" eg the maximum number of examples that will be fed
# at once for making a single forward computation
self.iterator = tf.data.Iterator.from_structure(
output_types=self.trainingData.dataset.output_types,
output_shapes=self.trainingData.dataset.output_shapes)
self.true_data = self.iterator.get_next(
name="true_data")
# pdb.set_trace()
self.training_init_op = self.iterator.make_initializer(
self.trainingData.dataset)
self.train_init_op = self.iterator.make_initializer(
self.trainData.dataset)
self.validation_init_op = self.iterator.make_initializer(
self.valData.dataset)
self.otherdatasets = {}
self.otheriterator_init = {}
for otherdsname, values in otherdsinfo.items():
self.otherdatasets[otherdsname] = classData(*values["argsdata"],
pathdata=pathdata,
sizes=self.sizes,
train=False,
batch_size=sizemax,
ms=self.ms,
sds=self.sds,
fun_preprocess=fun_preprocess,
dtypes=dtypes,
**values["kwargsdata"]
)
self.otheriterator_init[otherdsname] = self.iterator.make_initializer(self.otherdatasets[otherdsname].dataset)
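    # Illustrative construction sketch (paths, file names and sizes below are made up):
    #   data = ExpData(batch_size=32,
    #                  pathdata="/path/to/tfrecords",
    #                  path_exp="/path/to/experiment",
    #                  classData=ExpTFrecordsDataReader,
    #                  sizes={"prod_q": 54},
    #                  kwargsTdata={"filename": ["train.tfrecord"]},
    #                  kwargsVdata={"filename": ["val.tfrecord"]},
    #                  otherdsinfo={"test": {"argsdata": (),
    #                                        "kwargsdata": {"filename": ["test.tfrecord"]}}})
    # The shared iterator is then switched between datasets with activate_trainining_set(),
    # activate_val_set() or activate_dataset("test") defined below.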
def activate_val_set(self):
dataset = self.valData
initop = self.validation_init_op
return dataset, initop
def activate_trainining_set(self):
dataset = self.trainingData
initop = self.training_init_op
return dataset, initop
def activate_dataset(self, dataset_name):
dataset = self.otherdatasets[dataset_name]
initop = self.otheriterator_init[dataset_name]
return dataset, initop
def activate_trainining_set_sameorder(self):
dataset = self.trainData
initop = self.train_init_op
return dataset, initop
def _load_npy_means_stds(self, classData):
"""
If the means and variance have already been computed, it will load them from the hard drive
:param classData: the data class used
:return: ms, sds with
ms = None if data does not exists, otherwise the dictionnary of means for each variable in "sizes"
"""
if not os.path.exists(os.path.join(self.path_exp, self.means_vars_directory)):
return None, None
else:
isOk = True
for k in self.sizes.keys():
mE = os.path.exists(os.path.join(self.path_exp, self.means_vars_directory, "ms-{}.npy".format(k)))
sE = os.path.exists(os.path.join(self.path_exp, self.means_vars_directory, "sds-{}.npy".format(k)))
if not mE or not sE:
isOk = False
break
if not isOk:
return None, None
else:
ms = {}
sds = {}
for k in self.sizes.keys():
m = np.load(os.path.join(self.path_exp, self.means_vars_directory, "ms-{}.npy".format(k)))
s = np.load(os.path.join(self.path_exp, self.means_vars_directory, "sds-{}.npy".format(k)))
if k in self.donnotcenter:
m = np.zeros(m.shape, dtype=DTYPE_NPY)
s = np.ones(s.shape, dtype=DTYPE_NPY)
ms[k] = m
sds[k] = s
# pdb.set_trace()
if classData.ms_tensor:
ms = self._shape_properly(ms, name="means")
sds = self._shape_properly(sds, name="stds")
return ms, sds
def _shape_properly(self, ms, name):
"""
TODO copy paste from TFDataReader
Transform a dictionnary of numpy array in a dictionnary of tensorflow tensor
:param name
:param ms:
:return:
"""
return {k: tf.convert_to_tensor(v, name="{}_{}".format(name, k), dtype=DTYPE_USED) for k, v in ms.items()}
def getnrows(self):
"""
:return: Number of row of the training set
"""
return self.trainData.nrowsX()
def fancydescrption(self):
"""
:return: A description for an instance of this data type
"""
return "Standard data type"
def gettype(self):
"""
:return: the type of data this is.
"""
return "ExpData"
def getnrowsval(self):
"""
:return: Number of row of the validation set
"""
return self.valData.nrowsX()
def computetensorboard(self,
sess,
graph,
writers,
xval,
minibatchnum,
sum=False,
dict_summary=None
):
"""
Compute and log (using writers) the errors on the training set and validation set
Return the error ON THE VALIDATION SET
:param sess: a tensorflow session
:param graph: an object of class ExpGraph
:param writers: an object of class ExpWriter
:param xval: the index value of the tensorboard run
:param minibatchnum: the number of minibatches computed
        :param sum: if true, return the sum of the training and validation losses, otherwise return the error ON THE VALIDATION SET
        :param dict_summary: optional dictionary updated in place with the computed losses
        :return: a flag telling whether a NaN/infinite loss was encountered, and the error ON THE VALIDATION SET (or the sum of both losses if sum is true)
"""
# TODO why is it in data ?
valloss = np.NaN
        # switch the reader to the "train" dataset for reporting the
# training error
sess.run(self.train_init_op)
error_nan, trainloss = self.computetensorboard_aux(
sess=sess, graph=graph, writer=writers.tfwriter.trainwriter, xval=xval,
minibatchnum=minibatchnum, train=True, name="Train", textlogger=writers.logger)
if not error_nan and dict_summary is not None:
dict_summary["training_loss"] = float(trainloss)
        # switch the reader to the "validation" dataset for reporting the
        # validation error
sess.run(self.validation_init_op)
if not error_nan:
error_nan, valloss = self.computetensorboard_aux(
sess=sess, graph=graph, writer=writers.tfwriter.valwriter, xval=xval,
minibatchnum=minibatchnum, train=False, name="Validation", textlogger=writers.logger)
if not error_nan and dict_summary is not None:
dict_summary["validation_loss"] = float(valloss)
if not sum:
res = valloss
else:
res = trainloss + valloss
sess.run(self.training_init_op)
return error_nan, res
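    # Rough usage sketch inside a training loop (graph.optimize and log_every are
    # assumptions for illustration, not attributes defined in this file):
    #   for minibatchnum in range(nb_minibatches):
    #       graph.run(sess, toberun=graph.optimize)
    #       if minibatchnum % log_every == 0:
    #           error_nan, valloss = data.computetensorboard(
    #               sess=sess, graph=graph, writers=writers,
    #               xval=minibatchnum, minibatchnum=minibatchnum)
    #           if error_nan:
    #               break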
def computetensorboard_aux(
self,
sess,
graph,
writer,
textlogger,
name,
xval,
minibatchnum,
train=True,
dict_summary=None):
"""
Compute the error on a whole data set.
Report the results in the text logger and in tensorboard.
        Chunks of data of size at most 'self.sizemax' are fed one chunk at a time.
        :param sess: the tensorflow session
        :param graph: the ExpGraph to be used for the computation
:param writer: the tensorflow writer to use
:param textlogger: the text logger to use to store the information
:param name: the name displayed on the file logger
:param xval: the 'x value' to be written in tensorboard
:param minibatchnum: the current number of minibatches
:param train: does it concern the training set
:return:
"""
acc_loss = 0.
error_nan = False
while True:
try:
summary, loss_ = graph.run(
sess, toberun=[
graph.mergedsummaryvar, graph.loss])
if train:
writer.add_summary(summary, xval)
else:
writer.add_summary(summary, xval)
acc_loss += loss_
error_nan = not np.isfinite(loss_)
if error_nan:
break
elif dict_summary is not None:
dict_summary["loss_{}".format(name)] = float(loss_)
except tf.errors.OutOfRangeError:
break
# name = "Train" if train else "Validation"
textlogger.info(
"{} l2 error after {} minibatches : {}".format(
name, minibatchnum, acc_loss))
return error_nan, acc_loss
def combinerescompute(self, res, curr, toberun, indx=None):
"""
Combines the previous results (in res) and the new one (in curr) to get the new one
:param res: previous results
:param curr: last results computed
:param toberun: what have been run
:param indx: on which part of the data
:return:
"""
if toberun is None:
res += np.float32(curr) # TODO false in case of mean for example!
else:
res[indx, :] = curr
return res
def initres(self, dataset, size=0):
"""
Init the results structure to store the results of a forward pass on the whole 'dataset'
:param dataset: the data for which the resutls will be computed
:param size: the size you want (by default dataset.nrowsX()) [unused in this version]
:return:
"""
init = np.ndarray(
(dataset.nrowsX(),
dataset.ncolsY()),
dtype=DTYPE_USED)
return init
def getdata(self):
"""
:return: the data read and parsed dictionnary with: key=name, values=the associated tensor
"""
return self.true_data
def init(self, sess):
"""
initialize the data if needed
:param sess:
"""
sess.run(self.training_init_op)
if self.classData.ms_tensor:
# the data class represents the means and standard deviation as tensors
self.sds = sess.run(self.sds)
self.ms = sess.run(self.ms)
if not os.path.exists(os.path.join(self.path_exp, self.means_vars_directory)):
os.mkdir(os.path.join(self.path_exp, self.means_vars_directory))
# pdb.set_trace()
for k in self.sizes.keys():
np.save(file=os.path.join(self.path_exp, self.means_vars_directory, "ms-{}.npy".format(k)),
arr=self.ms[k])
np.save(file=os.path.join(self.path_exp, self.means_vars_directory, "sds-{}.npy".format(k)),
arr=self.sds[k])
def computetensorboard_annex(self, sess, writers, graph, xval, minibatchnum, name, dict_summary=None):
"""
Will compute the error on the data "referenced" by "name", and store it using the TFWriters "writers"
:param sess: a tensorflow session
:param graph: an object of class ExpGraph
:param writers: an object of class ExpWriter
:param xval: the index value of the tensorboard run
:param minibatchnum: the number of minibatches computed
:param name: the name of the dataset you want the error from
:return: nothing
"""
if not name in self.otheriterator_init:
error_str = "ExpData.computetensorboard_annex you ask to compute the error on the dataset name \"{}\""
error_str += " but it does not exists.\nMake sure to have passed the proper \"otherdsinfo\" arguments"
error_str += " when you build your ExpData. For example :\n"
error_str += "otherdsinfo={{ \"{}\":{{\"argsdata\":(), \"kwargsdata\": {{ \"filename\": "
error_str += "\"test_example.tfrecord\" }} }} }} }} \n"
error_str += "if you want to link the dataset with data coming from \"test_example.tfrecord\" to the name \"{}\""
raise RuntimeError(error_str.format(name, name, name))
sess.run(self.otheriterator_init[name])
self.computetensorboard_aux(
sess=sess, graph=graph, writer=writers.tfwriter.othersavers[name], xval=xval,
minibatchnum=minibatchnum, train=False, name=name, textlogger=writers.logger, dict_summary=dict_summary)
sess.run(self.training_init_op)
class ExpInMemoryDataReader(ExpDataReader):
ms_tensor = False # is the "self.ms" (or "self.sds") a tensor (True) or a numpy array (False)
def __init__(self, train, batch_size, pathdata=".",
filename={"input": "X.npy" , "output": ("Y.npy",)},
sizes=None,
num_thread=4, donnotcenter={},
fun_preprocess=lambda x: x,
dtypes={},
ms=None, sds=None,
numpy_=True,
panda_=False,
sep=";"):
"""
Load data in memory, and then use an iterator to process it.
Data can be read from numpy array (numpy_ = True) or from pandas data frame (numpy_ = False, panda_=True).
        In the latter case, dataframes are converted to numpy arrays.
        :param train:
        :param batch_size:
        :param pathdata:
        :param filename:
:param sizes:
:param num_thread:
:param donnotcenter:
:param ms:
:param sds:
"""
self.train = train
self.filename = filename
if numpy_:
mmap_mode = None if train else "c" # "c" stand for: data are kept on the hard drive, but can be modified in memory
self.datasets = {k: np.load(os.path.join(pathdata, v), mmap_mode=mmap_mode) for k, v in filename.items()}
elif panda_:
import pandas as pd
self.datasets = {k: pd.read_csv(os.path.join(pathdata, v), sep=sep).values for k, v in filename.items()}
else:
            raise RuntimeError("For now only pandas dataframes and numpy arrays are supported in ExpInMemoryDataReader")
self.datasets = {k: v.astype(DTYPE_NPY) for k, v in self.datasets.items() }
if self.train:
sizes = {k: el.shape[1] for k, el in self.datasets.items()}
self.sizes = sizes
else:
self.sizes = sizes
self._check_validity()
# pdb.set_trace()
if ms is None:
ms_ = {k: np.mean(v, axis=0) for k,v in self.datasets.items()}
self.ms = {k: v for k,v in ms_.items() if not k in donnotcenter}
for el in donnotcenter:
self.ms[el] = np.zeros(ms_[el].shape, dtype=DTYPE_NPY)
else:
self.ms = ms
if sds is None:
sds_ = {k: np.std(v, axis=0) for k,v in self.datasets.items()}
for k, v in sds_.items():
sds_[k][v <= 1e-3] = 1.0
self.sds = {k: v for k,v in sds_.items() if not k in donnotcenter}
for el in donnotcenter:
self.sds[el] = np.ones(sds_[el].shape, dtype=DTYPE_NPY)
else:
self.sds = sds
self.datasets = {k: (v-self.ms[k])/self.sds[k] for k, v in self.datasets.items()}
# self.placeholders = {k: tf.placeholder(shape=(None, v), dtype=tf.float32) for k,v in sizes.items()}
self.batch_size = batch_size
self.nrows = self.datasets[next(iter(filename.keys()))].shape[0]
self.indexDataMinibatch = list(range(self.nrows))
if self.train:
random.shuffle(self.indexDataMinibatch)
self.lastIndexDataMinibatch = 0
self.train = train
self.features = {k: tf.FixedLenFeature(val, tf.float32)#, default_value=[0.0 for _ in range(val)])
for k, val in sizes.items()}
self.dataset = tf.data.Dataset.from_generator(generator=self.generator,
output_types={k: v.dtype for k, v in self.features.items()},
output_shapes={k: v.shape for k, v in self.features.items()})
# pdb.set_trace()
if train:
self.dataset = self.dataset.repeat(-1)
# self.dataset = self.dataset.shuffle(buffer_size=10000)
else:
self.dataset = self.dataset.repeat(1)
self.dataset = self.dataset.batch(batch_size=batch_size)
# pdb.set_trace()
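    # Illustrative construction sketch (file names below are placeholders):
    #   reader = ExpInMemoryDataReader(train=True, batch_size=32,
    #                                  pathdata="/path/to/data",
    #                                  filename={"input": "X.npy", "output": "Y.npy"})
    # Here reader.ms / reader.sds are plain numpy arrays (ms_tensor is False) and
    # reader.dataset is a tf.data.Dataset built from the generator() method below.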
def _check_validity(self):
nr_ = None
for k, v in self.datasets.items():
if nr_ is None:
nr_ = v.shape[0]
else:
if v.shape[0] != nr_:
raise RuntimeError("Dataset {} has not the same size as the other. Stopping here.".format(self.filename[k]))
if v.shape[0] == 0:
raise RuntimeError("Empty dataset file for {}. Stopping there".format(self.filename[k]))
if v.shape[1] != self.sizes[k]:
raise RuntimeError("Dataset {} has not the same number of columns as specified in the sizes.".format(self.filename[k]))
def generator(self):
"""
:return: the data of one line of the dataset
"""
new_epoch = False
while not new_epoch:
new_epoch, indx = self.getnextindexes()
yield {k: v[indx, :].flatten() for k, v in self.datasets.items()}
        return  # generators end by returning; raising StopIteration here breaks under PEP 479 (Python 3.7+)
def getnextindexes(self):
"""
:return: the next index to be considered
"""
size = 1
new_epoch = False
if self.lastIndexDataMinibatch+size < self.nrows:
prev = self.lastIndexDataMinibatch
self.lastIndexDataMinibatch += size
res = self.indexDataMinibatch[prev:self.lastIndexDataMinibatch]
else:
new_epoch = True
prev = self.lastIndexDataMinibatch
res = self.indexDataMinibatch[prev:]
if self.train:
random.shuffle(self.indexDataMinibatch)
self.lastIndexDataMinibatch = size-len(res)
res += self.indexDataMinibatch[:self.lastIndexDataMinibatch]
self.lastIndexDataMinibatch = 0
return new_epoch, res
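    # Tiny worked example of the wrap-around above (illustrative): with nrows = 5,
    # indexDataMinibatch = [3, 0, 4, 1, 2] and lastIndexDataMinibatch = 4, the next call
    # returns (True, [2]); in training mode the index list is then reshuffled so the
    # following epoch starts from a fresh permutation.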
def init(self, sess):
"""
:param sess:
:return:
"""
# tf.train.start_queue_runners(sess=sess)
pass
def nrowsX(self):
"""
:return: The number of rows / examples of the input data set
"""
return self.nrows
class ExpNpyDataReader(ExpInMemoryDataReader):
    def __init__(self, *args, **kwargs):
ExpInMemoryDataReader.__init__(self, *args, **kwargs)
if __name__=="__main__":
test_tf_nb_lines = ExpTFrecordsDataReader(train=True, batch_size=1)
print("The number of lines computed is {}".format(test_tf_nb_lines._countlines(path="/home/bdonnot/Documents/PyHades2/tfrecords_118_5000",
fn="neighbours-test.tfrecord",
sizes={"prod_q": 54})))
print("Test lines parsing {}".format(test_tf_nb_lines._normalize(path="/home/bdonnot/Documents/PyHades2/tfrecords_118_5000",
fn="neighbours-test.tfrecord",
sizes={"prod_q":54})))
| gpl-3.0 |
gregbehm/twittertools | twittertools.py | 1 | 29539 | """
twittertools.py
Twitter API data acquisition tools
Author: Greg Behm
Copyright 2017-2019
Caution!
This software is for personal use and demonstration only.
Any Twitter content collected with this software should be
used in accordance with Twitter's Terms of Service:
https://twitter.com/en/tos
Read the Twitter Terms of Service, Twitter Developer Agreement,
and Twitter Developer Policy before collecting Twitter content
with this or any derivative software.
A few of the methods presented here were derived (and improved) from
sample code in the book Mining the Social Web, 2nd Edition,
copyright 2014 by Matthew A. Russell, O'Reilly, ISBN 978-1449367619.
The book's author and publisher give permission to reuse sample code
under the Simplified BSD License. More information is found at:
https://github.com/ptwobrussell/Mining-the-Social-Web-2nd-Edition
"""
import collections
from contextlib import suppress
import datetime
import itertools
import json
import re
import time
import pandas
# https://pypi.python.org/pypi/twitter
import twitter
# --- Define functions --- #
def get_api(credentials_file):
"""
Create an authenticated twitter.Twitter() API object.
:param credentials_file: Twitter application credentials JSON file name.
:return: twitter.Twitter() API object
"""
with open(credentials_file) as f:
data = json.load(f)
auth = twitter.oauth.OAuth(data['access_token'],
data['access_token_secret'],
data['consumer_key'],
data['consumer_secret'])
return twitter.Twitter(auth=auth)
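# Hypothetical usage sketch: the credentials file is expected to be a JSON file
# holding the four OAuth fields read above, e.g.
#   {"consumer_key": "...", "consumer_secret": "...",
#    "access_token": "...", "access_token_secret": "..."}
# after which the API object can be created and queried:
#   api = get_api("credentials.json")
#   api.search.tweets(q="python", count=10)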
def save_to_json(items, path_or_buf):
"""
Save an iterable of dict objects to a JSON file.
:param items: Iterable of dictionary objects
:param path_or_buf: String, file path or file handle
:return: None
"""
with open(path_or_buf, mode='w', encoding='utf-8-sig') as f:
json.dump(items, f)
def save_to_csv(items, unpack_func, path_or_buf):
"""
    Save an iterable of Twitter objects to a CSV file, saving
    select fields as extracted by the given unpack_func.
:param items: Iterable of Twitter objects
:param unpack_func: function to extract select fields
from individual Twitter objects,
such as tweets and user profiles
Example:
save_to_csv(tweets, unpack_tweets, 'tweets.csv')
:param path_or_buf: String, file path or file handle
:return: None
"""
df = pandas.DataFrame(unpack_func(item) for item in items)
df.to_csv(path_or_buf, index=False, encoding='utf-8-sig')
def save_tweets(tweets, path_or_buf):
"""
Save an iterable of tweets to a CSV file, saving select
fields as defined in function unpack_tweet().
:param tweets: Iterable of tweet objects
:param path_or_buf: String, file path or file handle
:return: None
"""
save_to_csv(tweets, unpack_tweet, path_or_buf)
def save_profiles(profiles, path_or_buf):
"""
Save an iterable of user objects to a CSV file, saving select
fields as defined in function unpack_profile().
:param profiles: Iterable of user objects
:param path_or_buf: String, file path or file handle
:return: None
"""
save_to_csv(profiles, unpack_profile, path_or_buf)
def get_data(item, *args):
"""
Search input item dictionary by key words for single-value
or list-of-value items. The caller must specify the necessary
key words in correct hierarchical order to retrieve the
requested item.
Examples:
To get a tweet's user screen_name, e.g.
{"user": {..., "screen_name": "katyperry", ...}},
call get_data(tweet, 'user', 'screen_name').
To get a tweet's hashtags, e.g.
{"entities": {"hashtags": [{"text": "blockchain",...}},
    call get_data(tweet, 'entities', 'hashtags', 'text').
:param item: Twitter dictionary object, e.g. Tweet or User
:param args: Item-search dictionary keys
:return: Requested item or object. If the last args key word
retrieves a list of objects, get_data() returns a
concatenated string of list items.
"""
def next_item(item, key):
"""
Get a dictionary or list object, depending on whether
the input item is a dictionary or list.
:param item: Twitter object dictionary or list
:param key: Target element key word
:return: If item is a dictionary, return the value for input key.
If item is a list with dictionary elements containing
the input key, return the list.
"""
# Try item as a dictionary
try:
# return the dictionary item's value
return item[key]
        except (KeyError, TypeError):
# Try item as a list of dictionaries
try:
if item[0].get(key, None):
# return the dictionary
return item
            except (IndexError, KeyError, AttributeError, TypeError):
# Nothing relevant found
return None
# def next_item
for arg in args:
item = next_item(item, arg)
if item is None:
return None
# Handle list items
if isinstance(item, list):
# Try as list of dictionaries
with suppress(IndexError, KeyError):
key = args[-1]
return ' '.join(elem[key] for elem in item)
# Flatten list of lists
flat = itertools.chain.from_iterable(item)
return ' '.join(str(elem) for elem in flat)
# Return any other item as-is
return item
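# Worked example on a toy dictionary (not real Twitter content):
#   tweet = {"user": {"screen_name": "example"},
#            "entities": {"hashtags": [{"text": "python"}, {"text": "data"}]}}
#   get_data(tweet, 'user', 'screen_name')           -> 'example'
#   get_data(tweet, 'entities', 'hashtags', 'text')  -> 'python data'
#   get_data(tweet, 'entities', 'media', 'url')      -> None (no media entity present)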
def unpack_tweet(tweet):
"""
Extract select fields from the given tweet object.
:param tweet: Twitter Tweet object
:return: Ordered dictionary of select tweet field values
"""
try:
if tweet['retweeted_status'] is None:
text = tweet['full_text']
else:
text = tweet['retweeted_status']['full_text']
except KeyError:
text = tweet['full_text']
fields = [('screen_name', get_data(tweet, 'user', 'screen_name')),
('created', format_datetime(get_data(tweet, 'created_at'))),
('full_text', clean_whitespace(text)),
('retweet_count', get_data(tweet, 'retweet_count')),
('hashtags', get_data(tweet, 'entities', 'hashtags', 'text')),
('mentions', get_data(tweet, 'entities', 'user_mentions', 'screen_name')),
('urls', get_data(tweet, 'entities', 'urls', 'url')),
('expanded_urls', get_data(tweet, 'entities', 'urls', 'expanded_url')),
('media_urls', get_data(tweet, 'entities', 'media', 'url')),
('media_types', get_data(tweet, 'entities', 'media', 'type')),
('tweet_id', get_data(tweet, 'id_str')),
('symbols', get_data(tweet, 'entities', 'symbols', 'text'))
]
return collections.OrderedDict(fields)
def unpack_profile(profile):
"""
Extract select fields from the given user object.
:param profile: Twitter User object
:return: Ordered dictionary of select user field values
"""
fields = [('name', get_data(profile, 'name')),
('screen_name', get_data(profile, 'screen_name')),
('id', get_data(profile, 'id_str')),
('description', clean_whitespace(get_data(profile, 'description'))),
('location', get_data(profile, 'location')),
('tweets', get_data(profile, 'statuses_count')),
('following', get_data(profile, 'friends_count')),
('followers', get_data(profile, 'followers_count')),
('favorites', get_data(profile, 'favourites_count')),
('language', get_data(profile, 'lang')),
('listed', get_data(profile, 'listed_count')),
('created', format_datetime(get_data(profile, 'created_at'))),
('time_zone', get_data(profile, 'time_zone')),
('protected', get_data(profile, 'protected')),
('verified', get_data(profile, 'verified')),
('geo_enabled', get_data(profile, 'geo_enabled'))
]
return collections.OrderedDict(fields)
def format_datetime(date_str):
"""
Convert Twitter's date time format ("Thu Jul 20 19:34:20 +0000 2017")
to ISO 8601 International Standard Date and Time format.
    :param date_str: Date string in Twitter's format, e.g. "Thu Jul 20 19:34:20 +0000 2017"
    :return: ISO 8601 formatted date-time string, or None if date_str cannot be parsed
"""
with suppress(TypeError, ValueError):
dt = datetime.datetime.strptime(date_str, '%a %b %d %H:%M:%S +0000 %Y')
return dt.strftime('%Y-%m-%dT%H:%M:%SZ')
return None
def clean_whitespace(text):
"""
Remove extraneous whitespace characters (includes e.g. newlines)
:param text: Input text
:return: Cleaned text with unwanted whitespace characters replaced
with single spaces.
"""
    return re.sub(r'\s+', ' ', text)
# --- Define classes --- #
class TwitterTools:
"""
twittertools Twitter API class
"""
def __init__(self, credentials_file):
self.credentials = credentials_file
self.api = get_api(self.credentials)
if self.api:
self.api_endpoint_method = {
'/application/rate_limit_status': self.api.application.rate_limit_status,
'/favorites/list': self.api.favorites.list,
'/followers/ids': self.api.followers.ids,
'/friends/ids': self.api.friends.ids,
'/lists/create': self.api.lists.create,
'/lists/members/create': self.api.lists.members.create,
'/lists/members/create_all': self.api.lists.members.create_all,
'/search/tweets': self.api.search.tweets,
'/statuses/home_timeline': self.api.statuses.home_timeline,
'/statuses/user_timeline': self.api.statuses.user_timeline,
'/statuses/lookup': self.api.statuses.lookup,
'/statuses/update': self.api.statuses.update,
'/trends/available': self.api.trends.available,
'/trends/closest': self.api.trends.closest,
'/trends/place': self.api.trends.place,
'/users/lookup': self.api.users.lookup
}
def __repr__(self):
return f'{self.__class__.__name__}({self.credentials!r})'
def endpoint_request(self, endpoint, *args, **kwargs):
"""
Send Twitter API requests (e.g. GET, POST), handle request errors,
and return requested Twitter content.
:param endpoint: Endpoint request string, e.g. '/search/tweets'
:param args: Optional, user-supplied positional arguments
:param kwargs: Optional, user-supplied keyword arguments
:return: Twitter content, defined by endpoint request.
"""
def handle_http_error(error, endpoint, wait, retry=True):
"""
Handle common twitter.api.TwitterHTTPError(s)
:param error: twitter.api.TwitterHTTPError error object
:param endpoint: Endpoint request string, e.g. '/search/tweets'
:param wait: Wait period, in seconds
:param retry: Retry on error; default True
:return: Updated wait time
"""
# See https://dev.twitter.com/docs/error-codes-responses
errors = {401: '(Unauthorized)',
403: '(Forbidden)',
404: '(Not Found)',
429: '(Rate Limit Exceeded)',
500: '(Internal Server Error)',
502: '(Bad Gateway)',
503: '(Service Unavailable)',
504: '(Gateway Timeout)'
}
ecode = error.e.code
now = f'{datetime.datetime.now():%Y-%m-%d %H:%M:%S}'
descr = errors.get(ecode, "(Unknown)")
print(f'{now}: Error {ecode} {descr} on "{endpoint}"', flush=True)
if ecode in (401, 403, 404):
# Caller must handle these errors. Return 0 wait time.
return 0
if ecode == 429:
if retry:
print('Retrying in 15 minutes...', end=' ', flush=True)
time.sleep(60 * 15)
print('awake and trying again.')
# Return wait time to default 1 second.
return 1
else:
# No retries
return 0
if ecode in (500, 502, 503, 504):
if retry:
print(f'Retrying in {wait} seconds...', end=' ', flush=True)
time.sleep(wait)
print('awake and trying again.')
wait *= 1.5
if wait < 60 * 30:
return wait
print('Too many retries. Quitting.')
raise error
# def handle_http_error
api_endpoint = self.api_endpoint_method[endpoint]
wait = 1
while wait:
try:
return api_endpoint(*args, **kwargs)
except twitter.api.TwitterHTTPError as e:
wait = handle_http_error(e, endpoint, wait)
def get_user_tweets(self, endpoint, screen_name=None, user_id=None,
max_tweets=None, **kwargs):
"""
Request a user's tweets (statuses) according to the endpoint
:param endpoint: Endpoint request string, e.g. '/statuses/user_timeline'
:param screen_name: User's screen name, a.k.a. handle, e.g. 'katyperry'
:param user_id: User's numeric ID
:param max_tweets: Maximum tweets requested
:param kwargs: Optional, user-supplied keyword arguments
:return: A list of Tweet objects
"""
# No screen_name or user_id implies default to authenticated user
if screen_name:
kwargs['screen_name'] = screen_name
elif user_id:
kwargs['user_id'] = user_id
kwargs['tweet_mode'] = 'extended'
count = 200
tweets = []
while True:
# Limit each GET to a maximum 200 tweets
kwargs['count'] = min(count, max_tweets - len(tweets)) if max_tweets else count
# To correctly traverse the user's timeline, set the
# max_id parameter after the first tweets are available.
# See https://dev.twitter.com/rest/public/timelines.
if tweets:
kwargs['max_id'] = min(tweet['id'] for tweet in results) - 1
results = self.endpoint_request(endpoint, **kwargs)
if not results:
break
tweets.extend(results)
if max_tweets and len(tweets) >= max_tweets:
break
return tweets
def get_cursored_items(self, endpoint, key, count=5000, max_items=None, **kwargs):
"""
Helper request function for cursored objects.
:param endpoint: Endpoint request string, e.g. '/followers/ids'
:param key: Cursored items key, e.g. 'ids'
:param count: Maximum items per request
:param max_items: Maximum total items requested
:param kwargs: Optional, user-supplied keyword arguments
:return: A list of requested objects
"""
kwargs['count'] = count
items = []
cursor = -1
while cursor:
kwargs['cursor'] = cursor
results = self.endpoint_request(endpoint, **kwargs)
if not results:
break
items.extend(results[key])
if max_items and len(items) >= max_items:
break
cursor = results['next_cursor']
return items
def get_items_by_lookup(self, endpoint, item_keyword, items, **kwargs):
"""
Get user-requested objects of type item_keyword, named in the items list.
:param endpoint: Endpoint request string, e.g. '/users/lookup'
:param item_keyword: Endpoint request keyword
Note: Endpoint 'id' requests must call the twitter.Twitter()
API methods with kwargs['_id'] to produce correct results.
:param items: User-supplied list of requested items, e.g. screen names
:param kwargs: Optional, user-supplied keyword arguments
:return: A list of user-requested objects. Note: The Twitter API
doesn't guarantee that objects are returned in the order
requested. The caller is responsible for confirming the
order and completeness of the returned list, if necessary.
"""
# Request up to 100 items per call
items_max = 100
results = []
while items:
items_str = ','.join(str(item) for item in items[:items_max])
kwargs[item_keyword] = items_str
response = self.endpoint_request(endpoint, **kwargs)
if not response:
break
results.extend(response)
items = items[items_max:]
return results
def get_rate_limits(self, key_0=None, key_1=None):
"""
Query the authorized user's current rate limit data.
See Rate Limits chart at https://dev.twitter.com/rest/public/rate-limits
:param key_0: Optional, single category request, e.g. 'statuses'
:param key_1: Optional, subcategory category request, e.g. '/statuses/user_timeline'
:return: Requested limits dictionary
"""
limits = self.endpoint_request('/application/rate_limit_status')
if limits:
limits = limits['resources']
if key_0 in limits:
limits = limits[key_0]
return limits[key_1] if key_1 else limits
else:
return limits
return None
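    # Example call sketch (instance name and numbers are illustrative; see the
    # Twitter rate-limit chart for the real values):
    #   limits = api_tools.get_rate_limits('statuses', '/statuses/user_timeline')
    #   # -> something like {'limit': 900, 'remaining': 897, 'reset': 1566300000}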
def get_home_timeline(self, max_tweets=None):
"""
Get a list of the most recent tweets and retweets posted
by the authenticating user and the user's friends (following).
:param max_tweets: Optional maximum tweets requested
:return: List of tweets
"""
        return self.get_user_tweets('/statuses/home_timeline', max_tweets=max_tweets)
def get_user_timeline(self, screen_name=None, user_id=None, max_tweets=None):
"""
Get a list of the most recent tweets posted by the user specified
by screen_name or user_id. If both screen_name and user_id are given,
screen_name is used. If neither screen_name nor user_id is given,
results are collected for the authenticated user.
:param screen_name: User's screen name, a.k.a. handle, e.g. 'katyperry'
:param user_id: User's numeric ID
:param max_tweets: Maximum desired tweets
:return: List of tweets
"""
return self.get_user_tweets('/statuses/user_timeline', screen_name, user_id, max_tweets)
def get_user_favorites(self, screen_name=None, user_id=None, max_tweets=None):
"""
Get a list of the most recent tweets favorited by the authenticating
user, or the user specified by screen_name or user_id. If both screen_name
and user_id are given, screen_name is used. If neither screen_name nor user_id
is given, results are collected for the authenticated user.
:param screen_name: User's screen name, a.k.a. handle, e.g. 'katyperry'
:param user_id: User's numeric ID
:param max_tweets: Maximum desired tweets
:return: List of tweets
"""
return self.get_user_tweets('/favorites/list', screen_name, user_id, max_tweets)
def get_user_profiles(self, screen_names=None, user_ids=None):
"""
Get a list of user objects as specified by values given by the screen_names
or user_ids list parameter. If both lists, screen_names and user_ids,
are given, screen_names is used.
:param screen_names: List of user screen names, a.k.a. handles
:param user_ids: List of user numeric IDs
:return: List of user objects
"""
items = screen_names or user_ids
item_keyword = 'screen_name' if screen_names else 'user_id'
return self.get_items_by_lookup('/users/lookup', item_keyword, items)
def get_tweets_by_id(self, ids, **kwargs):
"""
Get a list of tweets, specified by a given list of
numeric Tweet IDs in parameter ids.
:param ids: List of unique numeric tweet IDs
:return: List of requested tweets
"""
kwargs['tweet_mode'] = 'extended'
return self.get_items_by_lookup('/statuses/lookup', '_id', ids, **kwargs)
def get_connection_ids(self, which='friends', screen_name=None, user_id=None,
max_ids=None, **kwargs):
"""
For the user specified by screen_name or user_id, get a list of user IDs
for every user the specified user is following (which="friends"), or
for every user following the specified user (which=followers"). If both
screen_name and user_id are given, screen_name is used.
:param which: Connection type, Friends or Followers
:param screen_name: User's screen name, a.k.a. handle, e.g. 'katyperry'
:param user_id: User's numeric ID
:param max_ids: Maximum IDs to request
:param kwargs: Optional, user-supplied keyword arguments
:return: List of user IDs
"""
endpoint = {'friends': '/friends/ids',
'followers': '/followers/ids'}.get(which)
# No screen_name or user_id implies default to authenticated user
if screen_name:
kwargs['screen_name'] = screen_name
elif user_id:
kwargs['user_id'] = user_id
return self.get_cursored_items(endpoint, 'ids', max_items=max_ids, **kwargs)
def get_trend_locations(self, lat_lon=None):
"""
Get a list of locations for which Twitter has trending topic information.
Each location response encodes the WOEID and other human-readable information,
such as a canonical name and country.
If param lat_lon is given, returns a list of locations for which
Twitter has trending topics closest to the specified location.
A WOEID is a Yahoo! Where On Earth ID (http://developer.yahoo.com/geo/geoplanet).
:param lat_lon: Earth latitude and longitude coordinates,
passed as a tuple or list of decimal values
ranging from +90.0 to -90.0 for latitude,
and from +180.0 to -180.0 for longitude.
:return: List of Twitter location objects
"""
if lat_lon:
lat, lon = lat_lon
kwargs = {'lat': lat, 'long': lon}
return self.endpoint_request('/trends/closest', **kwargs)
else:
return self.endpoint_request('/trends/available')
def get_trends(self, woeid=1):
"""
Get a list of the top 50 trending topics for a specific WOEID,
if trending information is available for that WOEID. Responses
are trend objects that encode the trending topic, from which
the "query" parameter can be used to search for the topic on
Twitter Search.
:param woeid: Yahoo! Where On Earth location ID
(http://developer.yahoo.com/geo/geoplanet)
Default: WOEID=1 for worldwide
:return: List of trending topics for the specified WOEID.
"""
kwargs = {'_id': woeid}
return self.endpoint_request('/trends/place', **kwargs)[0]['trends']
def post_status_update(self, status, media_ids=None, **kwargs):
"""
Post a Tweet!
:param status: Required text
:param media_ids: Optional media id list. Twitter-supplied ids from media upload.
See https://dev.twitter.com/rest/reference/post/media/upload
:param kwargs: Optional, user-supplied keyword arguments
:return: Status (tweet) object, or error on failure
"""
kwargs['status'] = status
if media_ids:
kwargs['media_ids'] = media_ids
return self.endpoint_request('/statuses/update', **kwargs)
def post_lists_create(self, name, mode='private', description=None, **kwargs):
"""
Create a list
:param name: The name for the list. A list’s name must start with
a letter and can consist only of 25 or fewer letters,
numbers, “-”, or “_” characters.
:param mode: "public" or "private"; default "private"
:param description: Optional list description (100 character limit)
:param kwargs: Optional, user-supplied keyword arguments
:return: Twitter List object
"""
kwargs['name'] = name
kwargs['mode'] = mode
if description:
kwargs['description'] = description
return self.endpoint_request('/lists/create', **kwargs)
def post_lists_members_create(self, mode='add',
list_id=None, slug=None,
user_ids=None, screen_names=None,
owner_screen_name=None,
owner_id=None,
**kwargs):
"""
Add user(s) to a list
:param mode: 'add' member to list, or create 'all' members
:param list_id: Numerical list id
:param slug: You can identify a list by its slug instead
of by its numerical id. If using slug, must
also specify list owner using owner_id or
owner_screen_name parameter.
:param user_ids: Python list of user IDs; up to 100 per request.
:param screen_names: Python list of screen names; up to 100 per request.
:param owner_screen_name: User screen name who owns list requested by slug.
:param owner_id: User ID who owns list requested by slug.
:param kwargs: Optional, user-supplied keyword arguments
:return: Twitter List object
"""
kwargs['slug'] = slug
kwargs['list_id'] = list_id
kwargs['owner_screen_name'] = owner_screen_name
kwargs['owner_id'] = owner_id
names = screen_names
if names:
kwargs['screen_name'] = names[0] if mode == 'add' else ','.join(names)
elif user_ids:
kwargs['user_id'] = user_ids[0] if mode == 'add' else ','.join(user_ids)
else:
return None
if mode == 'add':
return self.endpoint_request('/lists/members/create', **kwargs)
else:
return self.endpoint_request('/lists/members/create_all', **kwargs)
def search_tweets(self, query, max_requests=5):
"""
Get a list of relevant Tweets matching a specified query.
# See https://dev.twitter.com/rest/public/search and
# https://dev.twitter.com/rest/reference/get/search/tweets
Quoting from https://dev.twitter.com/rest/public/search:
"The Twitter Search API is part of Twitter’s REST API.
It allows queries against the indices of recent or
popular Tweets and behaves similarly to, but not exactly
like the Search feature available in Twitter mobile or web
clients, such as Twitter.com search. The Twitter Search API
searches against a sampling of recent Tweets published in
the past 7 days.
Before getting involved, it’s important to know that the
Search API is focused on relevance and not completeness.
This means that some Tweets and users may be missing from
search results. If you want to match for completeness you
should consider using a Streaming API instead."
:param query: Twitter search term
:param max_requests: Maximum query requests. Each request
returns up to 100 results, and the
authenticated user is limited to
180 requests per 15 minutes.
:return: List of tweets
"""
# Prepare first request
kwargs = {'q': query, 'count': 100, 'tweet_mode': 'extended'}
tweets = []
for search in range(max_requests):
if tweets:
try:
next_results = results['search_metadata']['next_results']
# Create a dict from next_results, which has this format:
# ?max_id=313519052523986943&q=NCAA&include_entities=1
kwargs = dict(item.split('=') for item in next_results[1:].split("&"))
# No further results when 'next_results' is missing
except KeyError:
break
results = self.endpoint_request('/search/tweets', **kwargs)
if not results['statuses']:
break
tweets.extend(results['statuses'])
return tweets
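# End-to-end usage sketch (screen name and output file names are placeholders):
#   tt = TwitterTools("credentials.json")
#   tweets = tt.get_user_timeline(screen_name="example_user", max_tweets=500)
#   save_tweets(tweets, "example_user_tweets.csv")
#   profiles = tt.get_user_profiles(screen_names=["example_user"])
#   save_profiles(profiles, "example_user_profile.csv")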
| mit |
cbeighley/peregrine | peregrine/analysis/acquisition.py | 4 | 8541 | #!/usr/bin/env python
# Copyright (C) 2012 Swift Navigation Inc.
#
# This source is subject to the license found in the file 'LICENSE' which must
# be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
"""Functions for analysing and plotting acquisition results."""
import numpy as np
import matplotlib.pyplot as plt
from operator import attrgetter
from peregrine.acquisition import AcquisitionResult, DEFAULT_THRESHOLD
__all__ = ['acq_table', 'snr_bars', 'peak_plot', 'acq_plot_3d']
def acq_table(acq_results, show_all=False):
"""
Print a table of acquisition results.
Parameters
----------
acq_results : [:class:`peregrine.acquisition.AcquisitionResult`]
List of :class:`peregrine.acquisition.AcquisitionResult` objects.
show_all : bool, optional
If `True` then even satellites which have not been acquired will be shown
in the table.
"""
for ar in acq_results:
if ar.status == 'A':
if show_all:
print '*',
print ar
elif show_all:
print ' ',
print ar
def snr_bars(acq_results,
threshold=DEFAULT_THRESHOLD, ax=None, show_missing=True):
"""
Display the acquisition Signal to Noise Ratios as a bar chart.
This function is useful for visualising the output of
:meth:`peregrine.acquisition.Acquisition.acquisition` or saved acquisition
results files loaded with :func:`peregrine.acquisition.load_acq_results`.
Parameters
----------
acq_results : [:class:`peregrine.acquisition.AcquisitionResult`]
List of :class:`peregrine.acquisition.AcquisitionResult` objects to plot
bars for. If the `status` field of the
:class:`peregrine.acquisition.AcquisitionResult` object is ``'A'``, i.e.
the satellite has been acquired, then the bar will be highlighted.
theshold : {float, `None`}, optional
If not `None` then an acquisition theshold of this value will be indicated
on the plot. Defaults to the value of
:attr:`peregrine.acquisition.DEFAULT_THRESHOLD`.
ax : :class:`matplotlib.axes.Axes`, optional
If `ax` is not `None` then the bar chart will be plotted on the supplied
:class:`matplotlib.axes.Axes` object rather than as a new figure.
show_missing : bool, optional
If `True` then the bar chart will show empty spaces for all PRNs not
included in `acq_results`, otherwise only the PRNs in `acq_results` will be
plotted.
Returns
-------
out : :class:`matplotlib.axes.Axes`
The `Axes` object that the bar chart was drawn to.
"""
if ax is None:
fig = plt.figure()
fig.set_size_inches(10, 4, forward=True)
ax = fig.add_subplot(111)
if show_missing:
prns = [r.prn for r in acq_results]
missing = [prn for prn in range(31) if not prn in prns]
acq_results = acq_results[:] + \
[AcquisitionResult(prn, 0, 0, 0, 0, '-') for prn in missing]
acq_results.sort(key=attrgetter('prn'))
for n, result in enumerate(acq_results):
if (result.status == 'A'):
colour = '#FFAAAA'
else:
colour = '0.8'
ax.bar(n-0.5, result.snr, color=colour, width=1)
ax.set_xticks(range(len(acq_results)))
ax.set_xticklabels(['%02d' % (r.prn+1) for r in acq_results])
ax.set_title('Acquisition results')
ax.set_ylabel('Acquisition metric')
if threshold is not None:
ax.plot([-0.5, len(acq_results)-0.5], [threshold, threshold],
linestyle='dashed', color='black')
ax.text(0.01, 0.97, 'threshold = %.1f' % threshold,
horizontalalignment='left',
verticalalignment='top',
transform = ax.transAxes)
yticks = ax.get_yticks()
dist = np.abs(yticks - threshold).min()
if dist >= 0.25*(yticks[1] - yticks[0]):
ax.set_yticks(np.append(yticks, threshold))
ax.set_xbound(-0.5, len(acq_results)-0.5)
ax.set_xlabel('PRN')
return ax
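# Usage sketch mirroring main() below (the results file name is a placeholder):
#   import peregrine.acquisition
#   acq_results = peregrine.acquisition.load_acq_results("acq_results.pickle")
#   acq_table(acq_results)
#   snr_bars(acq_results, threshold=DEFAULT_THRESHOLD)
#   plt.show()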
def peak_plot(powers, freqs, samples_per_code, code_length=1023.0):
"""
Visualise the peak in a table of acquisition correlation powers.
Display, in various ways, the peak in a 2D array of acquisition correlation
powers against code phase and Doppler shift.
This is useful for visualising the output of
:meth:`peregrine.acquisition.Acquisition.acquire`.
Parameters
----------
powers : :class:`numpy.ndarray`, shape(len(`freqs`), `samples_per_code`)
2D array containing correlation powers at different frequencies and code
phases. Code phase axis is in samples from zero to `samples_per_code`.
freqs : iterable
List of frequencies mapping the results frequency index to a value in Hz.
samples_per_code : float
The number of samples corresponding to one code length.
code_length : int, optional
The number of chips in the chipping code. Defaults to the GPS C/A code
value of 1023.
"""
samples_per_chip = samples_per_code / code_length
fig = plt.figure()
fig.set_size_inches(10, 10, forward=True)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
peak = np.unravel_index(powers.argmax(), powers.shape)
powers_detail = powers[peak[0]-5:peak[0]+5, peak[1]-50:peak[1]+50]
code_phases = np.arange(samples_per_code) / samples_per_chip
ax1.plot(code_phases, powers[peak[0],:], color='black')
ax1.set_title("Code phase cross-section")
ax1.set_xlabel("Code phase (chips)")
ax1.set_ylabel("Correlation magnitude")
ax1.set_xbound(0, code_length)
ax1.set_xticks([0, code_phases[peak[1]], code_length])
ax1.set_xticklabels(['0', code_phases[peak[1]], '%.0f' % code_length])
ax2.plot(freqs, powers[:,peak[1]], color='black')
ax2.set_title("Carrier frequency cross-section")
ax2.set_xlabel("Doppler shift (Hz)")
ax2.set_ylabel("Correlation magnitude")
ax2.set_xbound(freqs[0], freqs[-1])
ax2.set_xticks([freqs[0], freqs[peak[0]], freqs[-1]])
ax3.plot(code_phases[peak[1]-50:peak[1]+50],
powers[peak[0],peak[1]-50:peak[1]+50], color='black')
ax3.set_title("Code phase cross-section detail")
ax3.set_xlabel("Code phase (chips)")
ax3.set_ylabel("Correlation magnitude")
ax3.set_xbound(code_phases[peak[1]-50], code_phases[peak[1]+50])
ax4.imshow(powers_detail, aspect='auto', cmap=plt.cm.RdYlBu_r,
extent=(code_phases[peak[1]-50],
code_phases[peak[1]+50],
freqs[peak[0]-5],
freqs[peak[0]+5]),
interpolation='bilinear')
ax4.set_title("Peak detail")
ax4.set_xlabel("Code phase (chips)")
ax4.set_ylabel("Doppler shift (Hz)")
fig.tight_layout()
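# Shape sketch for peak_plot / acq_plot_3d (the numbers are illustrative):
#   freqs = np.arange(-5000, 5000, 500)              # Doppler bins in Hz
#   powers.shape == (len(freqs), samples_per_code)   # e.g. (20, 16368)
#   peak_plot(powers, freqs, samples_per_code=16368)
# i.e. one row of correlation powers per Doppler bin, one column per code-phase sample.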
def acq_plot_3d(powers, freqs, samples_per_code, code_length=1023.0):
"""
Display a 3D plot of correlation power against code phase and Doppler shift.
This is useful for visualising the output of
:meth:`peregrine.acquisition.Acquisition.acquire`.
Parameters
----------
powers : :class:`numpy.ndarray`, shape(len(`freqs`), `samples_per_code`)
2D array containing correlation powers at different frequencies and code
phases. Code phase axis is in samples from zero to `samples_per_code`.
freqs : iterable
List of frequencies mapping the results frequency index to a value in Hz.
samples_per_code : float
The number of samples corresponding to one code length.
code_length : int, optional
The number of chips in the chipping code. Defaults to the GPS C/A code
value of 1023.
"""
from mpl_toolkits.mplot3d import Axes3D
samples_per_chip = samples_per_code / code_length
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
code_phases = np.arange(samples_per_code) / samples_per_chip
X, Y = np.meshgrid(code_phases, freqs)
ax.plot_surface(X[:], Y[:], powers[:], cmap=plt.cm.RdYlBu_r, linewidth=0)
ax.set_title("Acquisition")
ax.set_xlabel("Code phase (chips)")
ax.set_xbound(0, code_length)
ax.set_ylabel("Doppler shift (Hz)")
ax.set_ybound(freqs[0], freqs[-1])
ax.set_zlabel("Correlation magnitude")
fig.tight_layout()
def main():
import argparse
import peregrine.acquisition
parser = argparse.ArgumentParser()
parser.add_argument("file", help="the acquisition results file to analyse")
args = parser.parse_args()
acq_results = peregrine.acquisition.load_acq_results(args.file)
acq_table(acq_results)
snr_bars(acq_results)
plt.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
pyspace/pyspace | pySPACE/resources/dataset_defs/performance_result.py | 1 | 76012 | """ Tabular listing data sets, parameters and a huge number of performance metrics
Store and load the performance results of an operation from a csv file,
select subsets of this results or for create various kinds of plots
**Special Static Methods**
:merge_performance_results:
Merge result*.csv files when classification fails or is aborted.
:repair_csv:
Wrapper function for whole csv repair process when classification
fails or is aborted.
"""
from itertools import cycle
try: # import packages for plotting
import pylab
import matplotlib.pyplot
import matplotlib
# uncomment for nice latex output
# pylab.rc('text', usetex=True)
# font = {'family': 'serif',
# 'size': 14}
# pylab.rc('font', **font)
except:
pass
try: # import packages for plotting error bars
import scipy.stats
except:
pass
from collections import defaultdict
import numpy
import os
import glob
# imports for storing
import yaml
import warnings
import logging
# tools
import pySPACE.tools.csv_analysis as csv_analysis
from pySPACE.tools.yaml_helpers import python2yaml
# base class
from pySPACE.resources.dataset_defs.base import BaseDataset
from pySPACE.tools.filesystem import get_author
# roc imports
import cPickle # load roc points
from operator import itemgetter
class PerformanceResultSummary(BaseDataset):
""" Classification performance results summary
    Some syntax rules apply to the identifiers to distinguish their roles:
1. Parameters/Variables start and end with `__`.
These identifiers define the processing differences of the entries.
Altogether the corresponding values build a unique key of each row.
    2. Normal metrics start with a capital letter and
       continue with lowercase letters (AUC being the exception).
3. Meta metrics like training metrics, LOO metrics or soft metrics
start with small letters defining the category followed by a
`-` and continue with the detailed metric name.
4. Meta information like chosen optimal parameters can be
separated from metrics and variables using `~~`
at beginning and end of the information name.
This class can load a result tabular (namely the results.csv file) using
the factory method :func:`from_csv`.
Furthermore, the method :func:`project_onto` allows to select a subset of the
result collection where a parameter takes on a certain value.
The class contains various methods for plotting the loaded results.
These functions are used by the analysis operation and by the interactive
analysis GUI.
Mainly result collections are loaded for
:mod:`~pySPACE.missions.operations.comp_analysis`,
:mod:`~pySPACE.missions.operations.analysis` and
as best alternative with the :mod:`~pySPACE.run.gui.performance_results_analysis`.
They can be build e.g. with the :mod:`~pySPACE.missions.nodes.sink.classification_performance_sink` nodes,
with :ref:`MMLF <tutorial_interface_to_mmlf>` or with
:class:`~pySPACE.missions.operations.weka_classification.WekaClassificationOperation`.
The metrics as result of :mod:`~pySPACE.missions.nodes.sink.classification_performance_sink` nodes
are calculated in the :mod:`~pySPACE.resources.dataset_defs.metric` dataset module.
.. todo:: Access in result collection via indexing ndarray with one
dimension for each parameter.
Entries are indexes in list. So the corresponding values
can be accessed very fast.
.. todo:: Faster, memory efficient loading is needed. Pickling or new data
structure?
The class constructor expects the following **arguments**:
:data: A dictionary that contains a mapping from an attribute
(e.g. accuracy) to a list of values taken by this attribute.
An entry is the entirety of all i-th values over all dict-values
:tmp_pathlist:
List of files to be deleted after successful storing
When constructed via `from_multiple_csv` all included csv files
can be deleted after the collection is stored.
Therefore the parameter `delete` has to be active.
(*optional, default:None*)
:delete:
Switch for deleting files in `tmp_pathlist` after collection is stored.
(*optional, default: False*)
:Author: Mario M. Krell ([email protected])
"""
def __init__(self, data=None, dataset_md=None, dataset_dir=None,
csv_filename=None, **kwargs):
super(PerformanceResultSummary, self).__init__()
if csv_filename and not dataset_dir: # csv_filename is expected to be a path
dataset_dir=""
self.delete = False
self.tmp_pathlist = None
if dataset_md != None:
self.meta_data.update(dataset_md)
if data != None:
self.data = data
elif dataset_dir != None: # load data
if csv_filename != None:
# maybe it's not results.csv but it's definitely only one file
self.data = PerformanceResultSummary.from_csv(os.path.join(dataset_dir,
csv_filename))
elif os.path.isfile(os.path.join(dataset_dir,"results.csv")):
# delegate to from_csv_method
csv_file_path = os.path.join(dataset_dir,"results.csv")
self.data = PerformanceResultSummary.from_csv(csv_file_path)
else: # multiple csv_files
self.data, self.tmp_pathlist = \
PerformanceResultSummary.from_multiple_csv(dataset_dir)
self.delete = True
# update meta data
try:
splits = max(map(int,self.data["__Key_Fold__"]))
runs = max(map(int,self.data["__Key_Run__"]))+1
except:
warnings.warn('Splits and runs not available!')
else:
self.meta_data.update({"splits": splits, "runs": runs})
else: # we have a problem
self._log("Result tabular could not be created - data is missing!",
level=logging.CRITICAL)
warnings.warn("Result tabular could not be created - data is missing!")
self.data = {}
# modifier for getting general box plots in Gui
if not self.data.has_key('None'):
self.data['None'] = ['All'] * len(self.data.values()[0])
self.identifiers = self.data.keys()
# indexed version of the data
self.data_dict = None
self.transform()
@staticmethod
def from_csv(csv_file_path):
""" Loading data from the csv file located under *csv_file_path* """
# # pickle loading
# try:
# if csv_file_path.endswith("pickle"):
# f = open(csv_file_path, "rb")
# elif csv_file_path.endswith("csv"):
# f = open(csv_file_path[:-3] + "pickle", 'rb')
# res=cPickle.load(f)
# f.close()
# return res
# except IOError:
# pass
data_dict = csv_analysis.csv2dict(csv_file_path)
PerformanceResultSummary.translate_weka_key_schemes(data_dict)
# # save better csv version
# f = open(csv_file_path[:-3] + "pickle", "wb")
# f.write(cPickle.dumps(res, protocol=2))
# f.close()
return data_dict
@staticmethod
def from_multiple_csv(input_dir):
""" All csv files in the only function parameter 'input_dir' are
combined into a single result collection
The included files are deleted in the store method, *after*
the result has been stored successfully.
"""
# A list of all result files (one per classification process)
pathlist = glob.glob(os.path.join(input_dir,
"results_*"))
if len(pathlist) == 0:
warnings.warn(
'No files in the format "results_*" found for merging results!')
return
result_dict = None
# For all result files of the WEKA processes or hashed files
for input_file_name in pathlist:
# first occurrence
if result_dict is None:
result_dict = csv_analysis.csv2dict(input_file_name)
PerformanceResultSummary.transfer_Key_Dataset_to_parameters(
result_dict, input_file_name)
else:
result = csv_analysis.csv2dict(input_file_name)
PerformanceResultSummary.transfer_Key_Dataset_to_parameters(
result, input_file_name)
csv_analysis.extend_dict(result_dict,result,
retain_unique_items=True)
PerformanceResultSummary.translate_weka_key_schemes(result_dict)
return (result_dict, pathlist)
def transform(self):
""" Fix format problems like floats in metric columns and tuples instead of column lists """
for key in self.get_metrics():
if not type(self.data[key][0]) == float:
try:
l = [float(value) if not value == "" else 0
for value in self.data[key]]
self.data[key] = l
except:
warnings.warn("Metric %s has entry %s not of type float."%(
key,str(value)
))
for key in self.identifiers:
if not type(self.data[key]) == tuple:
self.data[key] = tuple(self.data[key])
@staticmethod
def merge_traces(input_dir):
""" Merge and store the classification trace files in directory tree
The collected results are stored in a common file in the *input_dir*.
"""
import cPickle
traces = dict()
long_traces = dict()
save_long_traces = True
sorted_keys = None
# save merged files to delete them later
merged_files = []
for dir_path, dir_names, files in os.walk(input_dir):
for filename in files:
if filename.startswith("trace_sp"):
pass
else:
continue
main_directory = dir_path.split(os.sep)[-3]
# needed in transfer_Key_Dataset_to_parameters
temp_key_dict = defaultdict(list)
# add a temporary Key_Dataset, deleted in the next step
temp_key_dict["Key_Dataset"] = [main_directory]
# read parameters from key dataset
PerformanceResultSummary.transfer_Key_Dataset_to_parameters(
temp_key_dict,
input_file_name=os.path.join(dir_path, filename))
key_dict = dict([(key,value[0]) for key, value in
temp_key_dict.items()])
# add run/split identifiers
split_number = int(filename[8:-7]) # from trace_spX.pickle
key_dict["__Key_Fold__"] = split_number
# from persistency_runX
run_number = int(dir_path.split(os.sep)[-2][15:])
key_dict["__Key_Run__"] = run_number
# transfer keys to hashable tuple of values
# the keys should always be the same
if sorted_keys is None:
sorted_keys = sorted(key_dict.keys())
traces["parameter_keys"] = sorted_keys
long_traces["parameter_keys"] = sorted_keys
identifier = []
for key in sorted_keys:
identifier.append(key_dict[key])
# load the actual classification trace
trace = cPickle.load(open(dir_path + os.sep + filename, 'rb'))
traces[tuple(identifier)] = trace
merged_files.append(dir_path + os.sep + filename)
if save_long_traces:
try:
trace = cPickle.load(open(dir_path + os.sep +"long_"+ filename, 'rb'))
long_traces[tuple(identifier)] = trace
merged_files.append(dir_path + os.sep +"long_"+ filename)
except IOError:
save_long_traces = False
# clean up
if sorted_keys is not None:
name = 'traces.pickle'
result_file = open(os.path.join(input_dir, name), "wb")
result_file.write(cPickle.dumps(traces, protocol=2))
result_file.close()
if save_long_traces:
name = 'long_traces.pickle'
result_file = open(os.path.join(input_dir, name), "wb")
result_file.write(cPickle.dumps(long_traces, protocol=2))
result_file.close()
for temp_file in merged_files:
os.remove(temp_file)
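# Sketch of the directory layout the walk above expects (component names assumed):
# <input_dir>/{dataset}{param#value}/persistency_run0/<node_dir>/trace_sp0.pickle
# Run and split numbers are recovered from "persistency_run<X>" and
# "trace_sp<X>.pickle"; the merged result is written to <input_dir>/traces.pickle
# (and long_traces.pickle, if long trace files are present).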
@staticmethod
def translate_weka_key_schemes(data_dict):
""" Data dict is initialized as 'defaultdict(list)' and
so the append function also works on non-existing keys.
"""
if not data_dict.has_key("Key_Scheme"):
return
for i, value in enumerate(data_dict["Key_Scheme"]):
# Some special cases
# For these cases we rewrite the value to be meaningful
# Important parts of "Key_Scheme_Options" will be added to "Key_Scheme"
# Furthermore we introduce numerous new variables to benchmark
value = value.split(".")[-1]
if value == "SMO":
options = data_dict["Key_Scheme_options"][i]
options = options.split()
data_dict["__Classifier_Type__"].append(value)
for token in options:
# Search kernel type
if token.count("supportVector") >=1:
kernel_type = token.split(".")[-1]
data_dict["Kernel_Type"].append(kernel_type)
break
# Search complexity
for index, token in enumerate(options):
if token.count("-C") >=1:
complexity = options[index + 1]
data_dict["__Complexity__"].append(complexity)
# Add the complexity to value
value += " C=%s" % complexity
break
if kernel_type == 'PolyKernel':
# Search exponent in options of PolyKernel
exponent = options[options.index("-E") + 1]
if "\\" in exponent:
exponent = exponent.split("\\")[0]
#Add Kernel Type and Exponent to value
data_dict["__Kernel_Exponent__"].append(exponent)
if not exponent == "0":
value += " %s Exp=%s" % (kernel_type, exponent)
else:
value += " linear"
# unimportant parameter
data_dict["__Kernel_Gamma__"].append(0.0)
elif kernel_type == 'RBFKernel':
# Search gamma in options of RBFKernel
gamma = options[options.index("-G") + 1]
if "\\" in gamma:
gamma = gamma.split("\\")[0]
data_dict["__Kernel_Gamma__"].append(gamma)
value += " %s G=%s" % (kernel_type, gamma)
# unimportant parameter
data_dict["__Kernel_Exponent__"].append(0.0)
else:
#TODO: Warning: unknown kernel
data_dict["__Kernel_Exponent__"].append(0.0)
data_dict["__Kernel_Gamma__"].append(0.0)
# parameters used additionally in libsvm
data_dict["__Kernel_Offset__"].append(0.0)
data_dict["__Kernel_Weight__"].append(0.0)
# LibSVM works the same way as SMO and ships with WEKA.
# For NodeChainOperations a better version is integrated in C++.
# It has more options, especially class weights, which make oversampling unnecessary.
# When using nonlinear kernels, one should consider the influence of the offset
# and, for polynomial kernels, the scaling factor gamma.
elif value == "LibSVM":
options = data_dict["Key_Scheme_options"][i]
weight = options.split("-W")[-1]
options = options.split()
for index, token in enumerate(options):
if token.count("-S") >=1:
# 0 -- C-SVC
# 1 -- nu-SVC
# 2 -- one-class SVM
# 3 -- epsilon-SVR
# 4 -- nu-SVR
classifier = options[index + 1]
if classifier == "0":
classifier ="C_CVC"
data_dict["__Classifier_Type__"].append(classifier)
value += " %s" % (classifier)
elif token.count("-K") >=1:
# 0 -- linear: u'*v
# 1 -- polynomial: (gamma*u'*v + coef0)^degree
# 2 -- radial basis function: exp(-gamma*|u-v|^2)
# 3 -- sigmoid: tanh(gamma*u'*v + coef0)
kernel = options[index + 1]
if kernel == "0":
kernel = "linear"
elif kernel == "1":
kernel = "polynomial"
elif kernel == "2":
kernel = "RBF"
elif kernel == "3":
kernel = "sigmoid"
data_dict["__Kernel_Type__"].append(kernel)
value += " %s" % (kernel)
elif token.count("-C") >=1:
complexity = options[index + 1]
data_dict["__Complexity__"].append(complexity)
value += " C=%s" % (complexity)
elif token.count("-D") >=1:
degree = options[index + 1]
data_dict["__Kernel_Exponent__"].append(degree)
if not degree == "0":
value += " Exp=%s" % (degree)
elif token.count("-G") >=1:
gamma = options[index + 1]
data_dict["__Kernel_Gamma__"].append(gamma)
if not gamma == "0.0":
value += " G=%s" % (gamma)
elif token.count("-R") >=1:
coef0 = options[index + 1]
data_dict["__Kernel_Offset__"].append(coef0)
if not coef0 == "0.0":
value += " c0=%s" % (coef0)
elif token.count("W")>=1:
if "\\" in weight:
weight = weight.split("\\\"")[1]
data_dict["__Kernel_Weight__"].append(weight)
if not weight == "1.0 1.0":
value += " W=%s" % (weight)
else:
# TODO: Warning: unknown classifier
# Add all parameters of the two integrated classifiers to keep the analysis operation compatible with other classifiers
data_dict["__Kernel_Type__"].append(value)
data_dict["__Complexity__"].append(0.0)
data_dict["__Kernel_Exponent__"].append(0.0)
data_dict["__Kernel_Gamma__"].append(0.0)
data_dict["__Kernel_Offset__"].append(0.0)
data_dict["__Kernel_Weight__"].append(0.0)
del data_dict["Key_Scheme"]
## Done
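# Rough illustration with an assumed WEKA row: for
# Key_Scheme = "weka.classifiers.functions.SMO" and
# Key_Scheme_options = "-C 2.0 -K weka.classifiers.functions.supportVector.RBFKernel -G 0.01",
# the code above appends "SMO" to "__Classifier_Type__", "2.0" to
# "__Complexity__" and "0.01" to "__Kernel_Gamma__", and rewrites the scheme
# value to roughly "SMO C=2.0 RBFKernel G=0.01".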
@staticmethod
def merge_performance_results(input_dir, delete_files=False):
"""Merge result*.csv files when classification fails or is aborted.
Use function with the pathname where the csv-files are stored.
E.g., merge_performance_results('/Users/seeland/collections/20100812_11_18_58')
**Parameters**
:input_dir:
Contains a string with the path where csv files are stored.
:delete_files:
controls if the csv-files will be removed after merging has finished
(optional, default: False)
:Author: Mario Krell
:Created: 2011/09/21
"""
collection = PerformanceResultSummary(dataset_dir=input_dir)
collection.delete = delete_files
collection.store(input_dir)
@staticmethod
def repair_csv(path, num_splits=None, default_dict=None, delete_files=True):
"""Wrapper function for whole csv repair process when classification fails
or is aborted.
This function performs merge_performance_results, reporting and reconstruction of missing
conditions, and a final merge. As a result two files are written:
results.csv and repaired_results.csv to the path specified.
**Parameters**
:path:
String containing the path where the classification results are
stored. This path is also used for storing the resulting csv files.
:num_splits:
Number of splits used for classification. If not specified
this information is read out from the csv file of the merge_performance_results
procedure.
(optional, default: None)
:default_dict:
A dictionary specifying default values for missing
conditions. This dictionary can e.g. be constructed using
empty_dict(csv_dict) and subsequent modification, e.g.
default_dict['Metric'].append(0). This parameter is used in
reconstruct_failures.
(optional, default: None)
:delete_files:
Controls if unnecessary files are deleted by merge_performance_results and
check_op_libSVM.
(optional, default: True)
:Author: Mario Krell, Sirko Straube
:Created: 2010/11/09
"""
PerformanceResultSummary.merge_performance_results(path, delete_files=delete_files)
filename= path + '/results.csv'
csv_dict = csv_analysis.csv2dict(filename)
if not num_splits:
num_splits = int(max(csv_dict['__Key_Fold__']))
oplist= csv_analysis.check_op_libSVM(path, delete_file=delete_files)
failures = csv_analysis.report_failures(oplist, num_splits)
final_dict= csv_analysis.reconstruct_failures(csv_dict, failures,
num_splits, default_dict=default_dict)
csv_analysis.dict2csv(path + '/repaired_results.csv', final_dict)
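# Hedged usage sketch; the path is only a placeholder for a real
# classification result directory:
# >>> PerformanceResultSummary.repair_csv('/tmp/20100812_11_18_58', num_splits=5)
# This writes results.csv and repaired_results.csv into that directory.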
def store(self, result_dir, name = "results", s_format = "csv", main_metric="Balanced_accuracy"):
""" Stores this collection in the directory *result_dir*.
In contrast to *dump* this method stores the collection
not in a single file but as a whole directory structure with meta
information etc.
**Parameters**
:result_dir: The directory in which the collection will be stored
:name: The name of the file in which the result file is stored.
(*optional, default: 'results'*)
:s_format: The format in which the actual data sets should be stored.
(*optional, default: 'csv'*)
:main_metric: Name of the metric used for the shortened stored file.
If no metric is given, no shortened version is stored.
(*optional, default: 'Balanced_accuracy'*)
"""
author = get_author()
# Update the meta data
self.update_meta_data({"type" : "result",
"storage_format": s_format,
"author" : author})
# file name in which the operation's results will be stored
output_file_name = os.path.join(result_dir,name + "." + s_format)
self._log("\tWriting results to %s ..." % output_file_name)
if s_format == "csv":
#Store meta data
BaseDataset.store_meta_data(result_dir,self.meta_data)
self.data.pop("None",False)
csv_analysis.dict2csv(output_file_name, self.data)
if main_metric in self.identifiers:
reduced_data = dict()
for key in self.get_variables():
try:
if len(list(set(self.data[key]))) > 1:
reduced_data[key] = self.data[key]
except TypeError:
if len(list(set([python2yaml(item) for item in self.data[key]]))) > 1:
reduced_data[key] = self.data[key]
reduced_data[main_metric] = self.data[main_metric]
metric_list = ["True_positives","True_negatives","False_negatives","False_positives"]
for metric in [x for x in self.data.keys() if x in metric_list]:
reduced_data[metric]=self.data[metric]
output_file_name = os.path.join(result_dir,"short_"+name + "." + s_format)
csv_analysis.dict2csv(output_file_name, reduced_data)
else:
self._log("The format %s is not supported!"%s_format, level=logging.CRITICAL)
return
if self.delete:
for temp_result_file in self.tmp_pathlist:
os.remove(temp_result_file)
@staticmethod
def transfer_Key_Dataset_to_parameters(data_dict, input_file_name=None):
if not data_dict.has_key("Key_Dataset"):
return data_dict
for key_dataset in data_dict["Key_Dataset"]:
if not "}{" in key_dataset and not input_file_name is None:
hash_name = input_file_name.split("test_")
if len(hash_name) > 1:
hash_name = hash_name[-1][:-4]
else:
hash_name = input_file_name.split("train_")[-1][:-4]
# hash_name = input_file_name.split("_")[-1][:-4]
result_folder_name = os.path.dirname(input_file_name)
with open(os.path.join(result_folder_name, hash_name, "metadata.yaml")) as metadata_file:
metadata = yaml.load(metadata_file)
if "input_collection_name" in metadata:
warnings.warn(
"'input_collection_name' needs to be renamed to 'input_dataset_name'!")
metadata["input_dataset_name"]=metadata.pop("input_collection_name")
parameter_settings = metadata.get("parameter_setting", {})
hide_parameters = metadata.get("hide_parameters", [])
if not "__Dataset__" in data_dict:
data_dict["__Dataset__"] = []
data_dict["__hash__"] = []
for key in parameter_settings:
if key not in hide_parameters:
data_dict[key] = []
data_dict["__Dataset__"].append(
metadata["input_dataset_name"].strip(os.sep).split(
os.sep)[-1].strip("'}{").split("}{")[0])
for key in parameter_settings:
if key not in hide_parameters:
data_dict[key].append(parameter_settings[key])
data_dict["__hash__"].append(hash_name.strip("}{"))
else:
components = (key_dataset.strip("}{")).split("}{")
for index, attribute in enumerate(components):
if index >= 1:
# for compatibility with old data: index 1 might be the
# specification file name
if index == 1 and not ("#" in attribute):
attribute_key = "__Template__"
attribute_value = attribute
continue
try:
attribute_key, attribute_value = attribute.split("#")
except ValueError:
warnings.warn("\tValueError when splitting attributes!")
print "ValueError in result collection when splitting attributes."
continue
elif index == 0:
attribute_key = "__Dataset__"
attribute_value = attribute
data_dict[attribute_key].append(attribute_value)
del data_dict["Key_Dataset"]
return data_dict
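# Illustration with an assumed key: a Key_Dataset entry such as
# "{dummy_set}{__Complexity__#1.0}{__Kernel_Type__#RBF}" is split at "}{" and
# appends "dummy_set" to "__Dataset__", "1.0" to "__Complexity__" and "RBF"
# to "__Kernel_Type__" before "Key_Dataset" itself is deleted.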
def project_onto(self, proj_parameter, proj_values):
""" Project result collection onto a subset that fulfills all criteria
Project the result collection onto the rows where the parameter
*proj_parameter* takes on the value *proj_value*.
"""
if type(proj_values) != list:
proj_values = [proj_values]
projected_dict = defaultdict(list)
entries_added = False
for i in range(len(self.data[proj_parameter])):
if self.data[proj_parameter][i] in proj_values:
entries_added = True
for column_key in self.identifiers:
# will leave projection column in place if there are
# still different values for this parameter
if column_key == proj_parameter:
if len(proj_values) == 1: continue
projected_dict[column_key].append(self.data[column_key][i])
# If the projected_dict is empty we continue
if not entries_added:
return
return PerformanceResultSummary(projected_dict)
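# Minimal sketch, assuming `summary` is a loaded PerformanceResultSummary
# with a "__Complexity__" column:
# >>> reduced = summary.project_onto("__Complexity__", ["1.0"])
# Only rows with __Complexity__ == "1.0" are kept; with a single projection
# value the projected column itself is dropped (see above).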
def get_gui_metrics(self):
""" Returns the columns in data that correspond to metrics for visualization.
This excludes 'Key_Dataset' and the GUI variables of the tabular.
"""
metrics = []
variables = self.get_gui_variables()
for key in self.identifiers:
if not(key in variables) or key in ['Key_Dataset']:
metrics.append(key)
# Add variables, that can be interpreted as metrics
if type(key) is str and \
(key in ['__Num_Retained_Features__',
'__Num_Eliminated_Sensors__']
or key.startswith("~") or "Pon" in key) \
and len(list(set(self.data[key]))) > 1 \
and not (key in metrics):
metrics.append(key)
return metrics
def get_metrics(self):
""" Returns the columns in data that are real metrics """
metrics = []
variables = self.get_variables()
for key in self.identifiers:
if not type(key) is str:
warnings.warn("Wrong key (%s) provided with type %s."
% (str(key), type(key)))
elif not(key in variables) and not key.startswith("~") and \
not key == "None":
metrics.append(key)
# Add variables, that can be interpreted as metrics
if key in ['__Num_Retained_Features__',
'__Num_Eliminated_Sensors__']:
metrics.append(key)
return metrics
def get_gui_variables(self):
""" Returns the column headings that correspond to 'variables' to be visualized in the Gui """
variables = []
for key in self.identifiers:
if not type(key) is str:
warnings.warn("Wrong key (%s) provided with type %s."
% (str(key), type(key)))
# special key to get box plots without parameter dependencies
elif (key == 'None' or (
(key in ['__Dataset__', 'Kernel_Weight', 'Complexity',
'Kernel_Exponent', 'Kernel_Gamma', 'Kernel_Offset',
'Classifier_Type', 'Kernel_Type', 'Key_Scheme',
'Key_Run', 'Key_Fold', 'Run', 'Split']
or key.startswith('__')
or key.startswith('~'))
and len(list(set(self.data[key]))) > 1)):
variables.append(key)
return variables
def get_variables(self):
""" Variables are marked with '__'
Everything else is a metric, a meta metric, or processing information.
"""
variables = []
for key in self.identifiers:
if not type(key) is str:
warnings.warn("Wrong key (%s) provided with type %s."
% (str(key), type(key)))
elif key.startswith('__'):
variables.append(key)
return variables
def get_parameter_values(self, parameter):
""" Returns the values that *parameter* takes on in the data """
return set(self.data[parameter])
def get_nominal_parameters(self, parameters):
""" Returns a generator over the nominal parameters in *parameters*
.. note:: Nearly the same code as in *get_numeric_parameters*.
Changes in this method should also be applied to that method.
"""
for parameter in parameters:
try:
# Try to convert every value of the parameter to a float
[float(value) for value in self.data[parameter]]
# No exception and enough distinct values, thus a numeric attribute
if len(set(self.data[parameter])) >= 5:
continue
else:
yield parameter
except ValueError:
# This is not a numeric parameter, treat it as nominal
yield parameter
except KeyError:
# This exception should inform the user about wrong parameters
# in his YAML file.
import warnings
warnings.warn('The parameter "' + parameter
+ '" is not contained in the PerformanceResultSummary')
except IndexError:
# This exception informs the user about wrong parameters in
# his YAML file.
import warnings
warnings.warn('The parameter "' + parameter
+ '" has no values.')
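# Sketch of the heuristic above: a column whose values are all float-parsable
# and which takes at least 5 distinct values is treated as numeric and
# skipped here, while a float-parsable column with fewer distinct values
# (e.g. only "1.0" and "2.0") is still yielded as nominal.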
def get_numeric_parameters(self, parameters):
""" Returns a generator over the numeric parameters in *parameters*
.. note:: Nearly the same code as in *get_nominal_parameters*.
Changes in this method should also be applied to that method.
"""
for parameter in parameters:
try:
# Try to create a float of the first value of the parameter
float(self.data[parameter][0])
# No exception and enough distinct values, thus a numeric attribute
if len(set(self.data[parameter]))>=5:
yield parameter
else:
continue
except ValueError:
# This is not a numeric parameter, treat it as nominal
continue
except KeyError:
# This exception should inform the user about wrong parameters
# in his YAML file.
import warnings
warnings.warn('The parameter "' + parameter
+ '" is not contained in the PerformanceResultSummary')
except IndexError:
#This exception informs the user about wrong parameters in
# his YAML file.
import warnings
warnings.warn('The parameter "' + parameter
+ '" has no values.')
def dict2tuple(self,dictionary):
""" Return dictionary values sorted by key names """
keys=sorted(dictionary.keys())
l=[]
for key in keys:
l.append(dictionary[key])
return tuple(l)
def get_indexed_data(self):
""" Take the variables and create a dictionary with variable entry tuples as keys """
# index keys
self.variables = sorted(self.get_variables())
# other keys
keys = [key for key in self.identifiers if not key in self.variables]
# final dictionary
data_dict = {}
for i in range(len(self.data[self.variables[0]])):
var_dict = {}
perf_dict = {}
# read out variable values
for variable in self.variables:
value = self.data[variable][i]
var_dict[variable] = value
perf_dict[variable] = value
# read out the rest
for key in keys:
perf_dict[key] = self.data[key][i]
# save it into dictionary by mapping values to tuple as key/index
data_dict[self.dict2tuple(var_dict)] = perf_dict
return data_dict
def get_performance_entry(self, search_dict):
""" Get the line in the data, which corresponds to the `search_dict` """
search_tuple = self.dict2tuple(search_dict)
if self.data_dict is None:
self.data_dict = self.get_indexed_data()
return self.data_dict.get(search_tuple,None)
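# Hedged example, assuming `summary` is an instance whose only variables are
# "__Dataset__" and "__Complexity__":
# >>> row = summary.get_performance_entry(
# ...     {"__Dataset__": "dummy_set", "__Complexity__": "1.0"})
# `row` is the full column -> value mapping of the matching result line,
# or None if no line matches.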
def plot_numeric(self, axes, x_key, y_key, conditions=[]):
""" Creates a plot of the y_key for the given numeric parameter x_key.
A function that creates a plot visualizing the effect
of varying one variable on a second one (e.g. the effect of
varying the number of features on the accuracy).
**Expected arguments**
:axes: The axes into which the plot is written
:x_key: The key of the dictionary whose values should be used as
values for the x-axis (the independent variable)
:y_key: The key of the dictionary whose values should be used as
values for the y-axis, i.e. the dependent variable
:conditions: A list of functions that need to be fulfilled in order to
use one entry in the plot. Each function has to take two
arguments: The data dictionary containing all entries and
the index of the entry that should be checked. Each condition
must return a boolean value.
"""
colors = cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k', 'brown', 'gray'])
linestyles = cycle(['-']*9 + ['--']*9 + [':']*9 + ['-.']*9)
curves = defaultdict(lambda : defaultdict(list))
for i in range(len(self.data[x_key])):
# Check if this particular entry should be used
if not all(condition(self.data, i) for condition in conditions):
continue
# Get the value of the independent variable for this entry
x_value = float(self.data[x_key][i])
# Attach the corresponding value to the respective partition
if y_key.count("#") == 0:
y_value = float(self.data[y_key][i])
else: # A weighted cost function
weight1, value_key1, weight2, value_key2 = y_key.split("#")
y_value = float(weight1) * float(self.data[value_key1][i]) \
+ float(weight2) * float(self.data[value_key2][i])
curves[y_key][x_value].append(y_value)
for y_key, curve in curves.iteritems():
curve_x = []
curve_y = []
for x_value, y_values in sorted(curve.iteritems()):
curve_x.append(x_value)
curve_y.append(y_values)
# Create an error bar plot
axes.errorbar(curve_x, map(numpy.mean, curve_y),
yerr=map(scipy.stats.sem, curve_y),
elinewidth = 1, capsize = 5, label=y_key,
color = colors.next(), linestyle=linestyles.next())
axes.set_xlabel(x_key)
if y_key.count("#") == 0:
axes.set_ylabel(y_key.strip("_").replace("_", " "))
else:
axes.set_ylabel("%s*%s+%s*%s" % tuple(y_key.split("#")))
# display nearly invisible lines in the back for better orientation
axes.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
axes.set_axisbelow(True)
# Return figure name
return "_".join([y_key, x_key])
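# Usage sketch with assumed names (`fig` and `summary` are placeholders):
# >>> ax = fig.add_subplot(111)
# >>> summary.plot_numeric(
# ...     ax, x_key="__Complexity__", y_key="Balanced_accuracy",
# ...     conditions=[lambda data, i: data["__Dataset__"][i] == "dummy_set"])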
def plot_numeric_vs_numeric(self, axes, axis_keys, value_key, scatter=True):
""" Contour plot of the value_key for the two numeric parameters axis_keys.
A function that creates a contour plot visualizing the effect
of varying two variables on a third one (e.g. the effect of varying
the lower and upper cutoff frequency of a bandpass filter on
the accuracy).
**Parameters**
:axes: The axes into which the plot is written
:axis_keys: The two keys of the dictionary that are assumed to have \
an effect on a third variable (the dependent variable)
:value_key: The dependent variables whose values determine the \
color of the contour plot
:scatter: Plot nearly invisible dots behind the real data points.
(*optional, default: True*)
"""
assert(len(axis_keys) == 2)
# Determine a sorted list of the values taken on by the axis keys:
x_values = set([float(value) for value in self.data[axis_keys[0]]])
x_values = sorted(list(x_values))
y_values = set([float(value) for value in self.data[axis_keys[1]]])
y_values = sorted(list(y_values))
#Done
# We cannot create a contour plot if one dimension has only a single value
if len(x_values) == 1 or len(y_values) == 1:
return
# Create a meshgrid of them
X, Y = pylab.meshgrid(x_values, y_values)
# Determine the average value taken on by the dependent variable
# for each combination of the two source variables
Z = numpy.zeros((len(x_values),len(y_values)))
counter = numpy.zeros((len(x_values),len(y_values)))
for i in range(len(self.data[axis_keys[0]])):
x_value = float(self.data[axis_keys[0]][i])
y_value = float(self.data[axis_keys[1]][i])
if value_key.count("#") == 0:
performance_value = float(self.data[value_key][i])
else: # A weighted cost function
weight1, value_key1, weight2, value_key2 = value_key.split("#")
performance_value = float(weight1) * float(self.data[value_key1][i]) \
+ float(weight2) * float(self.data[value_key2][i])
Z[x_values.index(x_value), y_values.index(y_value)] += performance_value
counter[x_values.index(x_value), y_values.index(y_value)] += 1
Z = Z / counter
# Create the plot for this specific dependent variable
cf = axes.contourf(X, Y, Z.T, 100)
axes.get_figure().colorbar(cf)
if scatter:
axes.scatter(X,Y,marker='.',facecolors='None', alpha=0.1)
axes.set_xlabel(axis_keys[0].strip("_").replace("_", " "))
axes.set_ylabel(axis_keys[1].strip("_").replace("_", " "))
axes.set_xlim(min(x_values), max(x_values))
axes.set_ylim(min(y_values), max(y_values))
if value_key.count("#") == 0:
axes.set_title(value_key.strip("_").replace("_", " "))
else:
axes.set_title("%s*%s+%s*%s" % tuple(value_key.split("#")))
# Return figure name
return "%s_%s_vs_%s" % (value_key, axis_keys[0].strip("_").replace("_", " "), axis_keys[1].strip("_").replace("_", " "))
def plot_numeric_vs_nominal(self, axes, numeric_key, nominal_key, value_key,
dependent_BA_plot=False, relative_plot=False, minimal=False):
""" Plot for comparison of several different values of a nominal parameter with mean and standard error
A function that creates a plot visualizing the effect of
varying one numeric parameter on the performance for several
different values of a nominal parameter.
**Parameters**
:axes: The axes into which the plot is written
:numeric_key: The numeric parameter whose effect (together with the
nominal parameter) onto the dependent variable should
be investigated.
:nominal_key: The nominal parameter whose effect (together with the
numeric parameter) onto the dependent variable should
be investigated.
:value_key: The dependent variable whose values determine the
color of the contour plot
:dependent_BA_plot:
If the `value_key` contains *time* or *iterations*
and this variable is True, the value is replaced by
*Balanced_Accuracy* and the `nominal_key` by the `value_key`.
The points in the graph are constructed by averaging
over the old nominal parameter.
(*optional, default: False*)
:relative_plot:
The first `nominal_key` value (in alphabetic ordering) is chosen and the other
parameter values are averaged relative to it, to show
by which factor they change the metric.
This requires a clean tabular with only the relevant,
correctly named variables, in which each parameter setting is compared
with the others. Relative plots and dependent_BA plots can be combined.
(*optional, default: False*)
:minimal:
Do not plot labels and legends.
(*optional, default: False*)
"""
colors = cycle(['b','r', 'g', 'c', 'm', 'y', 'k', 'brown', 'gray','orange'])
linestyles = cycle(['-']*10 + ['-.']*10 + [':']*10 + ['--']*10)
eps=10**(-6)
# Determine a mapping from the value of the nominal value to a mapping
# from the value of the numeric value to the achieved performance:
# nominal -> (numeric -> performance)
if (("time" in value_key) or ("Time" in value_key) or ("iterations" in value_key)) and dependent_BA_plot:
dependent_key = value_key
value_key = "Balanced_accuracy"
else:
dependent_key = False
relative_plot = False
if relative_plot:
rel_par = sorted(list(set(self.data[nominal_key])))[0]
rel_vars = self.get_variables()
curves = defaultdict(lambda: defaultdict(list))
for i in range(len(self.data[nominal_key])):
curve_key = self.data[nominal_key][i]
parameter_value = float(self.data[numeric_key][i])
if value_key.count("#") == 0:
performance_value = float(self.data[value_key][i])
else: # A weighted cost function
weight1, value_key1, weight2, value_key2 = value_key.split("#")
performance_value = \
float(weight1) * float(self.data[value_key1][i]) \
+ float(weight2) * float(self.data[value_key2][i])
if relative_plot:
if curve_key == rel_par:
factor = 1
performance_value = 1
if dependent_key:
dependent_factor = self.data[dependent_key][i]
else:
rel_vars_dict = dict()
for var in rel_vars:
rel_vars_dict[var] = self.data[var][i]
rel_vars_dict[nominal_key] = rel_par
rel_data = self.get_performance_entry(rel_vars_dict)
if value_key.count("#") == 0:
try:
factor = float(rel_data[value_key])
except TypeError,e:
print rel_data
print value_key
print rel_vars_dict
print rel_vars_dict.keys()
raise(e)
else: # A weighted cost function
weight1, value_key1, weight2, value_key2 = value_key.split("#")
factor = float(weight1) * float(rel_data[value_key1]) \
+ float(weight2) * float(rel_data[value_key2])
dependent_factor = rel_data.get(dependent_key,1)
if dependent_factor == 0:
dependent_factor = eps
warnings.warn("Dependent key %s got zero value in reference %s."%(
str(dependent_key),rel_par
))
if factor == 0:
factor = eps
warnings.warn("Value key %s got zero value in reference %s."%(
str(value_key),rel_par
))
else:
factor = 1
dependent_factor = 1
if not dependent_key:
curves[curve_key][parameter_value].append(performance_value/factor)
else:
curves[curve_key][parameter_value].append((performance_value/factor,float(self.data[dependent_key][i])/float(dependent_factor)))
# Iterate over all values of the nominal parameter and create one curve
# in the plot showing the mapping from numeric parameter to performance
# for this particular value of the nominal parameter
for curve_key, curve in sorted(curves.iteritems()):
curve_key = curve_key.strip("_").replace("_", " ")
x_values = []
y_values = []
y_errs = []
x_errs = []
for x_value, y_value in sorted(curve.iteritems()):
if not dependent_key:
x_values.append(x_value)
# Plot the mean of all values of the performance for this
# particular combination of nominal and numeric parameter
y_values.append(pylab.mean(y_value))
y_errs.append(scipy.stats.sem(y_value))
x_errs = None
else:
# calculate mean and standard deviation
# of metric and dependent parameter values and
# use the dependent parameter as x_value
# and the metric as y_value
mean = numpy.mean(y_value,axis=0)
metric_mean = mean[0]
time_mean = mean[1]
sem = scipy.stats.sem(y_value,axis=0)
metric_sem = sem[0]
time_sem = sem[1]
x_values.append(time_mean)
y_values.append(metric_mean)
x_errs.append(time_sem)
y_errs.append(metric_sem)
if len(x_values)<101:
if minimal:
axes.errorbar(
x_values, y_values, xerr = x_errs, yerr=y_errs,
# label=curve_key,
color=colors.next(), linestyle=linestyles.next(),
# lw=2, elinewidth=0.8, capsize=3,marker='x')
lw=4, elinewidth=0.8, capsize=3,marker='x')
else:
axes.errorbar(
x_values, y_values, xerr = x_errs, yerr=y_errs,
label=curve_key,
color=colors.next(), linestyle=linestyles.next(),
lw=2, elinewidth=0.8, capsize=3,marker='x')
else:
axes.errorbar(x_values, y_values, xerr = x_errs, yerr=y_errs,
label=curve_key,
color = colors.next(), linestyle=linestyles.next(),
lw=1, elinewidth=0.04,capsize=1)
if dependent_key:
numeric_key = dependent_key.strip("_") + " averaged dependent on " + numeric_key.strip("_")
if relative_plot:
value_key = value_key.strip("_")+" relative to "+ rel_par
if minimal:
axes.get_xaxis().set_visible(False)
axes.get_yaxis().set_visible(False)
else:
axes.set_xlabel(numeric_key.strip("_").replace("_", " "))
if value_key.count("#") == 0:
axes.set_ylabel(value_key.strip("_").replace("_", " "))
else:
axes.set_ylabel("%s*%s+%s*%s" % tuple(value_key.split("#")))
# display nearly invisible lines in the back for better orientation
axes.yaxis.grid(True, linestyle='-', which='major',
color='lightgrey', alpha=0.5)
axes.set_axisbelow(True)
prop = matplotlib.font_manager.FontProperties(size='xx-small')
prop = matplotlib.font_manager.FontProperties(size='small')
if not nominal_key=="None":
lg=axes.legend(prop=prop, loc=0,fancybox=True,title=nominal_key.strip("_").replace("_", " "))
lg.get_frame().set_facecolor('0.90')
lg.get_frame().set_alpha(.3)
# axes.set_xscale('log')
# Return figure name
return "%s_%s_vs_%s" % (value_key, nominal_key, numeric_key)
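# Note on the weighted metric syntax handled above (and in the other plot
# methods): a value key like "0.5#True_positive_rate#0.5#True_negative_rate"
# is split at "#" and evaluated per row as 0.5*True_positive_rate +
# 0.5*True_negative_rate.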
def plot_nominal(self, axes, x_key, y_key):
""" Creates a boxplot of the y_key for the given nominal parameter x_key.
A function that creates a plot visualizing the effect
of varying one nominal variable on a second one (e.g. the effect of
varying the classifier on the accuracy).
**Expected arguments**
:axes: The axes into which the plot is written
:x_key: The key of the dictionary whose values should be used as
values for the x-axis (the independent variables)
:y_key: The key of the dictionary whose values should be used as
values for the y-axis, i.e. the dependent variable
"""
# Create the plot for this specific dependent variable
values = defaultdict(list)
for i in range(len(self.data[x_key])):
parameter_value = self.data[x_key][i]
if y_key.count("#") == 0:
performance_value = float(self.data[y_key][i])
else: # A weighted cost function
weight1, y_key1, weight2, y_key2 = y_key.split("#")
performance_value = float(weight1) * float(self.data[y_key1][i]) \
+ float(weight2) * float(self.data[y_key2][i])
values[parameter_value].append(performance_value)
values = sorted(values.items(), reverse=True)
# the bottom of the subplots of the figure
axes.figure.subplots_adjust(bottom = 0.3)
axes.boxplot(map(lambda x: x[1], values))
axes.set_xticklabels(map(lambda x: x[0], values))
matplotlib.pyplot.setp(axes.get_xticklabels(), rotation=-90)
matplotlib.pyplot.setp(axes.get_xticklabels(), size='small')
axes.set_xlabel(x_key.replace("_", " "))
# display nearly invisible lines in the back for better orientation
axes.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
axes.set_axisbelow(True)
if y_key.count("#") == 0:
axes.set_ylabel(y_key.replace("_", " "))
else:
axes.set_ylabel("%s*%s+%s*%s" % tuple(y_key.split("#")))
# Return figure name
return "%s_%s" % (y_key, x_key)
def plot_nominal_vs_nominal(self, axes, nominal_key1, nominal_key2, value_key):
""" Plot comparison of several different values of two nominal parameters
A function that creates a plot visualizing the effect of
varying one nominal parameter on the performance for several
different values of another nominal parameter.
**Expected arguments**
:axes: The axes into which the plot is written
:nominal_key1: The name of the first nominal parameter whose effect
shall be investigated. This parameter determines the
x-axis.
:nominal_key2: The second nominal parameter. This parameter will be
represented by a different color per value.
:value_key: The name of the dependent variable whose values
determines the y-values in the plot.
"""
from matplotlib.patches import Polygon, Rectangle
# boxColors = ['b','r', 'g', 'c', 'm', 'y', 'k', 'brown', 'gray']
boxColors = ['steelblue','burlywood', 'crimson', 'olive', 'cadetblue',
'cornflowerblue', 'darkgray', 'darkolivegreen',
'goldenrod', 'lightcoral', 'lightsalmon', 'lightseagreen',
'lightskyblue', 'lightslategray', 'mediumseagreen',
'mediumturquoise', 'mediumvioletred', 'navy', 'orange',
'tan', 'teal', 'yellowgreen']
# Gathering of the data
plot_data = defaultdict(lambda: defaultdict(list))
for i in range(len(self.data[nominal_key2])):
nom1_key = self.data[nominal_key1][i]
nom2_key = self.data[nominal_key2][i]
if value_key.count("#") == 0:
performance_value = float(self.data[value_key][i])
else: # A weighted cost function
weight1, value_key1, weight2, value_key2 = value_key.split("#")
performance_value = \
float(weight1) * float(self.data[value_key1][i]) \
+ float(weight2) * float(self.data[value_key2][i])
plot_data[nom1_key][nom2_key].append(performance_value)
# Prepare data for boxplots
box_data = []
nom1_keys = []
for nom1_key, curve in sorted(plot_data.iteritems(), reverse=True):
x_values = []
y_values = []
nom1_keys.append(nom1_key)
for x_value, y_values in sorted(curve.iteritems()):
box_data.append(y_values)
# Make sure we always have enough colors available
nom2_keys = sorted(plot_data[nom1_key].keys())
while len(nom2_keys) > len(boxColors):
boxColors += boxColors
# the bottom of the subplots of the figure
axes.figure.subplots_adjust(bottom=0.3)
# position the boxes in the range of +-0.25 around {1,2,3,...}
box_positions=[]
for i in range(len(nom1_keys)):
if len(nom2_keys) > 1:
box_positions.extend([i+1 - .25 + a*.5/(len(nom2_keys)-1)
for a in range(len(nom2_keys))])
else:
box_positions.extend([i+1])
# actual plotting; width of the boxes:
w = .5 if len(nom2_keys) == 1 else .35/(len(nom2_keys)-1)
bp = axes.boxplot(box_data, positions=box_positions, widths=w)
# design of boxplot components
matplotlib.pyplot.setp(bp['boxes'], color='black')
matplotlib.pyplot.setp(bp['whiskers'], color='black')
matplotlib.pyplot.setp(bp['fliers'], color='grey', marker='+', mew=1.5)
# use the nom1 keys as x-labels
axes.set_xticks([i+1 for i in range(len(nom1_keys))], minor=False)
axes.set_xticklabels(nom1_keys)
matplotlib.pyplot.setp(axes.get_xticklabels(), rotation=-90)
matplotlib.pyplot.setp(axes.get_xticklabels(), size='small')
axes.set_xlabel(nominal_key1.replace("_", " "))
# Now fill the boxes with desired colors by superposing polygons
numBoxes = len(nom1_keys)*len(nom2_keys)
medians = range(numBoxes)
# get all box coordinates
for i in range(numBoxes):
box = bp['boxes'][i]
boxX = []
boxY = []
for j in range(5):
boxX.append(box.get_xdata()[j])
boxY.append(box.get_ydata()[j])
boxCoords = zip(boxX,boxY)
# cycle through predefined colors
k = i % len(nom2_keys)
# draw polygon
boxPolygon = Polygon(boxCoords, facecolor=boxColors[k])
axes.add_patch(boxPolygon)
# Now draw the median lines back over what we just filled in
med = bp['medians'][i]
medianX = []
medianY = []
for j in range(2):
medianX.append(med.get_xdata()[j])
medianY.append(med.get_ydata()[j])
axes.plot(medianX, medianY, 'k')
medians[i] = medianY[0]
# Draw a legend by hand. As the legend is hand made, it is not easily
# possible to change its location or size - sorry for the inconvenience.
# width of the axes and xy-position of legend element #offset
dxy = [axes.get_xlim()[1]-axes.get_xlim()[0],
axes.get_ylim()[1]-axes.get_ylim()[0]]
xy = lambda offset: [axes.get_xlim()[0] + .8*dxy[0],
axes.get_ylim()[0] + .03*dxy[1]
+ .05*dxy[1]*offset]
# Background rectangle for the legend.
rect = Rectangle([xy(0)[0]-.02*dxy[0], xy(0)[1]-.02*dxy[1]],
.2*dxy[0],(.05*(len(nom2_keys)+1)+0.0175)*dxy[1],
facecolor='lightgrey', fill=True, zorder=5)
# legend "title"
axes.text(xy(len(nom2_keys))[0]+.03*dxy[0], xy(len(nom2_keys))[1]+.005*dxy[1],
nominal_key2.strip("_").replace("_", " "),
color='black', weight='roman', size='small', zorder=6)
axes.add_patch(rect)
# rect and text for each nom2-Value
for key in range(len(nom2_keys)):
rect = Rectangle(xy(key),.05*dxy[0],.035*dxy[1],
facecolor=boxColors[len(nom2_keys)-key-1], zorder=6)
axes.add_patch(rect)
axes.text(xy(key)[0]+.06*dxy[0], xy(key)[1]+.005*dxy[1],
nom2_keys[len(nom2_keys)-key-1],
color='black', weight='roman', size='small', zorder=6)
# Add a horizontal grid to the plot
axes.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
axes.set_axisbelow(True)
if value_key.count("#") == 0:
axes.set_ylabel(value_key.strip("_").replace("_", " "))
else:
axes.set_ylabel("%s*%s+%s*%s" % tuple(value_key.split("#")))
# display nearly invisible lines in the back for better orientation
axes.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
axes.set_axisbelow(True)
# Return figure name
return "%s_%s_vs_%s" % (value_key, nominal_key1, nominal_key2)
def plot_histogram(self, axes, metric, numeric_parameters, nominal_parameters,
average_runs = True):
""" Plots a histogram of the values the given metric takes on in data
Plots histogram for *metric* in which each parameter combination from
*numeric_parameters* and *nominal_parameters* corresponds
to one value (if *average_runs* == True) or each run corresponds
to one value (if *average_runs* == False).
The plot is written into *axes*.
"""
if average_runs == False:
metric_values = map(float, self.data[metric])
else:
# Merge all parameters in one list
parameters = list(numeric_parameters)
parameters.extend(nominal_parameters)
# Sort metric values according to the parameterization for the
# specific value
all_values = defaultdict(list)
for i in range(len(self.data[metric])):
key = tuple(self.data[parameter][i] for parameter in parameters)
all_values[key].append(float(self.data[metric][i]))
# Combine the mean value of the metric for each parameter
# combination
metric_values = [numpy.mean(value)
for value in all_values.itervalues()]
# Plot and store the histogram
axes.hist(metric_values, histtype='stepfilled', align='left')
axes.set_ylim((0, pylab.ylim()[1]))
axes.set_xlabel(metric if average_runs == False
else "Mean %s" % metric)
axes.set_ylabel('Occurrences')
# Return figure name
return "%s_histogram" % metric
###############################################################################
class ROCCurves(object):
""" Class for plotting ROC curves """
def __init__(self, base_path):
self.roc_curves = self._load_all_curves(base_path)
self.colors = cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k', 'brown', 'gray'])
def is_empty(self):
""" Return whether there are no loaded ROC curves """
return len(self.roc_curves) == 0
def plot(self, axis, selected_variable, projection_parameter, fpcost=1.0,
fncost=1.0, collection=None):
# Draw cost grid into the background
for cost in numpy.linspace(0.0, fpcost+fncost, 25):
axis.plot([0.0, 1.0], [1-cost/fncost, 1-(cost-fpcost)/fncost],
c='gray', lw=0.5)
# # If we do not average:
# if selected_variable == None:
# # Delegate to plot_all method
# return self.plot_all(axis, projection_parameter, collection)
# Draw an additional "axis" (the identity) to show skew/centroid of
# ROC curves
axis.plot([0.0, 1.0], [0.0, 1.0], c='k', lw=2)
for k in numpy.linspace(0.0, 1.0, 11):
axis.plot([k+0.01, k-0.01], [k-0.01, k+0.01], c='k', lw=1)
# Create a color dict
color_dict = defaultdict(lambda : self.colors.next())
# Some helper function
def create_roc_function(roc_curve):
""" Create a function mapping FPR onto TPR for the given roc_curve
"""
def roc_function(query_fpr):
""" Map FPR onto TPR using linear interpolation on ROC curve."""
if query_fpr == 0.0: return 0.0 # Avoid division by zero
last_fpr, last_tpr = 0.0, 0.0
for fpr, tpr in roc_curve:
if fpr >= query_fpr:
return (query_fpr - last_fpr) / (fpr - last_fpr) * \
(tpr - last_tpr) + last_tpr
last_fpr, last_tpr = fpr, tpr
return tpr
return roc_function
def create_weight_function(x_values, mean_curve):
"""
Creates a function that computes the orthogonal distance of the ROC
curve from the identity axis at an arbitrary (k,k)
"""
def weight_function(k):
"""
Creates a function that computes the orthogonal distance of the
ROC curve from the identity axis at (k,k)
"""
if k == 0.0: return 0.0 # Avoid division by zero
for fpr, tpr in zip(x_values, mean_curve):
if 0.5 * fpr + 0.5 * tpr >= k:
return 2 * (0.5 * fpr - 0.5 * tpr)**2
return 0.0
return weight_function
# Create mapping parameterization -> ROC functions
roc_fct_dict = defaultdict(list)
for parametrization, roc_curve in self._project_onto_subset(
self.roc_curves, projection_parameter):
key = parametrization[selected_variable] \
if selected_variable is not None and selected_variable \
in parametrization.keys() else "Global"
roc_fct_dict[key].append(create_roc_function(roc_curve))
# Iterate over all parametrization and average ROC functions and compute
# centroid
for param, roc_fcts in roc_fct_dict.iteritems():
x_values = numpy.linspace(0.0, 1.0, 500)
roc_values = []
for x in x_values:
roc_values.append([roc_fct(x) for roc_fct in roc_fcts])
mean_curve = map(numpy.mean, roc_values)
# Compute centroid of the mean ROC curve over the identity axis
weight_fct = create_weight_function(x_values, mean_curve)
k_values = numpy.linspace(0.0, 1.0, 100)
weights = [weight_fct(k) for k in numpy.linspace(0.0, 1.0, 100)]
centroid = sum(k_values[i]*weights[i] for i in range(len(k_values))) \
/ sum(weights)
if selected_variable == None:
color = self.colors.next()
else:
color = color_dict[param]
axis.plot(x_values, mean_curve, c=color,
label=str(param).replace("_"," ").strip())
axis.errorbar(x_values[::25], mean_curve[::25],
yerr=map(scipy.stats.sem, roc_values)[::25],
c=color, fmt='.')
axis.plot([centroid], [centroid],
c=color, marker='h')
axis.set_xlabel("False positive rate")
axis.set_ylabel("True positive rate")
axis.set_xlim(0.0, 1.0)
axis.set_ylim(0.0, 1.0)
axis.legend(loc=0)
if selected_variable is not None:
axis.set_title(str(selected_variable).replace("_"," ").strip())
def plot_all(self, axis, projection_parameter, collection=None):
""" Plot all loaded ROC curves after projecting onto subset. """
# Iterate over all ROC curves for parametrization that are selected
# by projection_parameter.
for parametrization, roc_curve in self._project_onto_subset(self.roc_curves,
projection_parameter):
color = self.colors.next()
axis.plot(map(itemgetter(0), roc_curve), map(itemgetter(1), roc_curve),
c=color)
# fpr = eval(collection.data['False_positive_rate'][0])
# tpr = eval(collection.data['True_positive_rate'][0])
# axis.scatter([fpr], [tpr], c='k', s=50)
axis.set_xlabel("False positive rate")
axis.set_ylabel("True positive rate")
axis.set_xlim(0.0, 1.0)
axis.set_ylim(0.0, 1.0)
axis.legend(loc=0)
def _load_all_curves(self, dir):
""" Load all ROC curves located in the persistency dirs below *dir* """
all_roc_curves = []
for subdir in [name for name in os.listdir(dir)
if os.path.isdir(os.path.join(dir, name))]:
if not subdir.startswith("{"): continue
parametrization = {}
tokens = subdir.strip("}{").split("}{")
parametrization["__Dataset__"] = tokens[0]
for token in tokens[1:]:
# TODO: if anything other than a node chain template
# has no '#', this will fail;
# delete as soon as no more data with node chain templates
# in folder names circulates
if '#' not in token:
parametrization["__Template__"] = token
continue
key, value = token.split("#")
try:
value = eval(value)
except:
pass
parametrization[key] = value
for run_dir in glob.glob(dir + os.sep + subdir
+ os.sep + "persistency_run*"):
run = eval(run_dir.split("persistency_run")[1])
for split_file in glob.glob(run_dir + os.sep + "PerformanceSinkNode"
+ os.sep + "roc_points_sp*.pickle"):
split = eval(split_file.split("roc_points_sp")[1].strip(".pickle"))
rs_parametrization = dict(parametrization)
rs_parametrization["__Key_Run__"] = run
rs_parametrization["__Run__"] = "__Run_"+str(run)
rs_parametrization["__Key_Fold__"] = split
rs_parametrization["__Split__"] = "__Split_"+str(split)
roc_curves = cPickle.load(open(split_file, 'r'))
all_roc_curves.append((rs_parametrization, roc_curves[0]))
return all_roc_curves
def _project_onto_subset(self, roc_curves, constraints):
""" Retain only roc_curves that fulfill the given constraints. """
for parametrization, roc_curve in roc_curves:
# Check constraints
constraints_fulfilled = True
for constraint_key, constraint_values in constraints.iteritems():
if not constraint_key in parametrization or not \
parametrization[constraint_key] in constraint_values:
constraints_fulfilled = False
break
if constraints_fulfilled:
yield (parametrization, roc_curve)
| bsd-3-clause |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/io/msgpack/test_extension.py | 2 | 2179 | import array
import pandas.io.msgpack as msgpack
from pandas.io.msgpack import ExtType
from .common import frombytes, tobytes
def test_pack_ext_type():
def p(s):
packer = msgpack.Packer()
packer.pack_ext_type(0x42, s)
return packer.bytes()
assert p(b"A") == b"\xd4\x42A" # fixext 1
assert p(b"AB") == b"\xd5\x42AB" # fixext 2
assert p(b"ABCD") == b"\xd6\x42ABCD" # fixext 4
assert p(b"ABCDEFGH") == b"\xd7\x42ABCDEFGH" # fixext 8
assert p(b"A" * 16) == b"\xd8\x42" + b"A" * 16 # fixext 16
assert p(b"ABC") == b"\xc7\x03\x42ABC" # ext 8
assert p(b"A" * 0x0123) == b"\xc8\x01\x23\x42" + b"A" * 0x0123 # ext 16
assert (
p(b"A" * 0x00012345) == b"\xc9\x00\x01\x23\x45\x42" + b"A" * 0x00012345
) # ext 32
def test_unpack_ext_type():
def check(b, expected):
assert msgpack.unpackb(b) == expected
check(b"\xd4\x42A", ExtType(0x42, b"A")) # fixext 1
check(b"\xd5\x42AB", ExtType(0x42, b"AB")) # fixext 2
check(b"\xd6\x42ABCD", ExtType(0x42, b"ABCD")) # fixext 4
check(b"\xd7\x42ABCDEFGH", ExtType(0x42, b"ABCDEFGH")) # fixext 8
check(b"\xd8\x42" + b"A" * 16, ExtType(0x42, b"A" * 16)) # fixext 16
check(b"\xc7\x03\x42ABC", ExtType(0x42, b"ABC")) # ext 8
check(b"\xc8\x01\x23\x42" + b"A" * 0x0123, ExtType(0x42, b"A" * 0x0123)) # ext 16
check(
b"\xc9\x00\x01\x23\x45\x42" + b"A" * 0x00012345,
ExtType(0x42, b"A" * 0x00012345),
) # ext 32
def test_extension_type():
def default(obj):
print("default called", obj)
if isinstance(obj, array.array):
typecode = 123 # application specific typecode
data = tobytes(obj)
return ExtType(typecode, data)
raise TypeError("Unknown type object {obj!r}".format(obj=obj))
def ext_hook(code, data):
print("ext_hook called", code, data)
assert code == 123
obj = array.array("d")
frombytes(obj, data)
return obj
obj = [42, b"hello", array.array("d", [1.1, 2.2, 3.3])]
s = msgpack.packb(obj, default=default)
obj2 = msgpack.unpackb(s, ext_hook=ext_hook)
assert obj == obj2
| apache-2.0 |
SkyHenryk/KBL-ANN | KBL-ANN/mining/kboDataPreprocessing.py | 1 | 6248 | # -*- coding: utf-8 -*-
import csv
import numpy as np
import pandas as pd
class kboDataPreprocessing:
matchYears = [2016]
dataFolder = "../data"
def start(self):
allStatus, allMatchResult = self.loadAllMatchResult(self.matchYears)
preprocessedData = self.preprocessingData(allStatus, allMatchResult)
self.saveInCsv(preprocessedData)
def loadAllMatchResult(self, matchYears):
allStatus = {}
allMatchResult = None
for matchYearIndex, matchYear in enumerate(matchYears):
for statusYearIndex in [0, 1]:
statusYear = self.loadStatusYear(matchYear,statusYearIndex)
allStatus[statusYear] = {}
for statusType in ["player", "team"]:
allStatus[statusYear][statusType] = {}
for statusPosition in ["batter", "pitcher"]:
allStatus[statusYear][statusType][statusPosition] = {}
with open(f"{self.dataFolder}/kbo_{statusType}_{statusPosition}_{statusYear}.csv",
newline='') as kboStatus:
kboStatusReader = csv.reader(kboStatus, delimiter=',', quotechar='|')
for status in kboStatusReader:
allStatus[statusYear][statusType][statusPosition][status[0]] = status[-7:]
with open(f"{self.dataFolder}/kbo_result_{matchYear}.csv", newline='') as kboResult:
kboResultReader = csv.reader(kboResult, delimiter=',', quotechar='|')
kboResult = np.array(list(kboResultReader))
kboResult = np.insert(kboResult, 0, matchYear, axis=1)
kboResult = np.insert(kboResult, 1, statusYear, axis=1)
kboResult = np.insert(kboResult, 2, matchYear - int(statusYear), axis=1)
if allMatchResult is None:
allMatchResult = kboResult
else:
allMatchResult = np.concatenate((allMatchResult, kboResult), axis=0)
return allStatus, allMatchResult
def loadStatusYear(self, matchYear, statusYearIndex):
return str(matchYear - statusYearIndex)
def preprocessingData(self, allStatus, allMatchResult):
matchYear = allMatchResult[:, 0]
statusYear = allMatchResult[:, 2]
matchMonth = [x.split(".")[0] for x in allMatchResult[:, 3]]
homeTeamName = allMatchResult[:, 5]
scoreData = allMatchResult[:, 6]
awayTeamName = allMatchResult[:, 7]
matchYearOneHot = self.oneHot(matchYear)
statusYearOneHot = self.oneHot(statusYear)
matchMonthOneHot = self.oneHot(matchMonth)
homeTeamOneHot = self.oneHot(homeTeamName)
awayTeamOneHot = self.oneHot(awayTeamName)
scoreDif = np.array([int(x.split(":")[0]) - int(x.split(":")[1]) for x in scoreData])
scoreWinningRate = np.vectorize(self.calculateWinningRate)(scoreDif)
scoreWinningRateOneShot = self.oneHot(scoreWinningRate)
allPlayerStatus = self.loadStatusByPlayerName(allStatus, allMatchResult)
preprocessedData = np.concatenate(
(statusYearOneHot, matchYearOneHot, matchMonthOneHot, homeTeamOneHot, awayTeamOneHot, allPlayerStatus,
scoreWinningRateOneShot), axis=1)
np.random.shuffle(preprocessedData)
return preprocessedData
def calculateWinningRate(self, x):
if x > 1:
return 1
else:
return 0
def loadStatusByPlayerName(self, allStatus, allMatchResult):
allMatchStatus = []
for MatchResult in allMatchResult:
matchStatus = []
yearStatus = allStatus.get(MatchResult[1])
yearPlayerStatus = yearStatus.get("player")
yearTeamStatus = yearStatus.get("team")
if yearPlayerStatus is None:
continue
yearPlayerPitcherStatus = yearPlayerStatus.get("pitcher")
yearPlayerBatterStatus = yearPlayerStatus.get("batter")
if yearTeamStatus is None:
continue
yearTeamPitcherStatus = yearTeamStatus.get("pitcher")
yearTeamBatterStatus = yearTeamStatus.get("batter")
homePicherStatus = yearPlayerPitcherStatus.get(MatchResult[8].strip())
if homePicherStatus is None:
homePicherStatus = yearTeamPitcherStatus.get(MatchResult[5].strip())
matchStatus += homePicherStatus
awayPicherStatus = yearPlayerPitcherStatus.get(MatchResult[28].strip())
if awayPicherStatus is None:
awayPicherStatus = yearTeamPitcherStatus.get(MatchResult[7].strip())
matchStatus += awayPicherStatus
for homeBatterIndex in range(9, 28):
homeBatterStatus = yearPlayerBatterStatus.get(MatchResult[homeBatterIndex].strip())
if homeBatterStatus is None:
homeBatterStatus = yearTeamBatterStatus.get(MatchResult[5].strip())
matchStatus += homeBatterStatus
for awayBatterIndex in range(29, 38):
awayBatterStatus = yearPlayerBatterStatus.get(MatchResult[awayBatterIndex].strip())
if awayBatterStatus is None:
awayBatterStatus = yearTeamBatterStatus.get(MatchResult[7].strip())
matchStatus += awayBatterStatus
matchStatus = [self.setFloat(x) for x in matchStatus]
allMatchStatus.append(matchStatus)
if len(allMatchStatus) == 0:
allMatchStatus = [[0]]
return np.array(allMatchStatus)
def oneHot(self,x):
result = pd.get_dummies(pd.Series(x)).values
return result
def setFloat(self, x):
if " -" in x:
if len(x) > 2:
result = float(x[2:]) * -1
else:
result = float(0)
else:
result = float(x)
return result
def saveInCsv(self, data):
with open(f'{self.dataFolder}/kbo_data.csv',"w") as f:
writer = csv.writer(f)
writer.writerows(list(data))
if __name__ == "__main__":
kboDataPreprocessing().start() | mit |
deeplook/bokeh | bokeh/compat/mpl.py | 32 | 2834 | "Supporting objects and functions to convert Matplotlib objects into Bokeh."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from warnings import warn
import matplotlib.pyplot as plt
from .bokeh_exporter import BokehExporter
from .bokeh_renderer import BokehRenderer
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def to_bokeh(fig=None, name=None, server=None, notebook=None, pd_obj=True, xkcd=False):
""" Uses bokeh to display a Matplotlib Figure.
You can store a bokeh plot in a standalone HTML file, as a document in
a Bokeh plot server, or embedded directly into an IPython Notebook
output cell.
Parameters
----------
fig: matplotlib.figure.Figure
The figure to display. If None or not specified, then the current figure
will be used.
name: str (default=None)
If this option is provided, then the Bokeh figure will be saved into
this HTML file, and then a web browser will be used to display it.
server: str (default=None)
Fully specified URL of bokeh plot server. Default bokeh plot server
URL is "http://localhost:5006" or simply "deault"
notebook: bool (default=False)
Return an output value from this function which represents an HTML
object that the IPython notebook can display. You can also use it with
a bokeh plot server just specifying the URL.
pd_obj: bool (default=True)
        The implementation assumes you are plotting using pandas.
You have the option to turn it off (False) to plot the datetime xaxis
with other non-pandas interfaces.
xkcd: bool (default=False)
If this option is True, then the Bokeh figure will be saved with a
xkcd style.
"""
if name is not None:
warn("Use standard output_file(...) from bokeh.io")
if server is not None:
warn("Use standard output_server(...) from bokeh.io")
if notebook is not None:
warn("Use standard output_notebook() from bokeh.io")
if fig is None:
fig = plt.gcf()
renderer = BokehRenderer(pd_obj, xkcd)
exporter = BokehExporter(renderer)
exporter.run(fig)
return renderer.fig
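# A minimal usage sketch (not part of the original module): it assumes the deprecation
# hints above still apply, i.e. that output_file()/show() live in bokeh.io, and the
# matplotlib figure built here is purely illustrative.
if __name__ == '__main__':
    import numpy as np
    from bokeh.io import output_file, show
    x = np.linspace(0, 2 * np.pi, 100)
    plt.plot(x, np.sin(x), label='sin(x)')  # draw with matplotlib as usual
    plt.legend()
    bokeh_fig = to_bokeh()                  # converts the current figure (plt.gcf())
    output_file('mpl_to_bokeh.html')        # hypothetical output file name
    show(bokeh_fig)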
| bsd-3-clause |
lseman/pylspm | pylspm/boot.py | 1 | 7810 | # PyLS-PM bootstrapping Library
# Author: Laio Oriel Seman
# Creation: November 2016
from multiprocessing import Pool, freeze_support
import pandas as pd
import numpy as np
from .pylspm import PyLSpm
import random
from scipy.stats.stats import pearsonr
class PyLSboot(object):
def do_work(self, item):
amostra = self.data.sample(
len(self.data), replace=True, random_state=(np.random.RandomState()))
# amostra.index = range(len(self.data))
# print(amostra)
try:
bootstraping = PyLSpm(amostra, self.LVcsv, self.Mcsv, self.scheme,
self.reg, self.h, self.maximo, self.stopCriterion)
if (bootstraping.convergiu == 1):
return [bootstraping.path_matrix.values]
except:
return None
def do_work_jk(self, item):
amostra = self.data.ix[self.indices[item]].reset_index(drop=True)
bootstraping = PyLSpm(amostra, self.LVcsv, self.Mcsv, self.scheme,
self.reg, self.h, self.maximo, self.stopCriterion)
if (bootstraping.convergiu == 1):
return [bootstraping.path_matrix.values]
def do_work_ga(self, item):
output = pd.DataFrame(self.population[item].genes)
output.columns = ['Split']
dataSplit = pd.concat([self.data, output], axis=1)
f1 = []
results = []
for i in range(self.nclusters):
dataSplited = (dataSplit.loc[dataSplit['Split']
== i]).drop('Split', axis=1)
dataSplited.index = range(len(dataSplited))
try:
results.append(PyLSpm(dataSplited, self.LVcsv, self.Mcsv, self.scheme,
self.reg, 0, 50, HOC='true'))
resid = results[i].residuals()[3]
f1.append(resid)
except:
f1.append(10000)
print((1 / np.sum(f1)))
return (1 / np.sum(f1))
def do_work_pso(self, item):
output = pd.DataFrame(self.population[item].position)
output.columns = ['Split']
dataSplit = pd.concat([self.data, output], axis=1)
f1 = []
results = []
for i in range(self.nclusters):
dataSplited = (dataSplit.loc[dataSplit['Split']
== i]).drop('Split', axis=1)
dataSplited.index = range(len(dataSplited))
try:
results.append(PyLSpm(dataSplited, self.LVcsv, self.Mcsv, self.scheme,
self.reg, 0, 50, HOC='true'))
resid = results[i].residuals()[3]
f1.append(resid)
except:
f1.append(10000)
print((1 / np.sum(f1)))
return (1 / np.sum(f1))
def do_work_tabu(self, item):
output = pd.DataFrame(self.population[item])
output.columns = ['Split']
dataSplit = pd.concat([self.data, output], axis=1)
f1 = []
results = []
for i in range(self.nclusters):
dataSplited = (dataSplit.loc[dataSplit['Split']
== i]).drop('Split', axis=1)
dataSplited.index = range(len(dataSplited))
try:
results.append(PyLSpm(dataSplited, self.LVcsv, self.Mcsv, self.scheme,
self.reg, 0, 50, HOC='true'))
resid = results[i].residuals()[3]
f1.append(resid)
except:
f1.append(10000)
cost = (np.sum(f1))
print(1 / cost)
return [self.population[item], cost]
def do_work_permuta(self, item):
node = np.zeros(self.lenT)
while np.count_nonzero(node) != self.leng2:
node[random.randint(0, self.lenT - 1)] = 1
output = pd.DataFrame(node)
output.columns = ['Split']
dataSplit = pd.concat([self.dataPermuta, output], axis=1)
results = []
f1 = []
f2 = []
f3 = []
try:
for i in range(2):
dataSplited = (dataSplit.loc[dataSplit['Split']
== i]).drop('Split', axis=1)
dataSplited.index = range(len(dataSplited))
results.append(PyLSpm(dataSplited, self.LVcsv, self.Mcsv,
self.scheme, self.reg, 0, 50, HOC='false'))
outer_weights = results[i].outer_weights
f1.append(outer_weights)
singleResult = PyLSpm(self.dataPermuta, self.LVcsv,
self.Mcsv, self.scheme, self.reg, 0, 50, HOC='false')
fscores = singleResult.fscores
for i in range(2):
f2_ = fscores.loc[(dataSplit['Split'] == i)]
f2.append(np.mean(f2_))
f3.append(np.var(f2_))
score1 = pd.DataFrame.dot(results[0].normaliza(dataSplited), f1[0])
score2 = pd.DataFrame.dot(results[0].normaliza(dataSplited), f1[1])
c = []
for i in range(len(score1.columns)):
c_ = np.corrcoef(score1.ix[:, i], score2.ix[:, i])
c.append(c_[0][1])
mean_diff = f2[0] - f2[1]
log_diff = np.log(f3[0]) - np.log(f3[1])
# print(log_diff.values)
return c, mean_diff, log_diff
except:
return None
def __init__(self, br, cores, dados, LVcsv, Mcsv, scheme='path', reg='ols', h=0, maximo=300, stopCrit=7, nclusters=2, population=None, g1=None, g2=None, segmento=None):
self.data = dados
self.LVcsv = LVcsv
self.Mcsv = Mcsv
self.maximo = maximo
self.stopCriterion = stopCrit
self.h = h
self.br = br
self.cores = cores
self.scheme = scheme
self.reg = reg
self.nclusters = nclusters
self.population = population
self.g1 = g1
self.g2 = g2
self.segmento = segmento
def boot(self):
p = Pool(self.cores)
result = p.map(self.do_work, range(self.br))
p.close()
p.join()
return result
def jk(self):
p = Pool(self.cores)
base = np.arange(0, len(self.data))
self.indices = list(np.delete(base, i) for i in base)
result = p.map(self.do_work_jk, range(self.br))
p.close()
p.join()
return result
def permuta(self):
self.dataPermuta = (self.data.loc[(self.data[self.segmento] == self.g1) | (
self.data[self.segmento] == self.g2)]).drop(self.segmento, axis=1)
self.dataPermuta.index = range(len(self.dataPermuta))
self.leng1 = len(self.data.loc[(self.data[self.segmento] == self.g1)])
self.leng2 = len(self.data.loc[(self.data[self.segmento] == self.g2)])
self.lenT = self.leng1 + self.leng2
p = Pool(self.cores)
result = p.map(self.do_work_permuta, range(self.br))
p.close()
p.join()
return result
def gac(self):
p = Pool(self.cores)
result = p.map(self.do_work_ga, range(self.br))
p.close()
p.join()
return result
def pso(self):
p = Pool(self.cores)
result = p.map(self.do_work_pso, range(self.br))
p.close()
p.join()
return result
def tabu(self):
p = Pool(self.cores)
result = p.map(self.do_work_tabu, range(self.br))
p.close()
p.join()
return result
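# A minimal usage sketch (illustrative only): 'data.csv', 'lvs.csv' and 'measures.csv' are
# hypothetical file names and the arguments simply mirror the __init__ signature above.
if __name__ == '__main__':
    data = pd.read_csv('data.csv')
    # 100 bootstrap resamples of the PLS path model, distributed over 4 worker processes
    boot = PyLSboot(100, 4, data, 'lvs.csv', 'measures.csv', scheme='path', reg='ols')
    estimates = boot.boot()
    # each resample that converges yields [path_matrix.values]; failed ones return None
    paths = np.array([r[0] for r in estimates if r is not None])
    print(paths.mean(axis=0))  # bootstrap mean of the path coefficients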
| mit |
petteyg/intellij-community | python/helpers/pydev/pydevd.py | 15 | 100137 | #IMPORTANT: pydevd_constants must be the 1st thing defined because it'll keep a reference to the original sys._getframe
from __future__ import nested_scopes # Jython 2.1 support
import pydev_monkey_qt
from pydevd_utils import save_main_module
pydev_monkey_qt.patch_qt()
import traceback
from pydevd_frame_utils import add_exception_to_frame
import pydev_imports
from pydevd_breakpoints import * #@UnusedWildImport
import fix_getpass
from pydevd_comm import CMD_CHANGE_VARIABLE, \
CMD_EVALUATE_EXPRESSION, \
CMD_EXEC_EXPRESSION, \
CMD_GET_COMPLETIONS, \
CMD_GET_FRAME, \
CMD_GET_VARIABLE, \
CMD_GET_ARRAY, \
CMD_LIST_THREADS, \
CMD_REMOVE_BREAK, \
CMD_RUN, \
CMD_SET_BREAK, \
CMD_SET_NEXT_STATEMENT,\
CMD_STEP_INTO, \
CMD_STEP_OVER, \
CMD_STEP_RETURN, \
CMD_STEP_INTO_MY_CODE, \
CMD_THREAD_KILL, \
CMD_THREAD_RUN, \
CMD_THREAD_SUSPEND, \
CMD_RUN_TO_LINE, \
CMD_RELOAD_CODE, \
CMD_VERSION, \
CMD_CONSOLE_EXEC, \
CMD_ADD_EXCEPTION_BREAK, \
CMD_REMOVE_EXCEPTION_BREAK, \
CMD_LOAD_SOURCE, \
CMD_ADD_DJANGO_EXCEPTION_BREAK, \
CMD_REMOVE_DJANGO_EXCEPTION_BREAK, \
CMD_SMART_STEP_INTO,\
InternalChangeVariable, \
InternalGetCompletions, \
InternalEvaluateExpression, \
InternalConsoleExec, \
InternalGetFrame, \
InternalGetVariable, \
InternalGetArray, \
InternalTerminateThread, \
InternalRunThread, \
InternalStepThread, \
NetCommandFactory, \
PyDBDaemonThread, \
_queue, \
ReaderThread, \
SetGlobalDebugger, \
WriterThread, \
PydevdFindThreadById, \
PydevdLog, \
StartClient, \
StartServer, \
InternalSetNextStatementThread, \
ReloadCodeCommand, \
CMD_SET_PY_EXCEPTION, \
CMD_IGNORE_THROWN_EXCEPTION_AT,\
InternalGetBreakpointException, \
InternalSendCurrExceptionTrace,\
InternalSendCurrExceptionTraceProceeded,\
CMD_ENABLE_DONT_TRACE, \
CMD_GET_FILE_CONTENTS,\
CMD_SET_PROPERTY_TRACE, CMD_RUN_CUSTOM_OPERATION,\
InternalRunCustomOperation, CMD_EVALUATE_CONSOLE_EXPRESSION, InternalEvaluateConsoleExpression,\
InternalConsoleGetCompletions
from pydevd_file_utils import NormFileToServer, GetFilenameAndBase
import pydevd_file_utils
import pydevd_vars
import pydevd_vm_type
import pydevd_tracing
import pydevd_io
from pydevd_additional_thread_info import PyDBAdditionalThreadInfo
from pydevd_custom_frames import CustomFramesContainer, CustomFramesContainerInit
import pydevd_dont_trace
import pydevd_traceproperty
from _pydev_imps import _pydev_time as time, _pydev_thread
import _pydev_threading as threading
import os
import atexit
SUPPORT_PLUGINS = not IS_JYTH_LESS25
PluginManager = None
if SUPPORT_PLUGINS:
from pydevd_plugin_utils import PluginManager
if IS_PY3K:
import pkgutil
else:
from _pydev_imps import _pydev_pkgutil_old as pkgutil
threadingEnumerate = threading.enumerate
threadingCurrentThread = threading.currentThread
try:
'dummy'.encode('utf-8') # Added because otherwise Jython 2.2.1 wasn't finding the encoding (if it wasn't loaded in the main thread).
except:
pass
LIB_FILE = 0
PYDEV_FILE = 1
DONT_TRACE = {
# commonly used things from the stdlib that we don't want to trace
'Queue.py':LIB_FILE,
'queue.py':LIB_FILE,
'socket.py':LIB_FILE,
'weakref.py':LIB_FILE,
'_weakrefset.py':LIB_FILE,
'linecache.py':LIB_FILE,
'threading.py':LIB_FILE,
    # third-party libs that we don't want to trace
'_pydev_pluginbase.py':PYDEV_FILE,
'_pydev_pkgutil_old.py':PYDEV_FILE,
'_pydev_uuid_old.py':PYDEV_FILE,
#things from pydev that we don't want to trace
'_pydev_execfile.py':PYDEV_FILE,
'_pydev_jython_execfile.py':PYDEV_FILE,
'_pydev_threading':PYDEV_FILE,
'_pydev_Queue':PYDEV_FILE,
'django_debug.py':PYDEV_FILE,
'jinja2_debug.py':PYDEV_FILE,
'pydev_log.py':PYDEV_FILE,
'pydev_monkey.py':PYDEV_FILE,
'pydev_monkey_qt.py':PYDEV_FILE,
'pydevd.py':PYDEV_FILE,
'pydevd_additional_thread_info.py':PYDEV_FILE,
'pydevd_breakpoints.py':PYDEV_FILE,
'pydevd_comm.py':PYDEV_FILE,
'pydevd_console.py':PYDEV_FILE,
'pydevd_constants.py':PYDEV_FILE,
'pydevd_custom_frames.py':PYDEV_FILE,
'pydevd_dont_trace.py':PYDEV_FILE,
'pydevd_exec.py':PYDEV_FILE,
'pydevd_exec2.py':PYDEV_FILE,
'pydevd_file_utils.py':PYDEV_FILE,
'pydevd_frame.py':PYDEV_FILE,
'pydevd_import_class.py':PYDEV_FILE,
'pydevd_io.py':PYDEV_FILE,
'pydevd_psyco_stub.py':PYDEV_FILE,
'pydevd_referrers.py':PYDEV_FILE,
'pydevd_reload.py':PYDEV_FILE,
'pydevd_resolver.py':PYDEV_FILE,
'pydevd_save_locals.py':PYDEV_FILE,
'pydevd_signature.py':PYDEV_FILE,
'pydevd_stackless.py':PYDEV_FILE,
'pydevd_traceproperty.py':PYDEV_FILE,
'pydevd_tracing.py':PYDEV_FILE,
'pydevd_utils.py':PYDEV_FILE,
'pydevd_vars.py':PYDEV_FILE,
'pydevd_vm_type.py':PYDEV_FILE,
'pydevd_xml.py':PYDEV_FILE,
}
if IS_PY3K:
# if we try to trace io.py it seems it can get halted (see http://bugs.python.org/issue4716)
DONT_TRACE['io.py'] = LIB_FILE
# Don't trace common encodings too
DONT_TRACE['cp1252.py'] = LIB_FILE
DONT_TRACE['utf_8.py'] = LIB_FILE
connected = False
bufferStdOutToServer = False
bufferStdErrToServer = False
remote = False
from _pydev_filesystem_encoding import getfilesystemencoding
file_system_encoding = getfilesystemencoding()
# Hack for https://sw-brainwy.rhcloud.com/tracker/PyDev/363 (i.e.: calling isAlive() can throw AssertionError under some circumstances)
# It is required to debug threads started by start_new_thread in Python 3.4
_temp = threading.Thread()
if hasattr(_temp, '_is_stopped'): # Python 3.4 has this
def isThreadAlive(t):
try:
return not t._is_stopped
except:
return t.isAlive()
elif hasattr(_temp, '_Thread__stopped'): # Python 2.7 has this
def isThreadAlive(t):
try:
return not t._Thread__stopped
except:
return t.isAlive()
else: # Haven't checked all other versions, so, let's use the regular isAlive call in this case.
def isThreadAlive(t):
return t.isAlive()
del _temp
#=======================================================================================================================
# PyDBCommandThread
#=======================================================================================================================
class PyDBCommandThread(PyDBDaemonThread):
def __init__(self, pyDb):
PyDBDaemonThread.__init__(self)
self._py_db_command_thread_event = pyDb._py_db_command_thread_event
self.pyDb = pyDb
self.setName('pydevd.CommandThread')
def OnRun(self):
for i in xrange(1, 10):
            time.sleep(0.5) #this one will only start later on (because otherwise we may not have any non-daemon threads)
if self.killReceived:
return
if self.dontTraceMe:
self.pyDb.SetTrace(None) # no debugging on this thread
try:
while not self.killReceived:
try:
self.pyDb.processInternalCommands()
except:
PydevdLog(0, 'Finishing debug communication...(2)')
self._py_db_command_thread_event.clear()
self._py_db_command_thread_event.wait(0.5)
except:
pydev_log.debug(sys.exc_info()[0])
#only got this error in interpreter shutdown
#PydevdLog(0, 'Finishing debug communication...(3)')
def killAllPydevThreads():
threads = DictKeys(PyDBDaemonThread.created_pydb_daemon_threads)
for t in threads:
if hasattr(t, 'doKillPydevThread'):
t.doKillPydevThread()
#=======================================================================================================================
# CheckOutputThread
# Non-daemonic thread: guarantees that all data is written even if the program has finished
#=======================================================================================================================
class CheckOutputThread(PyDBDaemonThread):
def __init__(self, pyDb):
PyDBDaemonThread.__init__(self)
self.pyDb = pyDb
self.setName('pydevd.CheckAliveThread')
self.daemon = False
pyDb.output_checker = self
def OnRun(self):
if self.dontTraceMe:
disable_tracing = True
if pydevd_vm_type.GetVmType() == pydevd_vm_type.PydevdVmType.JYTHON and sys.hexversion <= 0x020201f0:
# don't run untraced threads if we're in jython 2.2.1 or lower
# jython bug: if we start a thread and another thread changes the tracing facility
# it affects other threads (it's not set only for the thread but globally)
# Bug: http://sourceforge.net/tracker/index.php?func=detail&aid=1870039&group_id=12867&atid=112867
disable_tracing = False
if disable_tracing:
pydevd_tracing.SetTrace(None) # no debugging on this thread
while not self.killReceived:
time.sleep(0.3)
if not self.pyDb.haveAliveThreads() and self.pyDb.writer.empty() \
and not has_data_to_redirect():
try:
pydev_log.debug("No alive threads, finishing debug session")
self.pyDb.FinishDebuggingSession()
killAllPydevThreads()
except:
traceback.print_exc()
self.killReceived = True
self.pyDb.checkOutputRedirect()
def doKillPydevThread(self):
self.killReceived = True
#=======================================================================================================================
# PyDB
#=======================================================================================================================
class PyDB:
""" Main debugging class
Lots of stuff going on here:
PyDB starts two threads on startup that connect to remote debugger (RDB)
The threads continuously read & write commands to RDB.
PyDB communicates with these threads through command queues.
Every RDB command is processed by calling processNetCommand.
Every PyDB net command is sent to the net by posting NetCommand to WriterThread queue
Some commands need to be executed on the right thread (suspend/resume & friends)
These are placed on the internal command queue.
"""
def __init__(self):
SetGlobalDebugger(self)
pydevd_tracing.ReplaceSysSetTraceFunc()
self.reader = None
self.writer = None
self.output_checker = None
self.quitting = None
self.cmdFactory = NetCommandFactory()
self._cmd_queue = {} # the hash of Queues. Key is thread id, value is thread
self.breakpoints = {}
self.file_to_id_to_line_breakpoint = {}
self.file_to_id_to_plugin_breakpoint = {}
# Note: breakpoints dict should not be mutated: a copy should be created
# and later it should be assigned back (to prevent concurrency issues).
self.break_on_uncaught_exceptions = {}
self.break_on_caught_exceptions = {}
self.readyToRun = False
self._main_lock = _pydev_thread.allocate_lock()
self._lock_running_thread_ids = _pydev_thread.allocate_lock()
self._py_db_command_thread_event = threading.Event()
CustomFramesContainer._py_db_command_thread_event = self._py_db_command_thread_event
self._finishDebuggingSession = False
self._terminationEventSent = False
self.signature_factory = None
self.SetTrace = pydevd_tracing.SetTrace
self.break_on_exceptions_thrown_in_same_context = False
self.ignore_exceptions_thrown_in_lines_with_ignore_exception = True
self.project_roots = None
# Suspend debugger even if breakpoint condition raises an exception
SUSPEND_ON_BREAKPOINT_EXCEPTION = True
self.suspend_on_breakpoint_exception = SUSPEND_ON_BREAKPOINT_EXCEPTION
# By default user can step into properties getter/setter/deleter methods
self.disable_property_trace = False
self.disable_property_getter_trace = False
self.disable_property_setter_trace = False
self.disable_property_deleter_trace = False
        #this is a dict of thread ids pointing to the corresponding thread objects. Whenever a command is passed to the
        #java end that acknowledges that a thread was created, the thread id should be passed here -- and if at some time
        #we do not find that thread alive anymore, we must remove it from this list and let the java side know that the
        #thread was killed.
self._running_thread_ids = {}
self._set_breakpoints_with_id = False
# This attribute holds the file-> lines which have an @IgnoreException.
self.filename_to_lines_where_exceptions_are_ignored = {}
#working with plugins (lazily initialized)
self.plugin = None
self.has_plugin_line_breaks = False
self.has_plugin_exception_breaks = False
# matplotlib support in debugger and debug console
self.mpl_in_use = False
self.mpl_hooks_in_debug_console = False
self.mpl_modules_for_patching = {}
def get_plugin_lazy_init(self):
if self.plugin is None and SUPPORT_PLUGINS:
self.plugin = PluginManager(self)
return self.plugin
def get_project_roots(self):
if self.project_roots is None:
roots = os.getenv('IDE_PROJECT_ROOTS', '').split(os.pathsep)
pydev_log.debug("IDE_PROJECT_ROOTS %s\n" % roots)
self.project_roots = roots
def not_in_scope(self, filename):
self.get_project_roots()
filename = os.path.normcase(filename)
for root in self.project_roots:
root = os.path.normcase(root)
if filename.startswith(root):
return False
return True
def first_appearance_in_scope(self, trace):
if trace is None or self.not_in_scope(trace.tb_frame.f_code.co_filename):
return False
else:
trace = trace.tb_next
while trace is not None:
frame = trace.tb_frame
if not self.not_in_scope(frame.f_code.co_filename):
return False
trace = trace.tb_next
return True
def haveAliveThreads(self):
for t in threadingEnumerate():
if getattr(t, 'is_pydev_daemon_thread', False):
#Important: Jython 2.5rc4 has a bug where a thread created with thread.start_new_thread won't be
#set as a daemon thread, so, we also have to check for the 'is_pydev_daemon_thread' flag.
#See: https://github.com/fabioz/PyDev.Debugger/issues/11
continue
if isinstance(t, PyDBDaemonThread):
pydev_log.error_once(
'Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.\n')
if isThreadAlive(t):
if not t.isDaemon() or hasattr(t, "__pydevd_main_thread"):
return True
return False
def FinishDebuggingSession(self):
self._finishDebuggingSession = True
def acquire(self):
if PyDBUseLocks:
self.lock.acquire()
return True
def release(self):
if PyDBUseLocks:
self.lock.release()
return True
def initializeNetwork(self, sock):
try:
sock.settimeout(None) # infinite, no timeouts from now on - jython does not have it
except:
pass
self.writer = WriterThread(sock)
self.reader = ReaderThread(sock)
self.writer.start()
self.reader.start()
time.sleep(0.1) # give threads time to start
def connect(self, host, port):
if host:
s = StartClient(host, port)
else:
s = StartServer(port)
self.initializeNetwork(s)
def getInternalQueue(self, thread_id):
""" returns internal command queue for a given thread.
if new queue is created, notify the RDB about it """
if thread_id.startswith('__frame__'):
thread_id = thread_id[thread_id.rfind('|') + 1:]
try:
return self._cmd_queue[thread_id]
except KeyError:
return self._cmd_queue.setdefault(thread_id, _queue.Queue()) #@UndefinedVariable
def postInternalCommand(self, int_cmd, thread_id):
""" if thread_id is *, post to all """
if thread_id == "*":
threads = threadingEnumerate()
for t in threads:
thread_id = GetThreadId(t)
queue = self.getInternalQueue(thread_id)
queue.put(int_cmd)
else:
queue = self.getInternalQueue(thread_id)
queue.put(int_cmd)
def checkOutputRedirect(self):
global bufferStdOutToServer
global bufferStdErrToServer
if bufferStdOutToServer:
initStdoutRedirect()
self.checkOutput(sys.stdoutBuf, 1) #@UndefinedVariable
if bufferStdErrToServer:
initStderrRedirect()
self.checkOutput(sys.stderrBuf, 2) #@UndefinedVariable
def checkOutput(self, out, outCtx):
'''Checks the output to see if we have to send some buffered output to the debug server
@param out: sys.stdout or sys.stderr
@param outCtx: the context indicating: 1=stdout and 2=stderr (to know the colors to write it)
'''
try:
v = out.getvalue()
if v:
self.cmdFactory.makeIoMessage(v, outCtx, self)
except:
traceback.print_exc()
def init_matplotlib_in_debug_console(self):
# import hook and patches for matplotlib support in debug console
from pydev_import_hook import import_hook_manager
for module in DictKeys(self.mpl_modules_for_patching):
import_hook_manager.add_module_name(module, DictPop(self.mpl_modules_for_patching, module))
def init_matplotlib_support(self):
# prepare debugger for integration with matplotlib GUI event loop
from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot, do_enable_gui
# enable_gui_function in activate_matplotlib should be called in main thread. Unlike integrated console,
# in the debug console we have no interpreter instance with exec_queue, but we run this code in the main
# thread and can call it directly.
class _MatplotlibHelper:
_return_control_osc = False
def return_control():
# Some of the input hooks (e.g. Qt4Agg) check return control without doing
# a single operation, so we don't return True on every
# call when the debug hook is in place to allow the GUI to run
_MatplotlibHelper._return_control_osc = not _MatplotlibHelper._return_control_osc
return _MatplotlibHelper._return_control_osc
from pydev_ipython.inputhook import set_return_control_callback
set_return_control_callback(return_control)
self.mpl_modules_for_patching = {"matplotlib": lambda: activate_matplotlib(do_enable_gui),
"matplotlib.pyplot": activate_pyplot,
"pylab": activate_pylab }
def processInternalCommands(self):
'''This function processes internal commands
'''
self._main_lock.acquire()
try:
self.checkOutputRedirect()
curr_thread_id = GetThreadId(threadingCurrentThread())
program_threads_alive = {}
all_threads = threadingEnumerate()
program_threads_dead = []
self._lock_running_thread_ids.acquire()
try:
for t in all_threads:
thread_id = GetThreadId(t)
if getattr(t, 'is_pydev_daemon_thread', False):
pass # I.e.: skip the DummyThreads created from pydev daemon threads
elif isinstance(t, PyDBDaemonThread):
pydev_log.error_once('Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.\n')
elif isThreadAlive(t):
program_threads_alive[thread_id] = t
if not DictContains(self._running_thread_ids, thread_id):
if not hasattr(t, 'additionalInfo'):
# see http://sourceforge.net/tracker/index.php?func=detail&aid=1955428&group_id=85796&atid=577329
# Let's create the additional info right away!
t.additionalInfo = PyDBAdditionalThreadInfo()
self._running_thread_ids[thread_id] = t
self.writer.addCommand(self.cmdFactory.makeThreadCreatedMessage(t))
queue = self.getInternalQueue(thread_id)
cmdsToReadd = [] # some commands must be processed by the thread itself... if that's the case,
# we will re-add the commands to the queue after executing.
try:
while True:
int_cmd = queue.get(False)
if not self.mpl_hooks_in_debug_console and isinstance(int_cmd, InternalConsoleExec):
# add import hooks for matplotlib patches if only debug console was started
try:
self.init_matplotlib_in_debug_console()
self.mpl_in_use = True
except:
PydevdLog(2, "Matplotlib support in debug console failed", traceback.format_exc())
finally:
self.mpl_hooks_in_debug_console = True
if int_cmd.canBeExecutedBy(curr_thread_id):
PydevdLog(2, "processing internal command ", str(int_cmd))
int_cmd.doIt(self)
else:
PydevdLog(2, "NOT processing internal command ", str(int_cmd))
cmdsToReadd.append(int_cmd)
except _queue.Empty: #@UndefinedVariable
for int_cmd in cmdsToReadd:
queue.put(int_cmd)
# this is how we exit
thread_ids = list(self._running_thread_ids.keys())
for tId in thread_ids:
if not DictContains(program_threads_alive, tId):
program_threads_dead.append(tId)
finally:
self._lock_running_thread_ids.release()
for tId in program_threads_dead:
try:
self.processThreadNotAlive(tId)
except:
sys.stderr.write('Error iterating through %s (%s) - %s\n' % (
program_threads_alive, program_threads_alive.__class__, dir(program_threads_alive)))
raise
if len(program_threads_alive) == 0:
self.FinishDebuggingSession()
for t in all_threads:
if hasattr(t, 'doKillPydevThread'):
t.doKillPydevThread()
finally:
self._main_lock.release()
def setTracingForUntracedContexts(self, ignore_frame=None, overwrite_prev_trace=False):
# Enable the tracing for existing threads (because there may be frames being executed that
# are currently untraced).
threads = threadingEnumerate()
try:
for t in threads:
# TODO: optimize so that we only actually add that tracing if it's in
# the new breakpoint context.
additionalInfo = None
try:
additionalInfo = t.additionalInfo
except AttributeError:
pass # that's ok, no info currently set
if additionalInfo is not None:
for frame in additionalInfo.IterFrames():
if frame is not ignore_frame:
self.SetTraceForFrameAndParents(frame, overwrite_prev_trace=overwrite_prev_trace)
finally:
frame = None
t = None
threads = None
additionalInfo = None
def consolidate_breakpoints(self, file, id_to_breakpoint, breakpoints):
break_dict = {}
for breakpoint_id, pybreakpoint in DictIterItems(id_to_breakpoint):
break_dict[pybreakpoint.line] = pybreakpoint
breakpoints[file] = break_dict
def add_break_on_exception(
self,
exception,
notify_always,
notify_on_terminate,
notify_on_first_raise_only,
ignore_libraries=False
):
try:
eb = ExceptionBreakpoint(
exception,
notify_always,
notify_on_terminate,
notify_on_first_raise_only,
ignore_libraries
)
except ImportError:
pydev_log.error("Error unable to add break on exception for: %s (exception could not be imported)\n" % (exception,))
return None
if eb.notify_on_terminate:
cp = self.break_on_uncaught_exceptions.copy()
cp[exception] = eb
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.error("Exceptions to hook on terminate: %s\n" % (cp,))
self.break_on_uncaught_exceptions = cp
if eb.notify_always:
cp = self.break_on_caught_exceptions.copy()
cp[exception] = eb
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.error("Exceptions to hook always: %s\n" % (cp,))
self.break_on_caught_exceptions = cp
return eb
def update_after_exceptions_added(self, added):
updated_on_caught = False
updated_on_uncaught = False
for eb in added:
if not updated_on_uncaught and eb.notify_on_terminate:
updated_on_uncaught = True
update_exception_hook(self)
if not updated_on_caught and eb.notify_always:
updated_on_caught = True
self.setTracingForUntracedContexts()
def processNetCommand(self, cmd_id, seq, text):
'''Processes a command received from the Java side
@param cmd_id: the id of the command
@param seq: the sequence of the command
@param text: the text received in the command
        @note: this method is implemented as one big if..elif switch. After some testing, it's not clear whether changing
        it to a dict of cmd_id --> handler would actually perform better: a simple test with xrange(10000000) showed that
        the gain of looking the handler up quickly is lost to the extra function call, so with around 10 choices the
        if..elif chain is still faster, while a growing number of choices favors the dispatch dict. If this ever grows
        past 20-25 choices it may be worth refactoring it (or, more simply, reordering the ifs so that the most used
        commands come first, which will probably give better performance).
'''
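        # Comment-only sketch of the dict-dispatch alternative weighed in the note above
        # (the handler names are hypothetical; the implementation below keeps the if..elif chain):
        #
        #     handlers = {CMD_RUN: self._handle_run, CMD_VERSION: self._handle_version, ...}
        #     handler = handlers.get(cmd_id, self._handle_unknown)
        #     cmd = handler(seq, text)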
#print(ID_TO_MEANING[str(cmd_id)], repr(text))
self._main_lock.acquire()
try:
try:
cmd = None
if cmd_id == CMD_RUN:
self.readyToRun = True
elif cmd_id == CMD_VERSION:
# response is version number
# ide_os should be 'WINDOWS' or 'UNIX'.
ide_os = 'WINDOWS'
# Breakpoints can be grouped by 'LINE' or by 'ID'.
breakpoints_by = 'LINE'
splitted = text.split('\t')
if len(splitted) == 1:
_local_version = splitted
elif len(splitted) == 2:
_local_version, ide_os = splitted
elif len(splitted) == 3:
_local_version, ide_os, breakpoints_by = splitted
if breakpoints_by == 'ID':
self._set_breakpoints_with_id = True
else:
self._set_breakpoints_with_id = False
pydevd_file_utils.set_ide_os(ide_os)
cmd = self.cmdFactory.makeVersionMessage(seq)
elif cmd_id == CMD_LIST_THREADS:
# response is a list of threads
cmd = self.cmdFactory.makeListThreadsMessage(seq)
elif cmd_id == CMD_THREAD_KILL:
int_cmd = InternalTerminateThread(text)
self.postInternalCommand(int_cmd, text)
elif cmd_id == CMD_THREAD_SUSPEND:
# Yes, thread suspend is still done at this point, not through an internal command!
t = PydevdFindThreadById(text)
if t:
additionalInfo = None
try:
additionalInfo = t.additionalInfo
except AttributeError:
pass # that's ok, no info currently set
if additionalInfo is not None:
for frame in additionalInfo.IterFrames():
self.SetTraceForFrameAndParents(frame)
del frame
self.setSuspend(t, CMD_THREAD_SUSPEND)
elif text.startswith('__frame__:'):
sys.stderr.write("Can't suspend tasklet: %s\n" % (text,))
elif cmd_id == CMD_THREAD_RUN:
t = PydevdFindThreadById(text)
if t:
thread_id = GetThreadId(t)
int_cmd = InternalRunThread(thread_id)
self.postInternalCommand(int_cmd, thread_id)
elif text.startswith('__frame__:'):
sys.stderr.write("Can't make tasklet run: %s\n" % (text,))
elif cmd_id == CMD_STEP_INTO or cmd_id == CMD_STEP_OVER or cmd_id == CMD_STEP_RETURN or \
cmd_id == CMD_STEP_INTO_MY_CODE:
# we received some command to make a single step
t = PydevdFindThreadById(text)
if t:
thread_id = GetThreadId(t)
int_cmd = InternalStepThread(thread_id, cmd_id)
self.postInternalCommand(int_cmd, thread_id)
elif text.startswith('__frame__:'):
sys.stderr.write("Can't make tasklet step command: %s\n" % (text,))
elif cmd_id == CMD_RUN_TO_LINE or cmd_id == CMD_SET_NEXT_STATEMENT or cmd_id == CMD_SMART_STEP_INTO:
# we received some command to make a single step
thread_id, line, func_name = text.split('\t', 2)
t = PydevdFindThreadById(thread_id)
if t:
int_cmd = InternalSetNextStatementThread(thread_id, cmd_id, line, func_name)
self.postInternalCommand(int_cmd, thread_id)
elif thread_id.startswith('__frame__:'):
sys.stderr.write("Can't set next statement in tasklet: %s\n" % (thread_id,))
elif cmd_id == CMD_RELOAD_CODE:
# we received some command to make a reload of a module
module_name = text.strip()
thread_id = '*' # Any thread
# Note: not going for the main thread because in this case it'd only do the load
# when we stopped on a breakpoint.
# for tid, t in self._running_thread_ids.items(): #Iterate in copy
# thread_name = t.getName()
#
# print thread_name, GetThreadId(t)
# #Note: if possible, try to reload on the main thread
# if thread_name == 'MainThread':
# thread_id = tid
int_cmd = ReloadCodeCommand(module_name, thread_id)
self.postInternalCommand(int_cmd, thread_id)
elif cmd_id == CMD_CHANGE_VARIABLE:
# the text is: thread\tstackframe\tFRAME|GLOBAL\tattribute_to_change\tvalue_to_change
try:
thread_id, frame_id, scope, attr_and_value = text.split('\t', 3)
tab_index = attr_and_value.rindex('\t')
attr = attr_and_value[0:tab_index].replace('\t', '.')
value = attr_and_value[tab_index + 1:]
int_cmd = InternalChangeVariable(seq, thread_id, frame_id, scope, attr, value)
self.postInternalCommand(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_GET_VARIABLE:
# we received some command to get a variable
# the text is: thread_id\tframe_id\tFRAME|GLOBAL\tattributes*
try:
thread_id, frame_id, scopeattrs = text.split('\t', 2)
if scopeattrs.find('\t') != -1: # there are attributes beyond scope
scope, attrs = scopeattrs.split('\t', 1)
else:
scope, attrs = (scopeattrs, None)
int_cmd = InternalGetVariable(seq, thread_id, frame_id, scope, attrs)
self.postInternalCommand(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_GET_ARRAY:
# we received some command to get an array variable
                    # the text is: roffset\tcoffset\trows\tcols\tformat\tthread_id\tframe_id\tFRAME|GLOBAL\tattributes*
try:
roffset, coffset, rows, cols, format, thread_id, frame_id, scopeattrs = text.split('\t', 7)
if scopeattrs.find('\t') != -1: # there are attributes beyond scope
scope, attrs = scopeattrs.split('\t', 1)
else:
scope, attrs = (scopeattrs, None)
int_cmd = InternalGetArray(seq, roffset, coffset, rows, cols, format, thread_id, frame_id, scope, attrs)
self.postInternalCommand(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_GET_COMPLETIONS:
                    # we received some command to get completions
                    # the text is: thread_id\tframe_id\tscope\tactivation token
try:
thread_id, frame_id, scope, act_tok = text.split('\t', 3)
int_cmd = InternalGetCompletions(seq, thread_id, frame_id, act_tok)
self.postInternalCommand(int_cmd, thread_id)
except:
traceback.print_exc()
elif cmd_id == CMD_GET_FRAME:
thread_id, frame_id, scope = text.split('\t', 2)
int_cmd = InternalGetFrame(seq, thread_id, frame_id)
self.postInternalCommand(int_cmd, thread_id)
elif cmd_id == CMD_SET_BREAK:
# func name: 'None': match anything. Empty: match global, specified: only method context.
# command to add some breakpoint.
                    # text is: [breakpoint_id\t]type\tfile\tline\tfunc_name\tcondition\texpression. Add to breakpoints dictionary
if self._set_breakpoints_with_id:
breakpoint_id, type, file, line, func_name, condition, expression = text.split('\t', 6)
breakpoint_id = int(breakpoint_id)
line = int(line)
# We must restore new lines and tabs as done in
# AbstractDebugTarget.breakpointAdded
condition = condition.replace("@_@NEW_LINE_CHAR@_@", '\n').\
replace("@_@TAB_CHAR@_@", '\t').strip()
expression = expression.replace("@_@NEW_LINE_CHAR@_@", '\n').\
replace("@_@TAB_CHAR@_@", '\t').strip()
else:
#Note: this else should be removed after PyCharm migrates to setting
#breakpoints by id (and ideally also provides func_name).
type, file, line, func_name, condition, expression = text.split('\t', 5)
# If we don't have an id given for each breakpoint, consider
# the id to be the line.
breakpoint_id = line = int(line)
condition = condition.replace("@_@NEW_LINE_CHAR@_@", '\n'). \
replace("@_@TAB_CHAR@_@", '\t').strip()
expression = expression.replace("@_@NEW_LINE_CHAR@_@", '\n'). \
replace("@_@TAB_CHAR@_@", '\t').strip()
if not IS_PY3K: # In Python 3, the frame object will have unicode for the file, whereas on python 2 it has a byte-array encoded with the filesystem encoding.
file = file.encode(file_system_encoding)
file = NormFileToServer(file)
if not pydevd_file_utils.exists(file):
sys.stderr.write('pydev debugger: warning: trying to add breakpoint'\
' to file that does not exist: %s (will have no effect)\n' % (file,))
sys.stderr.flush()
if len(condition) <= 0 or condition is None or condition == "None":
condition = None
if len(expression) <= 0 or expression is None or expression == "None":
expression = None
supported_type = False
if type == 'python-line':
breakpoint = LineBreakpoint(line, condition, func_name, expression)
breakpoints = self.breakpoints
file_to_id_to_breakpoint = self.file_to_id_to_line_breakpoint
supported_type = True
else:
result = None
plugin = self.get_plugin_lazy_init()
if plugin is not None:
result = plugin.add_breakpoint('add_line_breakpoint', self, type, file, line, condition, expression, func_name)
if result is not None:
supported_type = True
breakpoint, breakpoints = result
file_to_id_to_breakpoint = self.file_to_id_to_plugin_breakpoint
else:
supported_type = False
if not supported_type:
raise NameError(type)
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.debug('Added breakpoint:%s - line:%s - func_name:%s\n' % (file, line, func_name.encode('utf-8')))
sys.stderr.flush()
if DictContains(file_to_id_to_breakpoint, file):
id_to_pybreakpoint = file_to_id_to_breakpoint[file]
else:
id_to_pybreakpoint = file_to_id_to_breakpoint[file] = {}
id_to_pybreakpoint[breakpoint_id] = breakpoint
self.consolidate_breakpoints(file, id_to_pybreakpoint, breakpoints)
if self.plugin is not None:
self.has_plugin_line_breaks = self.plugin.has_line_breaks()
self.setTracingForUntracedContexts(overwrite_prev_trace=True)
elif cmd_id == CMD_REMOVE_BREAK:
#command to remove some breakpoint
                    #text is: type\tfile\tbreakpoint_id. Remove from breakpoints dictionary
breakpoint_type, file, breakpoint_id = text.split('\t', 2)
if not IS_PY3K: # In Python 3, the frame object will have unicode for the file, whereas on python 2 it has a byte-array encoded with the filesystem encoding.
file = file.encode(file_system_encoding)
file = NormFileToServer(file)
try:
breakpoint_id = int(breakpoint_id)
except ValueError:
pydev_log.error('Error removing breakpoint. Expected breakpoint_id to be an int. Found: %s' % (breakpoint_id,))
else:
file_to_id_to_breakpoint = None
if breakpoint_type == 'python-line':
breakpoints = self.breakpoints
file_to_id_to_breakpoint = self.file_to_id_to_line_breakpoint
elif self.get_plugin_lazy_init() is not None:
result = self.plugin.get_breakpoints(self, breakpoint_type)
if result is not None:
file_to_id_to_breakpoint = self.file_to_id_to_plugin_breakpoint
breakpoints = result
if file_to_id_to_breakpoint is None:
pydev_log.error('Error removing breakpoint. Cant handle breakpoint of type %s' % breakpoint_type)
else:
try:
id_to_pybreakpoint = file_to_id_to_breakpoint.get(file, {})
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
existing = id_to_pybreakpoint[breakpoint_id]
sys.stderr.write('Removed breakpoint:%s - line:%s - func_name:%s (id: %s)\n' % (
file, existing.line, existing.func_name.encode('utf-8'), breakpoint_id))
del id_to_pybreakpoint[breakpoint_id]
self.consolidate_breakpoints(file, id_to_pybreakpoint, breakpoints)
if self.plugin is not None:
self.has_plugin_line_breaks = self.plugin.has_line_breaks()
except KeyError:
pydev_log.error("Error removing breakpoint: Breakpoint id not found: %s id: %s. Available ids: %s\n" % (
file, breakpoint_id, DictKeys(id_to_pybreakpoint)))
elif cmd_id == CMD_EVALUATE_EXPRESSION or cmd_id == CMD_EXEC_EXPRESSION:
#command to evaluate the given expression
#text is: thread\tstackframe\tLOCAL\texpression
thread_id, frame_id, scope, expression, trim = text.split('\t', 4)
int_cmd = InternalEvaluateExpression(seq, thread_id, frame_id, expression,
cmd_id == CMD_EXEC_EXPRESSION, int(trim) == 1)
self.postInternalCommand(int_cmd, thread_id)
elif cmd_id == CMD_CONSOLE_EXEC:
#command to exec expression in console, in case expression is only partially valid 'False' is returned
#text is: thread\tstackframe\tLOCAL\texpression
thread_id, frame_id, scope, expression = text.split('\t', 3)
int_cmd = InternalConsoleExec(seq, thread_id, frame_id, expression)
self.postInternalCommand(int_cmd, thread_id)
elif cmd_id == CMD_SET_PY_EXCEPTION:
# Command which receives set of exceptions on which user wants to break the debugger
                    # text is: break_on_uncaught;break_on_caught;break_in_same_context;ignore_lines_with_@IgnoreException;TypeError;ImportError;zipimport.ZipImportError;
# This API is optional and works 'in bulk' -- it's possible
# to get finer-grained control with CMD_ADD_EXCEPTION_BREAK/CMD_REMOVE_EXCEPTION_BREAK
# which allows setting caught/uncaught per exception.
#
splitted = text.split(';')
self.break_on_uncaught_exceptions = {}
self.break_on_caught_exceptions = {}
added = []
if len(splitted) >= 4:
if splitted[0] == 'true':
break_on_uncaught = True
else:
break_on_uncaught = False
if splitted[1] == 'true':
break_on_caught = True
else:
break_on_caught = False
if splitted[2] == 'true':
self.break_on_exceptions_thrown_in_same_context = True
else:
self.break_on_exceptions_thrown_in_same_context = False
if splitted[3] == 'true':
self.ignore_exceptions_thrown_in_lines_with_ignore_exception = True
else:
self.ignore_exceptions_thrown_in_lines_with_ignore_exception = False
for exception_type in splitted[4:]:
exception_type = exception_type.strip()
if not exception_type:
continue
exception_breakpoint = self.add_break_on_exception(
exception_type,
notify_always=break_on_caught,
notify_on_terminate=break_on_uncaught,
notify_on_first_raise_only=False,
)
if exception_breakpoint is None:
continue
added.append(exception_breakpoint)
self.update_after_exceptions_added(added)
else:
sys.stderr.write("Error when setting exception list. Received: %s\n" % (text,))
elif cmd_id == CMD_GET_FILE_CONTENTS:
if not IS_PY3K: # In Python 3, the frame object will have unicode for the file, whereas on python 2 it has a byte-array encoded with the filesystem encoding.
text = text.encode(file_system_encoding)
if os.path.exists(text):
f = open(text, 'r')
try:
source = f.read()
finally:
f.close()
cmd = self.cmdFactory.makeGetFileContents(seq, source)
elif cmd_id == CMD_SET_PROPERTY_TRACE:
# Command which receives whether to trace property getter/setter/deleter
# text is feature_state(true/false);disable_getter/disable_setter/disable_deleter
if text != "":
splitted = text.split(';')
if len(splitted) >= 3:
if self.disable_property_trace is False and splitted[0] == 'true':
# Replacing property by custom property only when the debugger starts
pydevd_traceproperty.replace_builtin_property()
self.disable_property_trace = True
# Enable/Disable tracing of the property getter
if splitted[1] == 'true':
self.disable_property_getter_trace = True
else:
self.disable_property_getter_trace = False
# Enable/Disable tracing of the property setter
if splitted[2] == 'true':
self.disable_property_setter_trace = True
else:
self.disable_property_setter_trace = False
# Enable/Disable tracing of the property deleter
if splitted[3] == 'true':
self.disable_property_deleter_trace = True
else:
self.disable_property_deleter_trace = False
else:
# User hasn't configured any settings for property tracing
pass
elif cmd_id == CMD_ADD_EXCEPTION_BREAK:
if text.find('\t') != -1:
exception, notify_always, notify_on_terminate, ignore_libraries = text.split('\t', 3)
else:
exception, notify_always, notify_on_terminate, ignore_libraries = text, 0, 0, 0
if exception.find('-') != -1:
type, exception = exception.split('-')
else:
type = 'python'
if type == 'python':
if int(notify_always) == 1:
pydev_log.warn("Deprecated parameter: 'notify always' policy removed in PyCharm\n")
exception_breakpoint = self.add_break_on_exception(
exception,
notify_always=int(notify_always) > 0,
notify_on_terminate = int(notify_on_terminate) == 1,
notify_on_first_raise_only=int(notify_always) == 2,
ignore_libraries=int(ignore_libraries) > 0
)
if exception_breakpoint is not None:
self.update_after_exceptions_added([exception_breakpoint])
else:
supported_type = False
plugin = self.get_plugin_lazy_init()
if plugin is not None:
supported_type = plugin.add_breakpoint('add_exception_breakpoint', self, type, exception)
if supported_type:
self.has_plugin_exception_breaks = self.plugin.has_exception_breaks()
else:
raise NameError(type)
elif cmd_id == CMD_REMOVE_EXCEPTION_BREAK:
exception = text
if exception.find('-') != -1:
type, exception = exception.split('-')
else:
type = 'python'
if type == 'python':
try:
cp = self.break_on_uncaught_exceptions.copy()
DictPop(cp, exception, None)
self.break_on_uncaught_exceptions = cp
cp = self.break_on_caught_exceptions.copy()
DictPop(cp, exception, None)
self.break_on_caught_exceptions = cp
except:
pydev_log.debug("Error while removing exception %s"%sys.exc_info()[0])
update_exception_hook(self)
else:
supported_type = False
# I.e.: no need to initialize lazy (if we didn't have it in the first place, we can't remove
# anything from it anyways).
plugin = self.plugin
if plugin is not None:
supported_type = plugin.remove_exception_breakpoint(self, type, exception)
if supported_type:
self.has_plugin_exception_breaks = self.plugin.has_exception_breaks()
else:
raise NameError(type)
elif cmd_id == CMD_LOAD_SOURCE:
path = text
try:
f = open(path, 'r')
source = f.read()
self.cmdFactory.makeLoadSourceMessage(seq, source, self)
except:
return self.cmdFactory.makeErrorMessage(seq, pydevd_tracing.GetExceptionTracebackStr())
elif cmd_id == CMD_ADD_DJANGO_EXCEPTION_BREAK:
exception = text
plugin = self.get_plugin_lazy_init()
if plugin is not None:
plugin.add_breakpoint('add_exception_breakpoint', self, 'django', exception)
self.has_plugin_exception_breaks = self.plugin.has_exception_breaks()
elif cmd_id == CMD_REMOVE_DJANGO_EXCEPTION_BREAK:
exception = text
# I.e.: no need to initialize lazy (if we didn't have it in the first place, we can't remove
# anything from it anyways).
plugin = self.plugin
if plugin is not None:
plugin.remove_exception_breakpoint(self, 'django', exception)
self.has_plugin_exception_breaks = self.plugin.has_exception_breaks()
elif cmd_id == CMD_EVALUATE_CONSOLE_EXPRESSION:
# Command which takes care for the debug console communication
if text != "":
thread_id, frame_id, console_command = text.split('\t', 2)
console_command, line = console_command.split('\t')
if console_command == 'EVALUATE':
int_cmd = InternalEvaluateConsoleExpression(seq, thread_id, frame_id, line)
elif console_command == 'GET_COMPLETIONS':
int_cmd = InternalConsoleGetCompletions(seq, thread_id, frame_id, line)
self.postInternalCommand(int_cmd, thread_id)
elif cmd_id == CMD_RUN_CUSTOM_OPERATION:
# Command which runs a custom operation
if text != "":
try:
location, custom = text.split('||', 1)
except:
sys.stderr.write('Custom operation now needs a || separator. Found: %s\n' % (text,))
raise
thread_id, frame_id, scopeattrs = location.split('\t', 2)
if scopeattrs.find('\t') != -1: # there are attributes beyond scope
scope, attrs = scopeattrs.split('\t', 1)
else:
scope, attrs = (scopeattrs, None)
# : style: EXECFILE or EXEC
# : encoded_code_or_file: file to execute or code
# : fname: name of function to be executed in the resulting namespace
style, encoded_code_or_file, fnname = custom.split('\t', 3)
int_cmd = InternalRunCustomOperation(seq, thread_id, frame_id, scope, attrs,
style, encoded_code_or_file, fnname)
self.postInternalCommand(int_cmd, thread_id)
elif cmd_id == CMD_IGNORE_THROWN_EXCEPTION_AT:
if text:
replace = 'REPLACE:' # Not all 3.x versions support u'REPLACE:', so, doing workaround.
if not IS_PY3K:
replace = unicode(replace)
if text.startswith(replace):
text = text[8:]
self.filename_to_lines_where_exceptions_are_ignored.clear()
if text:
for line in text.split('||'): # Can be bulk-created (one in each line)
filename, line_number = line.split('|')
if not IS_PY3K:
filename = filename.encode(file_system_encoding)
filename = NormFileToServer(filename)
if os.path.exists(filename):
lines_ignored = self.filename_to_lines_where_exceptions_are_ignored.get(filename)
if lines_ignored is None:
lines_ignored = self.filename_to_lines_where_exceptions_are_ignored[filename] = {}
lines_ignored[int(line_number)] = 1
else:
sys.stderr.write('pydev debugger: warning: trying to ignore exception thrown'\
' on file that does not exist: %s (will have no effect)\n' % (filename,))
elif cmd_id == CMD_ENABLE_DONT_TRACE:
if text:
                        true_str = 'true' # Not all 3.x versions support u'true', so, doing workaround.
if not IS_PY3K:
true_str = unicode(true_str)
mode = text.strip() == true_str
pydevd_dont_trace.trace_filter(mode)
else:
#I have no idea what this is all about
cmd = self.cmdFactory.makeErrorMessage(seq, "unexpected command " + str(cmd_id))
if cmd is not None:
self.writer.addCommand(cmd)
del cmd
except Exception:
traceback.print_exc()
cmd = self.cmdFactory.makeErrorMessage(seq,
"Unexpected exception in processNetCommand.\nInitial params: %s" % ((cmd_id, seq, text),))
self.writer.addCommand(cmd)
finally:
self._main_lock.release()
def processThreadNotAlive(self, threadId):
""" if thread is not alive, cancel trace_dispatch processing """
self._lock_running_thread_ids.acquire()
try:
thread = self._running_thread_ids.pop(threadId, None)
if thread is None:
return
wasNotified = thread.additionalInfo.pydev_notify_kill
if not wasNotified:
thread.additionalInfo.pydev_notify_kill = True
finally:
self._lock_running_thread_ids.release()
cmd = self.cmdFactory.makeThreadKilledMessage(threadId)
self.writer.addCommand(cmd)
def setSuspend(self, thread, stop_reason):
thread.additionalInfo.suspend_type = PYTHON_SUSPEND
thread.additionalInfo.pydev_state = STATE_SUSPEND
thread.stop_reason = stop_reason
# If conditional breakpoint raises any exception during evaluation send details to Java
if stop_reason == CMD_SET_BREAK and self.suspend_on_breakpoint_exception:
self.sendBreakpointConditionException(thread)
def sendBreakpointConditionException(self, thread):
"""If conditional breakpoint raises an exception during evaluation
send exception details to java
"""
thread_id = GetThreadId(thread)
conditional_breakpoint_exception_tuple = thread.additionalInfo.conditional_breakpoint_exception
# conditional_breakpoint_exception_tuple - should contain 2 values (exception_type, stacktrace)
if conditional_breakpoint_exception_tuple and len(conditional_breakpoint_exception_tuple) == 2:
exc_type, stacktrace = conditional_breakpoint_exception_tuple
int_cmd = InternalGetBreakpointException(thread_id, exc_type, stacktrace)
# Reset the conditional_breakpoint_exception details to None
thread.additionalInfo.conditional_breakpoint_exception = None
self.postInternalCommand(int_cmd, thread_id)
def sendCaughtExceptionStack(self, thread, arg, curr_frame_id):
"""Sends details on the exception which was caught (and where we stopped) to the java side.
arg is: exception type, description, traceback object
"""
thread_id = GetThreadId(thread)
int_cmd = InternalSendCurrExceptionTrace(thread_id, arg, curr_frame_id)
self.postInternalCommand(int_cmd, thread_id)
def sendCaughtExceptionStackProceeded(self, thread):
"""Sends that some thread was resumed and is no longer showing an exception trace.
"""
thread_id = GetThreadId(thread)
int_cmd = InternalSendCurrExceptionTraceProceeded(thread_id)
self.postInternalCommand(int_cmd, thread_id)
self.processInternalCommands()
def doWaitSuspend(self, thread, frame, event, arg): #@UnusedVariable
""" busy waits until the thread state changes to RUN
it expects thread's state as attributes of the thread.
Upon running, processes any outstanding Stepping commands.
"""
self.processInternalCommands()
message = getattr(thread.additionalInfo, "message", None)
cmd = self.cmdFactory.makeThreadSuspendMessage(GetThreadId(thread), frame, thread.stop_reason, message)
self.writer.addCommand(cmd)
CustomFramesContainer.custom_frames_lock.acquire()
try:
from_this_thread = []
for frame_id, custom_frame in DictIterItems(CustomFramesContainer.custom_frames):
if custom_frame.thread_id == thread.ident:
# print >> sys.stderr, 'Frame created: ', frame_id
self.writer.addCommand(self.cmdFactory.makeCustomFrameCreatedMessage(frame_id, custom_frame.name))
self.writer.addCommand(self.cmdFactory.makeThreadSuspendMessage(frame_id, custom_frame.frame, CMD_THREAD_SUSPEND, ""))
from_this_thread.append(frame_id)
finally:
CustomFramesContainer.custom_frames_lock.release()
imported = False
info = thread.additionalInfo
if info.pydev_state == STATE_SUSPEND and not self._finishDebuggingSession:
# before every stop check if matplotlib modules were imported inside script code
if len(self.mpl_modules_for_patching) > 0:
for module in DictKeys(self.mpl_modules_for_patching):
if module in sys.modules:
activate_function = DictPop(self.mpl_modules_for_patching, module)
activate_function()
self.mpl_in_use = True
while info.pydev_state == STATE_SUSPEND and not self._finishDebuggingSession:
if self.mpl_in_use:
# call input hooks if only matplotlib is in use
try:
if not imported:
from pydev_ipython.inputhook import get_inputhook
imported = True
inputhook = get_inputhook()
if inputhook:
inputhook()
except:
pass
self.processInternalCommands()
time.sleep(0.01)
# process any stepping instructions
if info.pydev_step_cmd == CMD_STEP_INTO or info.pydev_step_cmd == CMD_STEP_INTO_MY_CODE:
info.pydev_step_stop = None
info.pydev_smart_step_stop = None
elif info.pydev_step_cmd == CMD_STEP_OVER:
info.pydev_step_stop = frame
info.pydev_smart_step_stop = None
self.SetTraceForFrameAndParents(frame)
elif info.pydev_step_cmd == CMD_SMART_STEP_INTO:
self.SetTraceForFrameAndParents(frame)
info.pydev_step_stop = None
info.pydev_smart_step_stop = frame
elif info.pydev_step_cmd == CMD_RUN_TO_LINE or info.pydev_step_cmd == CMD_SET_NEXT_STATEMENT :
self.SetTraceForFrameAndParents(frame)
if event == 'line' or event == 'exception':
#If we're already in the correct context, we have to stop it now, because we can act only on
#line events -- if a return was the next statement it wouldn't work (so, we have this code
#repeated at pydevd_frame).
stop = False
curr_func_name = frame.f_code.co_name
#global context is set with an empty name
if curr_func_name in ('?', '<module>'):
curr_func_name = ''
if curr_func_name == info.pydev_func_name:
line = info.pydev_next_line
if frame.f_lineno == line:
stop = True
else :
if frame.f_trace is None:
frame.f_trace = self.trace_dispatch
frame.f_lineno = line
frame.f_trace = None
stop = True
if stop:
info.pydev_state = STATE_SUSPEND
self.doWaitSuspend(thread, frame, event, arg)
return
elif info.pydev_step_cmd == CMD_STEP_RETURN:
back_frame = frame.f_back
if back_frame is not None:
# steps back to the same frame (in a return call it will stop in the 'back frame' for the user)
info.pydev_step_stop = frame
self.SetTraceForFrameAndParents(frame)
else:
# No back frame?!? -- this happens in jython when we have some frame created from an awt event
                # (the previous frame would be the awt event, but that frame isn't part of 'jython', only of 'java')
# so, if we're doing a step return in this situation, it's the same as just making it run
info.pydev_step_stop = None
info.pydev_step_cmd = None
info.pydev_state = STATE_RUN
del frame
cmd = self.cmdFactory.makeThreadRunMessage(GetThreadId(thread), info.pydev_step_cmd)
self.writer.addCommand(cmd)
CustomFramesContainer.custom_frames_lock.acquire()
try:
# The ones that remained on last_running must now be removed.
for frame_id in from_this_thread:
# print >> sys.stderr, 'Removing created frame: ', frame_id
self.writer.addCommand(self.cmdFactory.makeThreadKilledMessage(frame_id))
finally:
CustomFramesContainer.custom_frames_lock.release()
def handle_post_mortem_stop(self, additionalInfo, t):
pydev_log.debug("We are stopping in post-mortem\n")
frame, frames_byid = additionalInfo.pydev_force_stop_at_exception
thread_id = GetThreadId(t)
pydevd_vars.addAdditionalFrameById(thread_id, frames_byid)
try:
try:
add_exception_to_frame(frame, additionalInfo.exception)
self.setSuspend(t, CMD_ADD_EXCEPTION_BREAK)
self.doWaitSuspend(t, frame, 'exception', None)
except:
pydev_log.error("We've got an error while stopping in post-mortem: %s\n"%sys.exc_info()[0])
finally:
additionalInfo.pydev_force_stop_at_exception = None
pydevd_vars.removeAdditionalFrameById(thread_id)
def trace_dispatch(self, frame, event, arg):
''' This is the callback used when we enter some context in the debugger.
We also decorate the thread we are in with info about the debugging.
The attributes added are:
pydev_state
pydev_step_stop
pydev_step_cmd
pydev_notify_kill
'''
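        # For orientation (comment-only): this method plays the role of a standard Python trace
        # function, i.e. the kind of callable that sys.settrace() and frame.f_trace expect:
        #
        #     def trace(frame, event, arg):   # event is 'call', 'line', 'return' or 'exception'
        #         print(frame.f_code.co_filename, frame.f_lineno, event)
        #         return trace                # returning a callable keeps tracing this frame
        #     sys.settrace(trace)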
try:
if self._finishDebuggingSession and not self._terminationEventSent:
#that was not working very well because jython gave some socket errors
try:
if self.output_checker is None:
killAllPydevThreads()
except:
traceback.print_exc()
self._terminationEventSent = True
return None
filename, base = GetFilenameAndBase(frame)
is_file_to_ignore = DictContains(DONT_TRACE, base) #we don't want to debug threading or anything related to pydevd
#print('trace_dispatch', base, frame.f_lineno, event, frame.f_code.co_name, is_file_to_ignore)
if is_file_to_ignore:
if DONT_TRACE[base] == LIB_FILE:
if self.not_in_scope(filename):
return None
else:
return None
try:
#this shouldn't give an exception, but it could happen... (python bug)
#see http://mail.python.org/pipermail/python-bugs-list/2007-June/038796.html
#and related bug: http://bugs.python.org/issue1733757
t = threadingCurrentThread()
except:
frame.f_trace = self.trace_dispatch
return self.trace_dispatch
try:
additionalInfo = t.additionalInfo
if additionalInfo is None:
raise AttributeError()
except:
t.additionalInfo = PyDBAdditionalThreadInfo()
additionalInfo = t.additionalInfo
if additionalInfo is None:
return None
if additionalInfo.is_tracing:
f = frame
while f is not None:
if 'trace_dispatch' == f.f_code.co_name:
_fname, bs = GetFilenameAndBase(f)
if bs == 'pydevd_frame.py':
                            return None #we don't want to trace code invoked from pydevd_frame.trace_dispatch
f = f.f_back
# if thread is not alive, cancel trace_dispatch processing
if not isThreadAlive(t):
self.processThreadNotAlive(GetThreadId(t))
return None # suspend tracing
# each new frame...
return additionalInfo.CreateDbFrame((self, filename, additionalInfo, t, frame)).trace_dispatch(frame, event, arg)
except SystemExit:
return None
except Exception:
# Log it
try:
if traceback is not None:
# This can actually happen during the interpreter shutdown in Python 2.7
traceback.print_exc()
except:
# Error logging? We're really in the interpreter shutdown...
# (https://github.com/fabioz/PyDev.Debugger/issues/8)
pass
return None
if USE_PSYCO_OPTIMIZATION:
try:
import psyco
trace_dispatch = psyco.proxy(trace_dispatch)
processNetCommand = psyco.proxy(processNetCommand)
processInternalCommands = psyco.proxy(processInternalCommands)
doWaitSuspend = psyco.proxy(doWaitSuspend)
getInternalQueue = psyco.proxy(getInternalQueue)
except ImportError:
if hasattr(sys, 'exc_clear'): # jython does not have it
sys.exc_clear() # don't keep the traceback (let's keep it clear for when we go to the point of executing client code)
if not IS_PY3K and not IS_PY27 and not IS_64_BITS and not sys.platform.startswith("java") and not sys.platform.startswith("cli"):
sys.stderr.write("pydev debugger: warning: psyco not available for speedups (the debugger will still work correctly, but a bit slower)\n")
sys.stderr.flush()
def SetTraceForFrameAndParents(self, frame, also_add_to_passed_frame=True, overwrite_prev_trace=False, dispatch_func=None):
if dispatch_func is None:
dispatch_func = self.trace_dispatch
if also_add_to_passed_frame:
self.update_trace(frame, dispatch_func, overwrite_prev_trace)
frame = frame.f_back
while frame:
self.update_trace(frame, dispatch_func, overwrite_prev_trace)
frame = frame.f_back
del frame
def update_trace(self, frame, dispatch_func, overwrite_prev):
if frame.f_trace is None:
frame.f_trace = dispatch_func
else:
if overwrite_prev:
frame.f_trace = dispatch_func
else:
try:
#If it's the trace_exception, go back to the frame trace dispatch!
if frame.f_trace.im_func.__name__ == 'trace_exception':
frame.f_trace = frame.f_trace.im_self.trace_dispatch
except AttributeError:
pass
frame = frame.f_back
del frame
def prepareToRun(self):
''' Shared code to prepare debugging by installing traces and registering threads '''
self.patch_threads()
pydevd_tracing.SetTrace(self.trace_dispatch)
PyDBCommandThread(self).start()
if self.signature_factory is not None:
# we need all data to be sent to IDE even after program finishes
CheckOutputThread(self).start()
def patch_threads(self):
try:
# not available in jython!
threading.settrace(self.trace_dispatch) # for all future threads
except:
pass
from pydev_monkey import patch_thread_modules
patch_thread_modules()
def get_fullname(self, mod_name):
try:
loader = pkgutil.get_loader(mod_name)
except:
return None
if loader is not None:
for attr in ("get_filename", "_get_filename"):
meth = getattr(loader, attr, None)
if meth is not None:
return meth(mod_name)
return None
def run(self, file, globals=None, locals=None, module=False, set_trace=True):
if module:
filename = self.get_fullname(file)
if filename is None:
sys.stderr.write("No module named %s\n" % file)
return
else:
file = filename
if os.path.isdir(file):
new_target = os.path.join(file, '__main__.py')
if os.path.isfile(new_target):
file = new_target
if globals is None:
m = save_main_module(file, 'pydevd')
globals = m.__dict__
try:
globals['__builtins__'] = __builtins__
except NameError:
pass # Not there on Jython...
if locals is None:
locals = globals
if set_trace:
# Predefined (writable) attributes: __name__ is the module's name;
# __doc__ is the module's documentation string, or None if unavailable;
# __file__ is the pathname of the file from which the module was loaded,
# if it was loaded from a file. The __file__ attribute is not present for
# C modules that are statically linked into the interpreter; for extension modules
# loaded dynamically from a shared library, it is the pathname of the shared library file.
            # I think this is an ugly hack, but it works (seems to) for the bug that says that sys.path should be the same in
# debug and run.
if m.__file__.startswith(sys.path[0]):
# print >> sys.stderr, 'Deleting: ', sys.path[0]
del sys.path[0]
# now, the local directory has to be added to the pythonpath
# sys.path.insert(0, os.getcwd())
# Changed: it's not the local directory, but the directory of the file launched
            # The file being run must be in the pythonpath (even if it was not before)
sys.path.insert(0, os.path.split(file)[0])
self.prepareToRun()
while not self.readyToRun:
time.sleep(0.1) # busy wait until we receive run command
try:
self.init_matplotlib_support()
except:
sys.stderr.write("Matplotlib support in debugger failed\n")
traceback.print_exc()
pydev_imports.execfile(file, globals, locals) # execute the script
def exiting(self):
sys.stdout.flush()
sys.stderr.flush()
self.checkOutputRedirect()
cmd = self.cmdFactory.makeExitMessage()
self.writer.addCommand(cmd)
def wait_for_commands(self, globals):
thread = threading.currentThread()
import pydevd_frame_utils
frame = pydevd_frame_utils.Frame(None, -1, pydevd_frame_utils.FCode("Console",
os.path.abspath(os.path.dirname(__file__))), globals, globals)
thread_id = GetThreadId(thread)
import pydevd_vars
pydevd_vars.addAdditionalFrameById(thread_id, {id(frame): frame})
cmd = self.cmdFactory.makeShowConsoleMessage(thread_id, frame)
self.writer.addCommand(cmd)
while True:
self.processInternalCommands()
time.sleep(0.01)
def set_debug(setup):
setup['DEBUG_RECORD_SOCKET_READS'] = True
setup['DEBUG_TRACE_BREAKPOINTS'] = 1
setup['DEBUG_TRACE_LEVEL'] = 3
def processCommandLine(argv):
""" parses the arguments.
removes our arguments from the command line """
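    # Illustrative example (hypothetical values, not taken from this file): an
    # invocation such as
    #     pydevd.py --port 5678 --client 127.0.0.1 --file my_script.py foo bar
    # produces setup['port'] == 5678, setup['client'] == '127.0.0.1' and
    # setup['file'] == 'my_script.py', while argv is left holding
    # ['my_script.py', 'foo', 'bar'] for the program being debugged.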
setup = {}
setup['client'] = ''
setup['server'] = False
setup['port'] = 0
setup['file'] = ''
setup['multiproc'] = False #Used by PyCharm (reuses connection: ssh tunneling)
setup['multiprocess'] = False # Used by PyDev (creates new connection to ide)
setup['save-signatures'] = False
setup['print-in-debugger-startup'] = False
setup['cmd-line'] = False
setup['module'] = False
i = 0
del argv[0]
while (i < len(argv)):
if argv[i] == '--port':
del argv[i]
setup['port'] = int(argv[i])
del argv[i]
elif argv[i] == '--vm_type':
del argv[i]
setup['vm_type'] = argv[i]
del argv[i]
elif argv[i] == '--client':
del argv[i]
setup['client'] = argv[i]
del argv[i]
elif argv[i] == '--server':
del argv[i]
setup['server'] = True
elif argv[i] == '--file':
del argv[i]
setup['file'] = argv[i]
i = len(argv) # pop out, file is our last argument
elif argv[i] == '--DEBUG_RECORD_SOCKET_READS':
del argv[i]
setup['DEBUG_RECORD_SOCKET_READS'] = True
elif argv[i] == '--DEBUG':
del argv[i]
set_debug(setup)
elif argv[i] == '--multiproc':
del argv[i]
setup['multiproc'] = True
elif argv[i] == '--multiprocess':
del argv[i]
setup['multiprocess'] = True
elif argv[i] == '--save-signatures':
del argv[i]
setup['save-signatures'] = True
elif argv[i] == '--print-in-debugger-startup':
del argv[i]
setup['print-in-debugger-startup'] = True
elif (argv[i] == '--cmd-line'):
del argv[i]
setup['cmd-line'] = True
elif (argv[i] == '--module'):
del argv[i]
setup['module'] = True
else:
raise ValueError("unexpected option " + argv[i])
return setup
def usage(doExit=0):
sys.stdout.write('Usage:\n')
sys.stdout.write('pydevd.py --port=N [(--client hostname) | --server] --file executable [file_options]\n')
if doExit:
sys.exit(0)
def initStdoutRedirect():
if not getattr(sys, 'stdoutBuf', None):
sys.stdoutBuf = pydevd_io.IOBuf()
sys.stdout_original = sys.stdout
sys.stdout = pydevd_io.IORedirector(sys.stdout, sys.stdoutBuf) #@UndefinedVariable
def initStderrRedirect():
if not getattr(sys, 'stderrBuf', None):
sys.stderrBuf = pydevd_io.IOBuf()
sys.stderr_original = sys.stderr
sys.stderr = pydevd_io.IORedirector(sys.stderr, sys.stderrBuf) #@UndefinedVariable
def has_data_to_redirect():
if getattr(sys, 'stdoutBuf', None):
if not sys.stdoutBuf.empty():
return True
if getattr(sys, 'stderrBuf', None):
if not sys.stderrBuf.empty():
return True
return False
#=======================================================================================================================
# settrace
#=======================================================================================================================
def settrace(
host=None,
stdoutToServer=False,
stderrToServer=False,
port=5678,
suspend=True,
trace_only_current_thread=False,
overwrite_prev_trace=False,
patch_multiprocessing=False,
):
'''Sets the tracing function with the pydev debug function and initializes needed facilities.
@param host: the user may specify another host, if the debug server is not in the same machine (default is the local
host)
@param stdoutToServer: when this is true, the stdout is passed to the debug server
@param stderrToServer: when this is true, the stderr is passed to the debug server
so that they are printed in its console and not in this process console.
@param port: specifies which port to use for communicating with the server (note that the server must be started
in the same port). @note: currently it's hard-coded at 5678 in the client
@param suspend: whether a breakpoint should be emulated as soon as this function is called.
@param trace_only_current_thread: determines if only the current thread will be traced or all current and future
threads will also have the tracing enabled.
@param overwrite_prev_trace: if True we'll reset the frame.f_trace of frames which are already being traced
@param patch_multiprocessing: if True we'll patch the functions which create new processes so that launched
processes are debugged.
'''
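    # A minimal usage sketch (illustrative; the host and port values below are
    # assumptions, not taken from this module):
    #
    #     import pydevd
    #     pydevd.settrace('10.0.0.5', port=5678, stdoutToServer=True,
    #                     stderrToServer=True, suspend=True)
    #
    # With suspend=True, execution pauses at this call until the IDE resumes it.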
_set_trace_lock.acquire()
try:
_locked_settrace(
host,
stdoutToServer,
stderrToServer,
port,
suspend,
trace_only_current_thread,
overwrite_prev_trace,
patch_multiprocessing,
)
finally:
_set_trace_lock.release()
_set_trace_lock = _pydev_thread.allocate_lock()
def _locked_settrace(
host,
stdoutToServer,
stderrToServer,
port,
suspend,
trace_only_current_thread,
overwrite_prev_trace,
patch_multiprocessing,
):
if patch_multiprocessing:
try:
import pydev_monkey #Jython 2.1 can't use it...
except:
pass
else:
pydev_monkey.patch_new_process_functions()
if host is None:
import pydev_localhost
host = pydev_localhost.get_localhost()
global connected
global bufferStdOutToServer
global bufferStdErrToServer
if not connected :
pydevd_vm_type.SetupType()
debugger = PyDB()
debugger.connect(host, port) # Note: connect can raise error.
# Mark connected only if it actually succeeded.
connected = True
bufferStdOutToServer = stdoutToServer
bufferStdErrToServer = stderrToServer
if bufferStdOutToServer:
initStdoutRedirect()
if bufferStdErrToServer:
initStderrRedirect()
debugger.SetTraceForFrameAndParents(GetFrame(), False, overwrite_prev_trace=overwrite_prev_trace)
CustomFramesContainer.custom_frames_lock.acquire()
try:
for _frameId, custom_frame in DictIterItems(CustomFramesContainer.custom_frames):
debugger.SetTraceForFrameAndParents(custom_frame.frame, False)
finally:
CustomFramesContainer.custom_frames_lock.release()
t = threadingCurrentThread()
try:
additionalInfo = t.additionalInfo
except AttributeError:
additionalInfo = PyDBAdditionalThreadInfo()
t.additionalInfo = additionalInfo
while not debugger.readyToRun:
time.sleep(0.1) # busy wait until we receive run command
        # note that we do that through pydevd_tracing.SetTrace so that the user
        # is not warned about the tracing change!
pydevd_tracing.SetTrace(debugger.trace_dispatch)
if not trace_only_current_thread:
# Trace future threads?
debugger.patch_threads()
# As this is the first connection, also set tracing for any untraced threads
debugger.setTracingForUntracedContexts(ignore_frame=GetFrame(), overwrite_prev_trace=overwrite_prev_trace)
# Stop the tracing as the last thing before the actual shutdown for a clean exit.
atexit.register(stoptrace)
PyDBCommandThread(debugger).start()
CheckOutputThread(debugger).start()
#Suspend as the last thing after all tracing is in place.
if suspend:
debugger.setSuspend(t, CMD_THREAD_SUSPEND)
else:
# ok, we're already in debug mode, with all set, so, let's just set the break
debugger = GetGlobalDebugger()
debugger.SetTraceForFrameAndParents(GetFrame(), False)
t = threadingCurrentThread()
try:
additionalInfo = t.additionalInfo
except AttributeError:
additionalInfo = PyDBAdditionalThreadInfo()
t.additionalInfo = additionalInfo
pydevd_tracing.SetTrace(debugger.trace_dispatch)
if not trace_only_current_thread:
# Trace future threads?
debugger.patch_threads()
if suspend:
debugger.setSuspend(t, CMD_THREAD_SUSPEND)
def stoptrace():
global connected
if connected:
pydevd_tracing.RestoreSysSetTraceFunc()
sys.settrace(None)
try:
#not available in jython!
threading.settrace(None) # for all future threads
except:
pass
from pydev_monkey import undo_patch_thread_modules
undo_patch_thread_modules()
debugger = GetGlobalDebugger()
if debugger:
debugger.SetTraceForFrameAndParents(
GetFrame(), also_add_to_passed_frame=True, overwrite_prev_trace=True, dispatch_func=lambda *args:None)
debugger.exiting()
killAllPydevThreads()
connected = False
class Dispatcher(object):
def __init__(self):
self.port = None
def connect(self, host, port):
self.host = host
self.port = port
self.client = StartClient(self.host, self.port)
self.reader = DispatchReader(self)
        self.reader.dontTraceMe = False #we run reader in the same thread so we don't want to lose tracing
self.reader.run()
def close(self):
try:
self.reader.doKillPydevThread()
except :
pass
class DispatchReader(ReaderThread):
def __init__(self, dispatcher):
self.dispatcher = dispatcher
ReaderThread.__init__(self, self.dispatcher.client)
def OnRun(self):
dummy_thread = threading.currentThread()
dummy_thread.is_pydev_daemon_thread = False
return ReaderThread.OnRun(self)
def handleExcept(self):
ReaderThread.handleExcept(self)
def processCommand(self, cmd_id, seq, text):
if cmd_id == 99:
self.dispatcher.port = int(text)
self.killReceived = True
DISPATCH_APPROACH_NEW_CONNECTION = 1 # Used by PyDev
DISPATCH_APPROACH_EXISTING_CONNECTION = 2 # Used by PyCharm
DISPATCH_APPROACH = DISPATCH_APPROACH_NEW_CONNECTION
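# Descriptive note: with DISPATCH_APPROACH_EXISTING_CONNECTION, dispatch() below
# briefly connects to the port already known to the IDE and waits for command 99,
# whose payload is the port this new process should actually use (see
# DispatchReader.processCommand); with the default approach, the host/port from
# the setup are returned unchanged.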
def dispatch():
setup = SetupHolder.setup
host = setup['client']
port = setup['port']
if DISPATCH_APPROACH == DISPATCH_APPROACH_EXISTING_CONNECTION:
dispatcher = Dispatcher()
try:
dispatcher.connect(host, port)
port = dispatcher.port
finally:
dispatcher.close()
return host, port
def settrace_forked():
'''
When creating a fork from a process in the debugger, we need to reset the whole debugger environment!
'''
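    # (Presumably reached in the child process via the pydev_monkey patches to
    # the process-creation functions, so that the forked process reconnects to
    # the IDE on its own port.)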
host, port = dispatch()
import pydevd_tracing
pydevd_tracing.RestoreSysSetTraceFunc()
if port is not None:
global connected
connected = False
CustomFramesContainerInit()
settrace(
host,
port=port,
suspend=False,
trace_only_current_thread=False,
overwrite_prev_trace=True,
patch_multiprocessing=True,
)
#=======================================================================================================================
# SetupHolder
#=======================================================================================================================
class SetupHolder:
setup = None
#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
# parse the command line. --file is our last argument that is required
try:
sys.original_argv = sys.argv[:]
setup = processCommandLine(sys.argv)
SetupHolder.setup = setup
except ValueError:
traceback.print_exc()
usage(1)
if setup['print-in-debugger-startup']:
try:
pid = ' (pid: %s)' % os.getpid()
except:
pid = ''
sys.stderr.write("pydev debugger: starting%s\n" % pid)
fix_getpass.fixGetpass()
pydev_log.debug("Executing file %s" % setup['file'])
pydev_log.debug("arguments: %s"% str(sys.argv))
pydevd_vm_type.SetupType(setup.get('vm_type', None))
if os.getenv('PYCHARM_DEBUG'):
set_debug(setup)
DebugInfoHolder.DEBUG_RECORD_SOCKET_READS = setup.get('DEBUG_RECORD_SOCKET_READS', False)
DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS = setup.get('DEBUG_TRACE_BREAKPOINTS', -1)
DebugInfoHolder.DEBUG_TRACE_LEVEL = setup.get('DEBUG_TRACE_LEVEL', -1)
port = setup['port']
host = setup['client']
f = setup['file']
fix_app_engine_debug = False
try:
import pydev_monkey
except:
pass #Not usable on jython 2.1
else:
if setup['multiprocess']: # PyDev
pydev_monkey.patch_new_process_functions()
elif setup['multiproc']: # PyCharm
pydev_log.debug("Started in multiproc mode\n")
# Note: we're not inside method, so, no need for 'global'
DISPATCH_APPROACH = DISPATCH_APPROACH_EXISTING_CONNECTION
dispatcher = Dispatcher()
try:
dispatcher.connect(host, port)
if dispatcher.port is not None:
port = dispatcher.port
pydev_log.debug("Received port %d\n" %port)
pydev_log.info("pydev debugger: process %d is connecting\n"% os.getpid())
try:
pydev_monkey.patch_new_process_functions()
except:
pydev_log.error("Error patching process functions\n")
traceback.print_exc()
else:
pydev_log.error("pydev debugger: couldn't get port for new debug process\n")
finally:
dispatcher.close()
else:
pydev_log.info("pydev debugger: starting\n")
try:
pydev_monkey.patch_new_process_functions_with_warning()
except:
pydev_log.error("Error patching process functions\n")
traceback.print_exc()
# Only do this patching if we're not running with multiprocess turned on.
if f.find('dev_appserver.py') != -1:
if os.path.basename(f).startswith('dev_appserver.py'):
appserver_dir = os.path.dirname(f)
version_file = os.path.join(appserver_dir, 'VERSION')
if os.path.exists(version_file):
try:
stream = open(version_file, 'r')
try:
for line in stream.read().splitlines():
line = line.strip()
if line.startswith('release:'):
line = line[8:].strip()
version = line.replace('"', '')
version = version.split('.')
if int(version[0]) > 1:
fix_app_engine_debug = True
elif int(version[0]) == 1:
if int(version[1]) >= 7:
# Only fix from 1.7 onwards
fix_app_engine_debug = True
break
finally:
stream.close()
except:
traceback.print_exc()
try:
# In the default run (i.e.: run directly on debug mode), we try to patch stackless as soon as possible
# on a run where we have a remote debug, we may have to be more careful because patching stackless means
        # that if the user already had a stackless.set_schedule_callback installed, he'd lose it and would need
# to call it again (because stackless provides no way of getting the last function which was registered
# in set_schedule_callback).
#
# So, ideally, if there's an application using stackless and the application wants to use the remote debugger
# and benefit from stackless debugging, the application itself must call:
#
# import pydevd_stackless
# pydevd_stackless.patch_stackless()
#
# itself to be able to benefit from seeing the tasklets created before the remote debugger is attached.
import pydevd_stackless
pydevd_stackless.patch_stackless()
except:
pass # It's ok not having stackless there...
debugger = PyDB()
is_module = setup['module']
if fix_app_engine_debug:
sys.stderr.write("pydev debugger: google app engine integration enabled\n")
curr_dir = os.path.dirname(__file__)
app_engine_startup_file = os.path.join(curr_dir, 'pydev_app_engine_debug_startup.py')
sys.argv.insert(1, '--python_startup_script=' + app_engine_startup_file)
import json
setup['pydevd'] = __file__
sys.argv.insert(2, '--python_startup_args=%s' % json.dumps(setup),)
sys.argv.insert(3, '--automatic_restart=no')
sys.argv.insert(4, '--max_module_instances=1')
# Run the dev_appserver
debugger.run(setup['file'], None, None, is_module, set_trace=False)
else:
        # as by the time we get here all our imports are already resolved, the psyco module can be
# changed and we'll still get the speedups in the debugger, as those functions
# are already compiled at this time.
try:
import psyco
except ImportError:
if hasattr(sys, 'exc_clear'): # jython does not have it
sys.exc_clear() # don't keep the traceback -- clients don't want to see it
pass # that's ok, no need to mock psyco if it's not available anyways
else:
            # if it's available, let's replace it with a stub (pydev already made use of it)
import pydevd_psyco_stub
sys.modules['psyco'] = pydevd_psyco_stub
if setup['save-signatures']:
if pydevd_vm_type.GetVmType() == pydevd_vm_type.PydevdVmType.JYTHON:
sys.stderr.write("Collecting run-time type information is not supported for Jython\n")
else:
# Only import it if we're going to use it!
from pydevd_signature import SignatureFactory
debugger.signature_factory = SignatureFactory()
try:
debugger.connect(host, port)
except:
sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
traceback.print_exc()
sys.exit(1)
connected = True # Mark that we're connected when started from inside ide.
globals = debugger.run(setup['file'], None, None, is_module)
if setup['cmd-line']:
debugger.wait_for_commands(globals)
| apache-2.0 |
lin-credible/scikit-learn | examples/mixture/plot_gmm_sin.py | 248 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
robin-lai/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 159 | 10196 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
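# Reference (brute-force) kernel density estimate, O(n_queries * n_samples);
# used below in test_ball_tree_kde to validate BallTree.kernel_density.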
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
jaidevd/scikit-learn | sklearn/linear_model/bayes.py | 14 | 19671 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : float
estimated precision of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
References
----------
D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,
Vol. 4, No. 3, 1992.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our self.alpha_
Their alpha is our self.lambda_
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
self.X_offset_ = X_offset_
self.X_scale_ = X_scale_
n_samples, n_features = X.shape
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
# Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
# Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ +
lambda_ / alpha_)[:, np.newaxis])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
# Update alpha and lambda
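            # Descriptive note: gamma_ below is the effective number of
            # well-determined parameters, sum_i alpha*s_i / (lambda + alpha*s_i)
            # over eigen_vals_ (the squared singular values of X); lambda and
            # alpha are then re-estimated with the usual evidence-maximization
            # updates:
            #   lambda <- (gamma_ + 2*lambda_1) / (sum(coef_**2) + 2*lambda_2)
            #   alpha  <- (n_samples - gamma_ + 2*alpha_1) / (rmse_ + 2*alpha_2)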
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_) /
(lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1) /
(np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /
(rmse_ + 2 * alpha_2))
# Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_) +
n_samples * log(alpha_) -
alpha_ * rmse_ -
(lambda_ * np.sum(coef_ ** 2)) -
logdet_sigma_ -
n_samples * log(2 * np.pi))
self.scores_.append(s)
# Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
sigma_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis])
self.sigma_ = (1. / alpha_) * sigma_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
return_std : boolean, optional
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array, shape = (n_samples,)
Mean of predictive distribution of query points.
y_std : array, shape = (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
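            # Predictive variance of each query point x is x^T Sigma x + 1/alpha,
            # i.e. weight uncertainty plus the estimated noise variance.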
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
    The estimation is done by an iterative procedure (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
References
----------
D. J. C. MacKay, Bayesian nonlinear modeling for the prediction
competition, ASHRAE Transactions, 1994.
R. Salakhutdinov, Lecture notes on Statistical Machine Learning,
http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15
Their beta is our self.alpha_
Their alpha is our self.lambda_
ARD is a little different than the slide: only dimensions/features for
which self.lambda_ < self.threshold_lambda are kept and the rest are
discarded.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
            Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
# Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
# Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
# Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
# Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *
X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
# Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /
((coef_[keep_lambda]) ** 2 +
2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /
(rmse_ + 2. * alpha_2))
# Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
# Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +
np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
# Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_offset_, y_offset_, X_scale_)
return self
def predict(self, X, return_std=False):
"""Predict using the linear model.
In addition to the mean of the predictive distribution, also its
standard deviation can be returned.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Samples.
return_std : boolean, optional
Whether to return the standard deviation of posterior prediction.
Returns
-------
y_mean : array, shape = (n_samples,)
Mean of predictive distribution of query points.
y_std : array, shape = (n_samples,)
Standard deviation of predictive distribution of query points.
"""
y_mean = self._decision_function(X)
if return_std is False:
return y_mean
else:
if self.normalize:
X = (X - self.X_offset_) / self.X_scale_
X = X[:, self.lambda_ < self.threshold_lambda]
sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)
y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))
return y_mean, y_std
| bsd-3-clause |
largelymfs/w2vtools | build/scipy/scipy/signal/spectral.py | 6 | 13467 | """Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import lombscargle
import warnings
from scipy.lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle']
def periodogram(x, fs=1.0, window=None, nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series in units of Hz. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is an array it will be used
directly as the window. Defaults to None; equivalent to 'boxcar'.
nfft : int, optional
Length of the FFT used. If None the length of `x` will be used.
detrend : str or function, optional
Specifies how to detrend `x` prior to computing the spectrum. If
`detrend` is a string, it is passed as the ``type`` argument to
`detrend`. If it is a function, it should return a detrended array.
Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz if `x` is measured in V and computing
the power spectrum ('spectrum') where `Pxx` has units of V**2 if `x` is
measured in V. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
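    # Note: the periodogram is computed as a single-segment Welch estimate --
    # one segment spanning the (possibly truncated) signal with zero overlap.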
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
def welch(x, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral density
by dividing the data into overlapping segments, computing a modified
periodogram for each segment and averaging the periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series in units of Hz. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg / 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where Pxx has units of V**2/Hz if x is measured in V and computing
the power spectrum ('spectrum') where Pxx has units of V**2 if x is
measured in V. Defaults to 'density'.
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
    overlap of 50% is a reasonable trade-off between accurately estimating
    the signal power and not over-counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
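    >>> # A further sketch (illustrative parameters, not a recommendation): with
    >>> # ``noverlap=0`` the averaging reduces to Bartlett's method mentioned in
    >>> # the Notes above.
    >>> f_bart, Pxx_bart = signal.welch(x, fs, nperseg=1024, noverlap=0)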
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if x.shape[-1] < nperseg:
        warnings.warn('nperseg = %d is greater than x.shape[%d] = %d, using '
'nperseg = x.shape[%d]'
% (nperseg, axis, x.shape[axis], axis))
nperseg = x.shape[-1]
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] > x.shape[-1]:
raise ValueError('window is longer than x.')
nperseg = win.shape[0]
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if noverlap is None:
noverlap = nperseg // 2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
if not hasattr(detrend, '__call__'):
detrend_func = lambda seg: signaltools.detrend(seg, type=detrend)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(seg):
seg = np.rollaxis(seg, -1, axis)
seg = detrend(seg)
return np.rollaxis(seg, axis, len(seg.shape))
else:
detrend_func = detrend
step = nperseg - noverlap
indices = np.arange(0, x.shape[-1]-nperseg+1, step)
if np.isrealobj(x) and return_onesided:
outshape = list(x.shape)
if nfft % 2 == 0: # even
outshape[-1] = nfft // 2 + 1
Pxx = np.empty(outshape, x.dtype)
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.rfft(x_dt*win, nfft)
# fftpack.rfft returns the positive frequency part of the fft
# as real values, packed r r i r i r i ...
# this indexing is to extract the matching real and imaginary
# parts, while also handling the pure real zero and nyquist
# frequencies.
if k == 0:
Pxx[..., (0,-1)] = xft[..., (0,-1)]**2
Pxx[..., 1:-1] = xft[..., 1:-1:2]**2 + xft[..., 2::2]**2
else:
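                    # fold the k-th modified periodogram into a running average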
Pxx *= k/(k+1.0)
Pxx[..., (0,-1)] += xft[..., (0,-1)]**2 / (k+1.0)
Pxx[..., 1:-1] += (xft[..., 1:-1:2]**2 + xft[..., 2::2]**2) \
/ (k+1.0)
else: # odd
outshape[-1] = (nfft+1) // 2
Pxx = np.empty(outshape, x.dtype)
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.rfft(x_dt*win, nfft)
if k == 0:
Pxx[..., 0] = xft[..., 0]**2
Pxx[..., 1:] = xft[..., 1::2]**2 + xft[..., 2::2]**2
else:
Pxx *= k/(k+1.0)
Pxx[..., 0] += xft[..., 0]**2 / (k+1)
Pxx[..., 1:] += (xft[..., 1::2]**2 + xft[..., 2::2]**2) \
/ (k+1.0)
Pxx[..., 1:-1] *= 2*scale
Pxx[..., (0,-1)] *= scale
f = np.arange(Pxx.shape[-1]) * (fs/nfft)
else:
for k, ind in enumerate(indices):
x_dt = detrend_func(x[..., ind:ind+nperseg])
xft = fftpack.fft(x_dt*win, nfft)
if k == 0:
Pxx = (xft * xft.conj()).real
else:
Pxx *= k/(k+1.0)
Pxx += (xft * xft.conj()).real / (k+1.0)
Pxx *= scale
f = fftpack.fftfreq(nfft, 1.0/fs)
if axis != -1:
Pxx = np.rollaxis(Pxx, -1, axis)
return f, Pxx
| mit |
nvoron23/statsmodels | examples/python/formulas.py | 33 | 4968 |
## Formulas: Fitting models using R-style formulas
# Since version 0.5.0, ``statsmodels`` allows users to fit statistical models using R-style formulas. Internally, ``statsmodels`` uses the [patsy](http://patsy.readthedocs.org/) package to convert formulas and data to the matrices that are used in model fitting. The formula framework is quite powerful; this tutorial only scratches the surface. A full description of the formula language can be found in the ``patsy`` docs:
#
# * [Patsy formula language description](http://patsy.readthedocs.org/)
#
# ## Loading modules and functions
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
##### Import convention
# You can import explicitly from statsmodels.formula.api
from statsmodels.formula.api import ols
# Alternatively, you can just use the `formula` namespace of the main `statsmodels.api`.
sm.formula.ols
# Or you can use the following convention
import statsmodels.formula.api as smf
# These names are just a convenient way to get access to each model's `from_formula` classmethod. See, for instance
sm.OLS.from_formula
# All of the lower case models accept ``formula`` and ``data`` arguments, whereas upper case ones take ``endog`` and ``exog`` design matrices. ``formula`` accepts a string which describes the model in terms of a ``patsy`` formula. ``data`` takes a [pandas](http://pandas.pydata.org/) data frame or any other data structure that defines a ``__getitem__`` for variable names like a structured array or a dictionary of variables.
#
# ``dir(sm.formula)`` will print a list of available models.
#
# Formula-compatible models have the following generic call signature: ``(formula, data, subset=None, *args, **kwargs)``
#
# ## OLS regression using formulas
#
# To begin, we fit the linear model described on the [Getting Started](gettingstarted.html) page. Download the data, subset columns, and list-wise delete to remove missing observations:
dta = sm.datasets.get_rdataset("Guerry", "HistData", cache=True)
df = dta.data[['Lottery', 'Literacy', 'Wealth', 'Region']].dropna()
df.head()
# Fit the model:
mod = ols(formula='Lottery ~ Literacy + Wealth + Region', data=df)
res = mod.fit()
print(res.summary())
# ## Categorical variables
#
# Looking at the summary printed above, notice that ``patsy`` determined that elements of *Region* were text strings, so it treated *Region* as a categorical variable. `patsy`'s default is also to include an intercept, so we automatically dropped one of the *Region* categories.
#
# If *Region* had been an integer variable that we wanted to treat explicitly as categorical, we could have done so by using the ``C()`` operator:
res = ols(formula='Lottery ~ Literacy + Wealth + C(Region)', data=df).fit()
print(res.params)
# Patsy's more advanced features for categorical variables are discussed in: [Patsy: Contrast Coding Systems for categorical variables](contrasts.html)
# ## Operators
#
# We have already seen that "~" separates the left-hand side of the model from the right-hand side, and that "+" adds new columns to the design matrix.
#
# ### Removing variables
#
# The "-" sign can be used to remove columns/variables. For instance, we can remove the intercept from a model by:
res = ols(formula='Lottery ~ Literacy + Wealth + C(Region) -1 ', data=df).fit()
print(res.params)
# ### Multiplicative interactions
#
# ":" adds a new column to the design matrix with the interaction of the other two columns. "*" will also include the individual columns that were multiplied together:
res1 = ols(formula='Lottery ~ Literacy : Wealth - 1', data=df).fit()
res2 = ols(formula='Lottery ~ Literacy * Wealth - 1', data=df).fit()
print(res1.params, '\n')
print(res2.params)
# Many other things are possible with operators. Please consult the [patsy docs](https://patsy.readthedocs.org/en/latest/formulas.html) to learn more.
# ## Functions
#
# You can apply vectorized functions to the variables in your model:
res = smf.ols(formula='Lottery ~ np.log(Literacy)', data=df).fit()
print(res.params)
# Define a custom function:
def log_plus_1(x):
return np.log(x) + 1.
res = smf.ols(formula='Lottery ~ log_plus_1(Literacy)', data=df).fit()
print(res.params)
# Any function that is in the calling namespace is available to the formula.
# ## Using formulas with models that do not (yet) support them
#
# Even if a given `statsmodels` function does not support formulas, you can still use `patsy`'s formula language to produce design matrices. Those matrices
# can then be fed to the fitting function as `endog` and `exog` arguments.
#
# To generate ``numpy`` arrays:
import patsy
f = 'Lottery ~ Literacy * Wealth'
y, X = patsy.dmatrices(f, df, return_type='matrix')
print(y[:5])
print(X[:5])
# To generate pandas data frames:
f = 'Lottery ~ Literacy * Wealth'
y,X = patsy.dmatrices(f, df, return_type='dataframe')
print(y[:5])
print(X[:5])
print(sm.OLS(y, X).fit().summary())
| bsd-3-clause |
channsoden/hannsoden-bioinformatics | plots.py | 1 | 10700 | #!/usr/bin/env python
from matplotlib import pyplot as plt
from matplotlib import colors
from matplotlib.path import Path
from matplotlib.spines import Spine
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
import numpy as np
import statsmodels.api as sm
import plotting_tools as pt
def box_plot(ax, data, color='black', spacing = 1, offset = 0, sym = 'k+'):
categories = sorted(data.keys())
series = [data[category] for category in categories]
positions = [1+(i*spacing)+offset for i in range(len(series))]
box = ax.boxplot(series, patch_artist=True, positions=positions, sym=sym, widths=0.4, vert=True)
[patch.set_facecolor(color) for patch in box['boxes']]
[patch.set_color(color) for patch in box['boxes']]
[line.set_color('white') for line in box['medians']]
[line.set_color(color) for line in box['whiskers']]
[line.set_color(color) for line in box['caps']]
[sym.set_color(color) for sym in box['fliers']]
ax.set_xticklabels(categories)
def pretty_bar(ax, data, labels, title=None, shift = 0, barwidth=0.5,
barcolor='gray', horizontal=False):
if horizontal:
plotter = ax.barh
xlim = ax.set_ylim
xticks = ax.set_yticks
yticks = ax.set_xticks
xticklabels = ax.set_yticklabels
grid = ax.xaxis.grid
rotation = -90
else:
plotter = ax.bar
xlim = ax.set_xlim
xticks = ax.set_xticks
yticks = ax.set_yticks
xticklabels = ax.set_xticklabels
grid = ax.yaxis.grid
rotation = 0
# Matplotlib assumes you have numerical data for both axes
# But for the X axis, we have categorical data
# So we need to plug in a range of numbers to place the bars at
x = list(range(len(data)))
x = [i + shift for i in x]
# Bars will appear at 0, 1, 2, etc
# So set the x-limits to include these values
xlim(-barwidth, len(data)-barwidth)
# Here we turn the bars grey and remove their edge border.
# Also make the bars a little more narrow.
bars = plotter(x, data, barwidth, align='center', color=barcolor, edgecolor='none')
# Set the ticks to match the bars and label them.
if max([len(l) for l in labels]) > 4:
rotation += 90
xticks(x)
xticklabels(labels, rotation=rotation)
# Hide the frame around the plot
for spine in ax.spines:
ax.spines[spine].set_visible(False)
# Turn off the ticks
ax.tick_params(bottom='off', top='off', left='off', right='off')
# Overlay a white grid on the y axis
grid(True, color='white', linestyle='solid')
if title:
ax.set_title(title, fontweight='bold')
return bars
def manhattan(ax, positions, p_vals, scaffold_positions, color = 'black', sig = 2):
rgba = np.zeros((len(positions), 4))
if type(color) == str:
rgba[:, 0:3] = colors.to_rgba(color)[0:3]
else:
rgba[:, 0:3] = color
#rgba[p_vals < sig, 0:3] = np.array([0., 0., 0.]) # insignificant points are black
rgba[:, 3] = 1 # make opaque
ax.scatter(positions, p_vals, color = rgba, s = 5)
ax.set_ylabel('-log(p)')
max_y = int(max(p_vals)) + 1
ax.set_ylim(-0.3, max_y)
yticks = list(range(max_y + 1))
ax.set_yticks(yticks)
xtick_labels = sorted(list(scaffold_positions.keys()), key=lambda k: scaffold_positions[k])
xticks = [scaffold_positions[k] for k in xtick_labels]
label_locations = [(xticks[i]+xticks[i+1])/2. for i in range(len(xticks)-1)]
ax.set_xlim(0, max(xticks))
ax.set_xticks(xticks)
ax.set_xticklabels('') # turn off major tick labels
ax.set_xticks(label_locations, minor=True)
ax.set_xticklabels(xtick_labels, rotation='vertical', minor=True)
ax.tick_params(axis='x', which='minor', bottom='off')
class regression_plot(object):
def __init__(self, x, y, alpha = 0.05, label = None):
self.x = x
self.y = y
self.label = label
self.alpha = alpha
def regress(self, slope = 'nonzero'):
X = sm.add_constant(self.x)
self.model = sm.OLS(self.y, X, missing='drop')
results = self.model.fit()
self.r2 = results.rsquared
self.intercept, self.slope = results.params[:2]
self.raw_int_p = results.pvalues[0]
if slope == 'nonzero':
self.raw_slope_p = results.pvalues[1]
elif slope == 'negative' or slope == 'positive':
# convert to single-tailed test for appropriate slope
pval = results.pvalues[1] / 2.
if ((slope == 'negative' and self.slope > 0) or
(slope == 'positive' and self.slope < 0)):
pval = 1. - pval
self.raw_slope_p = pval
else:
raise ValueError("slope argument should be 'nonzero', 'negative', or 'positive'.")
self.p_val = self.raw_slope_p
return self.raw_slope_p
def draw(self, ax,
logx = False, logy = False,
xlim = None, ylim = None,
scientific = True,
sig_color = 'k', insig_color = 'r',
sig_style = '--', insig_style = '--',
fit_report_location = None,
fit_report_ha = 'left', fit_report_va = 'bottom',
marker_alpha=0.3,
plot_regression=True):
with np.errstate(divide='ignore'):
if logx:
X = np.log10(self.x)
else:
X = self.x
if logy:
Y = np.log10(self.y)
else:
Y = self.y
ax.scatter(X, Y, c='k', alpha = marker_alpha, edgecolors='none')
if xlim:
ax.set_xlim(*xlim)
if ylim:
ax.set_ylim(*ylim)
if scientific:
pt.scientific(ax)
if plot_regression:
if self.p_val <= self.alpha:
line = '{}{}'.format(sig_style, sig_color)
else:
line = '{}{}'.format(insig_style, insig_color)
regressx = np.linspace(self.x.min(), self.x.max(), num=100)
prediction = regressx * self.slope + self.intercept
with np.errstate(divide='ignore'):
if logx:
regressx = np.log10(regressx)
if logy:
prediction = np.log10(prediction)
ax.plot(regressx, prediction, line)
if fit_report_location:
fit_report = 'R2 = {:.4f}\np = {:.3E}'.format(self.r2, self.p_val)
left, right = ax.get_xlim()
bottom, top = ax.get_ylim()
location = (left + (right-left) * fit_report_location[0],
bottom + (top-bottom) * fit_report_location[1])
ax.text(location[0], location[1],
fit_report,
ha=fit_report_ha, va=fit_report_va)
ax.set_title(self.label)
def radar_factory(num_vars, frame='circle'):
"""Create a radar chart with `num_vars` axes.
This function creates a RadarAxes projection and registers it.
Parameters
----------
num_vars : int
Number of variables for radar chart.
frame : {'circle' | 'polygon'}
Shape of frame surrounding axes.
"""
# calculate evenly-spaced axis angles
theta = np.linspace(0, 2*np.pi, num_vars, endpoint=False)
# rotate theta such that the first axis is at the top
theta += np.pi/2
def draw_poly_patch(self):
verts = unit_poly_verts(theta)
return plt.Polygon(verts, closed=True, edgecolor='k')
def draw_circle_patch(self):
# unit circle centered on (0.5, 0.5)
return plt.Circle((0.5, 0.5), 0.5)
patch_dict = {'polygon': draw_poly_patch, 'circle': draw_circle_patch}
if frame not in patch_dict:
raise ValueError('unknown value for `frame`: %s' % frame)
class RadarAxes(PolarAxes):
name = 'radar'
# use 1 line segment to connect specified points
RESOLUTION = 1
# define draw_frame method
draw_patch = patch_dict[frame]
def fill(self, *args, **kwargs):
"""Override fill so that line is closed by default"""
closed = kwargs.pop('closed', True)
return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)
def plot(self, *args, **kwargs):
"""Override plot so that line is closed by default"""
lines = super(RadarAxes, self).plot(*args, **kwargs)
for line in lines:
self._close_line(line)
def _close_line(self, line):
x, y = line.get_data()
# FIXME: markers at x[0], y[0] get doubled-up
if x[0] != x[-1]:
x = np.concatenate((x, [x[0]]))
y = np.concatenate((y, [y[0]]))
line.set_data(x, y)
def set_varlabels(self, labels):
self.set_thetagrids(np.degrees(theta), labels)
def _gen_axes_patch(self):
return self.draw_patch()
def _gen_axes_spines(self):
if frame == 'circle':
return PolarAxes._gen_axes_spines(self)
# The following is a hack to get the spines (i.e. the axes frame)
# to draw correctly for a polygon frame.
# spine_type must be 'left', 'right', 'top', 'bottom', or `circle`.
spine_type = 'circle'
verts = unit_poly_verts(theta)
# close off polygon by repeating first vertex
verts.append(verts[0])
path = Path(verts)
spine = Spine(self, spine_type, path)
spine.set_transform(self.transAxes)
return {'polar': spine}
register_projection(RadarAxes)
return theta
def unit_poly_verts(theta):
"""Return vertices of polygon for subplot axes.
This polygon is circumscribed by a unit circle centered at (0.5, 0.5)
"""
x0, y0, r = [0.5] * 3
verts = [(r*np.cos(t) + x0, r*np.sin(t) + y0) for t in theta]
return verts
def radar_plot(titles, data):
theta = radar_factory(len(titles), frame='circle')
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='radar')
ax.set_rgrids([0.2*i for i in range(1,6)])
ax.set_ylim(0, 1)
ax.set_title('this is a radar plot')
ax.set_varlabels(titles)
#ax.spines['polar'].set_linewidth(0) # to turn off bounding box
[line.set_visible(False) for line in ax.xaxis.get_gridlines()]
inspect = ax.xaxis.get_gridlines()
print(inspect)
print(dir(inspect))
for d in data:
ax.plot(theta, d)
fig.savefig('radar_plot.png', bbox_inches='tight')
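if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): exercise
    # regression_plot on synthetic data. The data values and parameter choices
    # below are illustrative assumptions only.
    rng = np.random.RandomState(0)
    x_demo = rng.uniform(1.0, 10.0, 50)
    y_demo = 2.0 - 0.3 * x_demo + rng.normal(scale=0.5, size=50)
    fig, ax = plt.subplots()
    demo = regression_plot(x_demo, y_demo, label='synthetic demo')
    print('slope p-value: {:.4f}'.format(demo.regress(slope='negative')))
    # scientific=False avoids the pt.scientific() axis-formatting helper here
    demo.draw(ax, scientific=False, fit_report_location=(0.05, 0.9))
    fig.savefig('regression_demo.png', bbox_inches='tight')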
| gpl-3.0 |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/svm/tests/test_svm.py | 6 | 32057 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.multiclass import OneVsRestClassifier
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check the shape when decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf = svm.SVC(kernel='linear', C=0.1).fit(X_train, y_train)
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_predict():
# Test SVR's decision_function
# Sanity check, test that predict implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
# used to work only when the labels where a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
def test_decision_function_shape_two_class():
for n_classes in [2, 3]:
X, y = make_blobs(centers=n_classes, random_state=0)
for estimator in [svm.SVC, svm.NuSVC]:
clf = OneVsRestClassifier(estimator(
decision_function_shape="ovr")).fit(X, y)
assert_equal(len(clf.predict(X)), len(y))
| mit |
xaratustrah/pymirko | pymirko.py | 1 | 9318 | #!/usr/bin/env python
"""
MIRKO post processing tools
2016 Xaratustrah
"""
import sys, os, argparse, glob
from subprocess import call
import numpy as np
import matplotlib.pyplot as plt
import fortranformat as ff
MIRKO = 'mirko'
EVET_LOOPS = 10
N_TURNS = 3
MIX_FILE = 'esr_2016-04.mix'
TEMP_FILENAME = 'temp.mak'
MIXFILE_PLACEHOLDER = 'MIXFILE_PLACEHOLDER'
PLACEHOLDER = 'NUMBERPLACEHOLDER'
FILENAME_PLACEHOLDER = 'FILENAME_PLACEHOLDER'
LAST_RING_ELEMENT = 330
def get_apreture_dic():
ffline = ff.FortranRecordReader('(3X,a16,3I4,F17.4,F17.10,2F12.3,I4,2x,a16)')
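    # Fields used from each fixed-format MIX listing line (see the indexing
    # below): hh[0] element name, hh[2] device type (2 = drift space, skipped),
    # hh[6] aperture.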
dic = {}
with open(MIX_FILE) as f:
for _ in range(31):
next(f)
# for i in range(LAST_RING_ELEMENT):
while True:
try:
line = f.readline()
if not line:
break
hh = ffline.read(line)
device_type = int(hh[2])
if device_type == 2:
# this is a drift space, ignore it
continue
# name = 'DRIFT'
else:
name = hh[0].strip()
aperture = hh[6]
if name not in dic:
dic.update({name: aperture})
# print('{},\t{},\t,{}\n'.format(name, device_type, aperture))
# if aperture == 0:
# print(name)
except:
pass
return dic
def loop_mirko(generator_filename):
print('Using MIX file {}.'.format(MIX_FILE))
for i in range(1, EVET_LOOPS + 1):
create_mak_file(i, generator_filename, N_TURNS)
cmd = MIRKO.split()
cmd.append(TEMP_FILENAME)
print('Running MIRKO for event at element No. {}.'.format(i))
call(cmd)
os.remove(TEMP_FILENAME)
def create_mak_file(current_idx, generator_filename, n_turns=1):
# read the header section from a file
with open(generator_filename) as f:
header_section = f.read()
repeat_section = ''.join('aenv,{},{}\n*\n'.format(i, i) for i in range(1, 365 + 1))
middle_section = 'savs,less,penv,,delp,\n*\n'
final_section = 'close,9\n*\npnul,solb,0,0,0,0,0,-0.009,sync\n'
with open(TEMP_FILENAME, 'w') as f:
new_head = header_section.replace(PLACEHOLDER, '{}'.format(current_idx)).replace(MIXFILE_PLACEHOLDER,
'{}'.format(MIX_FILE)).replace(
FILENAME_PLACEHOLDER, 'result_at_{:03d}.txt'.format(current_idx))
f.write(new_head)
for i in range(n_turns):
f.write(repeat_section)
f.write(middle_section)
f.write(final_section)
def get_data_from_result_file(filename):
dic = get_apreture_dic()
arr = np.array([])
arr_of_z_at_ends = np.array([])
current_turn_number = 1
z_at_end = 0
with open(filename) as f:
for line in f:
s = line.split()
if not len(s) == 6:
continue
try:
number = int(s[-6])
z = float(s[-4])
# add the circumference BEFORE checking last element
z_cont = z + z_at_end
# check for last element now
if number == LAST_RING_ELEMENT:
# this is really cool:
z_at_end += z
arr_of_z_at_ends = np.append(arr_of_z_at_ends, z_at_end)
current_turn_number += 1
up = float(s[-3])
down = float(s[-2])
ref = float(s[-1])
# do this one at last, to make advantage of try/except block
aperture = dic[s[-5].strip()]
arr = np.append(arr, (number, aperture, z, z_cont, up, down, ref, current_turn_number))
except(ValueError, IndexError, KeyError):
# boah!
pass
    arr = np.reshape(arr, (len(arr) // 8, 8))
return arr, arr_of_z_at_ends
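# Column layout of the array returned above and consumed by the checks below:
# 0 = element number, 1 = aperture, 2 = z, 3 = z accumulated over turns,
# 4 = up, 5 = down, 6 = ref, 7 = turn number.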
def check_particle_loss(arr):
el_number = 0
loss_z_cont = 0
loss_ref = 0
current_turn_number = 0
for i in range(np.shape(arr)[0]):
# check if the particle hits the aperture
if arr[i, 4] >= arr[i, 1] or arr[i, 5] < (-1 * arr[i, 1]):
# determine loss position using z_cont
el_number, loss_z_cont, loss_ref, current_turn_number = int(arr[i, 0]), arr[i, 3], arr[i, 6], int(arr[i, 7])
return el_number, loss_z_cont, loss_ref, current_turn_number
def check_particle_at_element(arr, element_number, element_x_min, element_x_max):
pock_z = np.array([])
pock_x = np.array([])
current_turn_number_array = np.array([])
for i in range(np.shape(arr)[0]):
# check the position of particle at a specific element
if arr[i, 0] == element_number:
pock_z = np.append(pock_z, arr[i, 3])
current_turn_number_array = np.append(current_turn_number_array, arr[i, 7])
if arr[i, 6] >= element_x_min and arr[i, 6] <= element_x_max:
print('Turn {}: Particle hits pocket detector (element number {} at {}mm).'.format(int(arr[i, 7]),
element_number,
arr[i, 6]))
pock_x = np.append(pock_x, arr[i, 6])
# we already found a hit, so quit
break
else:
print('Turn {}: Particle misses pocket detector (element number {}).'.format(int(arr[i, 7]),
element_number))
pock_x = np.append(pock_x, 0)
return pock_z, pock_x, current_turn_number_array
def save_to_file(arr):
np.savetxt('result_array.txt', arr, delimiter=',')
# --------------------
def plot_data(arr, arr_of_z_at_ends, filename):
fig = plt.figure()
ax = fig.add_subplot(111)
# plot lines first
ax.plot(arr[:, 3], arr[:, 4], 'g-')
ax.plot(arr[:, 3], arr[:, 6], 'b-.')
ax.plot(arr[:, 3], arr[:, 5], 'g-')
# plot vertical lines for turns
for i in range(len(arr_of_z_at_ends)):
plt.axvline(arr_of_z_at_ends[i], color='b', linestyle='--')
# find loss point
el_number, loss_z_cont, loss_ref, current_turn_number = check_particle_loss(arr)
if el_number == 0 and loss_z_cont == 0 and loss_ref == 0:
print('Particle would survive for all turns if no pocket detectors were inside.')
else:
        print('Particle is lost at element number {}.'.format(el_number))
ax.plot(loss_z_cont, loss_ref, 'rv')
# check position at pocket detector which is element 229
pock_z, pock_x, current_turn_number_array = check_particle_at_element(arr, 229, 22, 82)
for i in range(len(pock_z)):
ax.axvline(pock_z[i], color='r', linestyle='--')
if pock_x[i] != 0:
ax.plot(pock_z[i], pock_x[i], 'rD')
ax.annotate('Pocket detector hit (turn {})'.format(int(current_turn_number_array[i])),
xy=(pock_z[i], pock_x[i]), xytext=(0.4, 0.8),
textcoords='figure fraction',
xycoords='data',
arrowprops=dict(width=1, headwidth=5, edgecolor='blue', facecolor='blue', shrink=0.05))
else:
ax.plot(pock_z[i], pock_x[i], 'rx')
# finalize the plot
plt.grid(True)
plt.xlabel('Path [mm]')
plt.ylabel('Offset [mm]')
filename_wo_ext = os.path.splitext(filename)[0]
plt.title(filename_wo_ext)
fig.savefig(os.path.splitext(filename_wo_ext)[0] + '.png', dpi=400)
# plt.show()
def main():
parser = argparse.ArgumentParser(prog='pymirko')
parser.add_argument('--verbose', action='store_true', help='Increase verbosity.')
parser.add_argument('--loop', action='store_true', help='Loop MIRKO.')
parser.add_argument('--plot', action='store_true', help='Plot results file.')
parser.add_argument('--check', action='store_true', help='Only check misses and hits.')
parser.add_argument('--many', action='store_true', help='Plot loop.')
parser.add_argument('filename', nargs=1, type=str, help='Input file name.')
args = parser.parse_args()
# check the first switches
filename = args.filename[0]
if not os.path.exists(filename):
print('Please enter a valid filename.')
return
if args.loop and args.plot:
parser.print_help()
return
if args.loop:
loop_mirko(filename)
if args.plot:
arr, arr_of_z_at_ends = get_data_from_result_file(filename)
save_to_file(arr)
plot_data(arr, arr_of_z_at_ends, filename)
if args.check:
arr, _ = get_data_from_result_file(filename)
check_particle_at_element(arr, 229, 22, 82)
if args.many:
for file in glob.glob("result_at_*.txt"):
print(file)
arr, arr_of_z_at_ends = get_data_from_result_file(file)
# check_particle_at_element(arr, 229, 22, 82)
plot_data(arr, arr_of_z_at_ends, file)
# --------------------
if __name__ == '__main__':
main()
| gpl-3.0 |
jacobdein/nacoustik | nacoustik/colormaps.py | 1 | 2457 | """
colormaps
Jacob Dein 2016
nacoustik
Author: Jacob Dein
License: MIT
"""
from matplotlib.colors import LinearSegmentedColormap
def spectro_white():
color_list = [
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(0.8823529411764706, 0.9725490196078431, 1.0),
(0.8235294117647058, 0.9450980392156862, 1.0),
(0.7647058823529411, 0.9098039215686274, 1.0),
(0.7058823529411765, 0.8666666666666667, 1.0),
(0.6470588235294118, 0.8156862745098039, 1.0),
(0.5882352941176471, 0.7568627450980392, 1.0),
(0.5294117647058824, 0.6862745098039216, 1.0),
(0.47058823529411764, 0.611764705882353, 1.0),
(0.4117647058823529, 0.5254901960784314, 1.0),
(0.35294117647058826, 0.43137254901960786, 1.0),
(0.29411764705882354, 0.3333333333333333, 1.0),
(0.24705882352941178, 0.23529411764705882, 1.0),
(0.25098039215686274, 0.17647058823529413, 1.0),
(0.25882352941176473, 0.11764705882352941, 1.0),
(0.2980392156862745, 0.0, 1.0),
(0.0, 0.5019607843137255, 0.050980392156862744),
(0.03137254901960784, 0.5411764705882353, 0.0),
(0.0784313725490196, 0.5607843137254902, 0.0),
(0.12941176470588237, 0.5803921568627451, 0.0),
(0.1803921568627451, 0.6, 0.0),
(0.23529411764705882, 0.6196078431372549, 0.0),
(0.3568627450980392, 0.6588235294117647, 0.0),
(0.4235294117647059, 0.6784313725490196, 0.0),
(0.49019607843137253, 0.7019607843137254, 0.0),
(0.5607843137254902, 0.7215686274509804, 0.0),
(0.6352941176470588, 0.7411764705882353, 0.0),
(0.7137254901960784, 0.7607843137254902, 0.0),
(0.7803921568627451, 0.7647058823529411, 0.0),
(0.8, 0.7215686274509804, 0.0),
(1.0, 0.9019607843137255, 0.5019607843137255),
(1.0, 0.8666666666666667, 0.4666666666666667),
(1.0, 0.8352941176470589, 0.43529411764705883),
(1.0, 0.796078431372549, 0.4),
(1.0, 0.7568627450980392, 0.3686274509803922),
(1.0, 0.7098039215686275, 0.3333333333333333),
(1.0, 0.6627450980392157, 0.30196078431372547),
(1.0, 0.615686274509804, 0.26666666666666666),
(1.0, 0.5607843137254902, 0.23529411764705882),
(1.0, 0.5058823529411764, 0.2),
(1.0, 0.44313725490196076, 0.16862745098039217),
(1.0, 0.3803921568627451, 0.13333333333333333),
(1.0, 0.3176470588235294, 0.09803921568627451),
(1.0, 0.24705882352941178, 0.06666666666666667),
(1.0, 0.17647058823529413, 0.03137254901960784),
(1.0, 0.10196078431372549, 0.0)
]
    return LinearSegmentedColormap.from_list(name='spectro_white', colors=color_list)
| mit |
Ecotrust/growth-yield-batch | scripts/extract.py | 2 | 16956 | #!/usr/bin/env python
"""extract.py
Extract variables from directories with FVS runs with offset plots; uses fvs .out files
Usage:
extract.py INDIR OUTCSV
extract.py (-h | --help)
extract.py --version
Options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
from pandas import DataFrame, merge
from collections import defaultdict
import glob
import os
import re
import json
def parse_name(filename):
"""
    >>> d = parse_name("/path/to/some/varWC_rx25_cond31566_site3_climNoClimate_off20.key")
    >>> d == parse_name("varWC_rx25_cond31566_site3_climNoClimate_off20.key")
    True
    >>> d == parse_name("varWC_rx25_cond31566_site3_climNoClimate_off20")
    True
    >>> d == {'var': 'WC', 'rx': 25, 'cond': 31566, 'site': 3, 'climate': 'NoClimate', 'offset': 20}
    True
"""
basename = os.path.splitext(os.path.basename(filename))[0]
exp = re.compile("var([a-zA-Z]+)_rx([0-9a-zA-Z]+)_cond([0-9a-zA-Z]+)_site([0-9a-zA-Z]+)_clim([0-9a-zA-Z-]+)_off([0-9]+)")
parts = exp.match(basename).groups()
conv_parts = []
for part in parts:
try:
part = int(part)
except ValueError:
part = str(part)
conv_parts.append(part)
keys = ("var", "rx", "cond", "site", "climate", "offset")
return dict(zip(keys, conv_parts))
def classify_tree(spz, diam):
diam_class = int(diam / 10.0)
return "%s_%s" % (spz, diam_class)
def split_fixed(line, fixed_schema, failsafe=False):
funcs = {'int': int, 'float': float, 'str': str}
data = {}
for var in fixed_schema:
try:
data[var[0]] = funcs[var[3]](line[var[1]-1:var[2]])
except ValueError as e:
if failsafe:
data[var[0]] = None
else:
raise e
return data
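# Illustration (added comment; not from the original script): a schema is a list of
# (name, first_column, last_column, type) tuples with 1-based inclusive columns, so
#   split_fixed("2013  12", [('year', 1, 4, 'int'), ('age', 5, 8, 'int')])
# returns {'year': 2013, 'age': 12}.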
def extract_data(indir):
carbon_rows = []
harvested_carbon_rows = []
econ_rows = []
harvest_rows = []
summary_rows = []
activity_rows = []
for outfile in glob.glob(os.path.join(indir, "*.out")):
info = parse_name(outfile)
############# Extract Stand Carbon Report
ready = False
countdown = None
with open(outfile, 'r') as fh:
lines = fh.readlines()
for line in lines:
if "STAND CARBON REPORT" in line:
# We've found the carbon report, data starts 9 lines down
ready = True
countdown = 9
if not ready or countdown > 0:
if countdown:
countdown -= 1
continue
if line.strip() == "":
# blank line == we're done
break
# Got it: this is a data line
"""
'year', 'agl', 'agl_merch', 'bgl', 'bgd', 'dead', 'ddw', 'floor', 'shbhrb',
'total_stand_carbon', 'total_removed_carbon', 'carbon_fire'
"""
fixed_schema = [
('year', 1, 4, 'int'),
('agl', 5, 13, 'float'),
('bgl', 23, 31, 'float'),
('dead', 41, 49, 'float'),
('total_stand_carbon', 77, 85, 'float'),
]
data = split_fixed(line.strip(), fixed_schema)
# calculate our own carbon
carbon = float(data['agl']) + float(data['bgl']) + float(data['dead'])
data['calc_carbon'] = carbon
# need to include variant?
data.update(info)
carbon_rows.append(data)
############# Extract Harvested Carbon Report
ready = False
countdown = None
for line in lines:
if "HARVESTED PRODUCTS REPORT" in line and not line.startswith("CARBCUT"):
# We've found the harvested products carbon report, data starts 9 lines down
ready = True
countdown = 9
if not ready or countdown > 0:
if countdown:
countdown -= 1
continue
if line.strip() == "":
# blank line == we're done
break
# Got it: this is a data line
fixed_schema = [
('year', 1, 4, 'int'),
('merch_carbon_stored', 41, 49, 'float'),
('merch_carbon_removed', 50, 58, 'float'),
]
data = split_fixed(line.strip(), fixed_schema)
# need to include variant?
data.update(info)
harvested_carbon_rows.append(data)
############# Extract ECONOMIC ANALYSIS SUMMARY REPORT
ready = False
countdown = None
for line in lines:
if line.startswith("ECONOMIC ANALYSIS SUMMARY REPORT"):
# We've found the econ summary report, data starts 6 lines down
ready = True
countdown = 6
if not ready or countdown > 0:
if countdown:
countdown -= 1
continue
if line.strip() == "":
# blank line == we're done
break
# Got it: this is a data line
fixed_schema = [
('year', 1, 5, 'int'),
('undiscounted_revenue', 29, 37, 'int'), # TODO Check all these once DD gets econ reporting in place
('econ_removed_merch_ft3', 101, 107, 'int'),
('econ_removed_merch_bdft', 108, 114, 'int'),
]
data = split_fixed(line.strip(), fixed_schema)
# need to include variant?
data.update(info)
econ_rows.append(data)
# # ############# Extract HARVEST VOLUME AND GROSS VALUE REPORT
ready = False
countdown = None
within_year = None
yeardata = defaultdict(list)
blanks = 0
for line in lines:
if line.startswith("HARVEST VOLUME AND GROSS VALUE REPORT"):
# We've found the econ summary report, data starts 2 lines down
ready = True
countdown = 2
if not ready or countdown > 0:
if countdown:
countdown -= 1
continue
if line.strip() == "":
# 3 blank lines == we're done
blanks += 1
if blanks == 3:
break
continue
if line.strip().startswith("-------"):
                # separator line of dashes == we're done with this TIME PERIOD
blanks = 0
within_year = None
continue
if line.startswith(" YEAR = "):
within_year = int(line[8:12])
countdown = 3
continue
if not within_year:
continue
fixed_schema = [
('spp', 1, 8, 'str'),
('mindiam', 11, 16, 'float'),
('maxdiam', 17, 24, 'float'), # TODO Check all these once DD gets econ reporting in place
('harv_ft3', 69, 76, 'int'),
('harv_bdft', 88, 95, 'int'),
]
d = split_fixed(line.strip(), fixed_schema, failsafe=True)
d['spp'] = d['spp'].strip()
yeardata[within_year].append(d)
for k, v in yeardata.items():
data = {'year': k, 'harvest_report': json.dumps(v)}
data.update(info)
harvest_rows.append(data)
############# Extract Summary Statistics
ready = False
countdown = None
data = None
for line in lines:
if "SUMMARY STATISTICS (PER ACRE OR STAND BASED ON TOTAL STAND AREA)" in line:
# We've found the summary stats, data starts 7 lines down
ready = True
countdown = 7
if not ready or countdown > 0:
if countdown:
countdown -= 1
continue
if line.strip() == "":
# blank line == we're done
break
# Got it: this is a data line
"""
'year', 'age', 'num_trees', 'ba', 'sdi', 'ccf', 'top_ht', 'qmd', 'total_ft3',
'merch_ft3', 'merch_bdft', 'cut_trees', 'cut_total_ft3', 'cut_merch_ft3',
'cut_merch_bdft', 'after_ba', 'after_sdi', 'after_ccf', 'after_ht', 'after_qmd',
'growth_yrs', 'growth_accreper', 'growth_mortyear', 'mai_merch_ft3', 'for_ss_typ_zt'
"""
fixed_schema = [
('year', 1, 4, 'int'),
('age', 5, 8, 'int'),
('start_tpa', 9, 14, 'int'),
('start_ba', 15, 18, 'int'),
('start_total_ft3', 37, 42, 'int'),
('start_merch_ft3', 43, 48, 'int'),
('start_merch_bdft', 49, 54, 'int'),
('removed_tpa', 56, 60, 'int'),
('removed_total_ft3', 61, 66, 'int'),
('removed_merch_ft3', 67, 72, 'int'),
('removed_merch_bdft', 73, 78, 'int'),
('after_ba', 79, 82, 'int'),
('after_sdi', 83, 87, 'int'),
('after_qmd', 96, 100, 'float'),
('accretion', 109, 113, 'int'),
('mortality', 114, 119, 'int'),
('fortype', 129, 131, 'int'),
('size_class', 133, 133, 'int'),
('stocking_class', 134, 134, 'int')
]
data = split_fixed(line.strip(), fixed_schema)
data['after_tpa'] = data['start_tpa'] - data['removed_tpa']
data['after_total_ft3'] = data['start_total_ft3'] - data['removed_total_ft3']
data['after_merch_ft3'] = data['start_merch_ft3'] - data['removed_merch_ft3']
data['after_merch_bdft'] = data['start_merch_bdft'] - data['removed_merch_bdft']
data.update(info)
summary_rows.append(data)
############# Extract Activity Summary
# List of Compute Variables to look for
looking_for = [
# Harvest BF by species group
"PINE_HRV",
"SPRC_HRV",
"CEDR_HRV",
"DF_HRV",
"HW_HRV",
"MNCONHRV",
"MNHW_HRV",
"WJ_HRV",
"WW_HRV",
# Standing BF by species group
"PINE_BF",
"SPRC_BF",
"CEDR_BF",
"DF_BF",
"HW_BF",
"MNCONBF",
"MNHW_BF",
"WJ_BF",
"WW_BF",
# ??? Are we using these ??
"SPPRICH",
"SPPSIMP",
# Cost Model
"SM_CF",
"SM_HW",
"SM_TPA",
"LG_CF",
"LG_HW",
"LG_TPA",
"CH_CF",
"CH_HW",
"CH_TPA",
"CUT_TYPE",
# "PLANT" ESTAB PLANT; multiple per year though!
# Habitat
"NSONEST",
"NSOFRG",
"NSODIS",
# Pests
#"PP_BTL",
#"LP_BTL",
"PINEBTL",
"DF_BTL",
"ES_BTL",
"DEFOL",
# Fire
"FIREHZD",
]
ready = False
countdown = None
within_year = None
data = {}
for line in lines:
if "ACTIVITY SUMMARY" in line:
# We've found the summary stats, data starts x lines down
ready = True
countdown = 9
if not ready or countdown > 0:
if countdown:
countdown -= 1
continue
if line.strip() == "":
# blank line == we're done with this TIME PERIOD
within_year = None
activity_rows.append(data)
data = {}
continue
if line.startswith("-----"):
activity_rows.append(data)
break
# This is the start of a time period
if not within_year:
within_year = int(line[7:11])
data['year'] = within_year
data.update(info)
# initialize year with null values for all variables
for var in looking_for:
data[var] = None
# special case ESTB PLANT keyword
data['PLANT'] = 0
else:
var = line[24:34].strip()
status = line[40:59].strip() # disregard NOT DONE or DELETED OR CANCELED
if status.startswith("DONE IN") and var in looking_for:
val = float(line[61:72]) # Is this wide enough??
data[var] = val
elif status.startswith("DONE IN") and var == 'PLANT':
# special case ESTB PLANT keyword aggregates second column
val = float(line[73:82])
data[var] += val
# load into pandas dataframes, join
activity_df = DataFrame(activity_rows)
summary_df = DataFrame(summary_rows)
carbon_df = DataFrame(carbon_rows)
econ_df = DataFrame(econ_rows)
harvest_df = DataFrame(harvest_rows)
harvested_carbon_df = DataFrame(harvested_carbon_rows)
final_merge = merge(summary_df, activity_df, how='outer',
on=['var', 'rx', 'cond', 'site', 'offset', 'year', 'climate'])
final_merge = merge(final_merge, harvested_carbon_df, how='outer',
on=['var', 'rx', 'cond', 'site', 'offset', 'year', 'climate'])
final_merge = merge(final_merge, carbon_df, how="outer",
on=['var', 'rx', 'cond', 'site', 'offset', 'year', 'climate'])
final_merge = merge(final_merge, econ_df, how="outer",
on=['var', 'rx', 'cond', 'site', 'offset', 'year', 'climate'])
if len(harvest_rows) > 0:
final_merge = merge(final_merge, harvest_df, how="outer",
on=['var', 'rx', 'cond', 'site', 'offset', 'year', 'climate'])
# manage types
final_merge[['offset']] = final_merge[['offset']].astype(int)
final_merge[['rx']] = final_merge[['rx']].astype(int)
final_merge[['year']] = final_merge[['year']].astype(int)
final_merge[['cond']] = final_merge[['cond']].astype(int)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# IMPORTANT NOTE
# The data structure of `final_merge` must match the schema in run_fvs.create_data_db() (~ line 303)
# Use the code below to generate a schema
# hint, may need to switch some to REAL if nulls exists (which show up as 'object' dtype)
# in general there should be very few TEXTs: var, climate and harvest_report being the current ones
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
# for col, dtype in zip(final_merge.columns, final_merge.dtypes):
# if 'int' in str(dtype):
# print '"%s" INTEGER, -- %s' % (col, dtype)
# elif 'float' in str(dtype):
# print '"%s" REAL, -- %s' % (col, dtype)
# else:
# print '"%s" TEXT, -- %s' % (col, dtype)
return final_merge
if __name__ == "__main__":
args = docopt(__doc__, version='1.0')
indir = os.path.abspath(args['INDIR'])
csv = os.path.abspath(args['OUTCSV'])
df = extract_data(indir)
df.to_csv(csv, index=False, header=True)
keys = [x.lower() for x in df.columns]
vals = [x.name for x in df.dtypes]
print "-" * 80
print "class FVSAggregate(models.Model):"
for colname, coltype in zip(keys, vals):
if coltype == "float64":
print " %s = models.FloatField(null=True, blank=True)" % colname
elif coltype == "int64":
print " %s = models.IntegerField(null=True, blank=True)" % colname
elif coltype == "object" and colname in ['var']:
print " %s = models.CharField(max_length=2)" % colname
elif coltype == "object" and colname in ['site', 'cond', 'offset', 'rx']:
print " %s = models.IntegerField()" % colname
else: # default
print " %s = models.FloatField(null=True, blank=True)" % colname
print "-" * 80
print """
COPY trees_fvsaggregate(%s)
FROM '%s'
DELIMITER ',' CSV HEADER;""" % (",".join(['"%s"' % x for x in keys]), "merged_file.csv")
print "-" * 80
print """
cd /usr/local/data/out
# copy header
sed -n 1p first.csv > merged_file.csv
#copy all but the first line from all other files
for i in *.csv; do sed 1d $i; done >> merged_file.csv"""
print "-" * 80
print """
1. Run fvsbatch
2. copy model defn
3. schemamigration
4. migrate
5. sed merge csvs
6. postgres copy
7. create indicies
"""
print
| bsd-3-clause |
rlabbe/filterpy | docs/conf.py | 3 | 11304 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# FilterPy documentation build configuration file, created by
# sphinx-quickstart on Sat Nov 22 14:54:37 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import mock
MOCK_MODULES = ['numpy', 'scipy', 'matplotlib', 'matplotlib.pyplot',
                'scipy.linalg', 'numpy.linalg',
'numpy.random', 'scipy.sparse', 'scipy.sparse.linalg',
'scipy.stats', 'matplotlib.patches', 'scipy.ndimage.filters',
'scipy.ndimage.interpolation', 'scipy.ndimage']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
#sys.path.insert(0, os.path.abspath('../filterpy'))
sys.path.insert(0, os.path.abspath('../'))
from filterpy import *
import filterpy
import filterpy.kalman
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'numpydoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon'
]
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'FilterPy'
copyright = '2014-2016, Roger R. Labbe'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = filterpy.__version__
# The full version, including alpha/beta/rc tags.
release = filterpy.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = "autolink"
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FilterPydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'FilterPy.tex', 'FilterPy Documentation',
'Roger R. Labbe', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'filterpy', 'FilterPy Documentation',
['Roger R. Labbe'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'FilterPy', 'FilterPy Documentation',
'Roger R. Labbe', 'FilterPy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'FilterPy'
epub_author = 'Roger R. Labbe'
epub_publisher = 'Roger R. Labbe'
epub_copyright = '2015, Roger R. Labbe'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'FilterPy'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
autodoc_member_order = 'bysource'
| mit |
dymkowsk/mantid | scripts/FilterEvents/MplFigureCanvas.py | 3 | 1355 | #pylint: disable=invalid-name
from __future__ import (absolute_import, division, print_function)
from PyQt4 import QtGui
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
class MplFigureCanvas(FigureCanvas):
""" A customized Qt widget for matplotlib figure.
It can be used to replace GraphicsView of QtGui
"""
def __init__(self, parent):
""" Initialization
"""
        # Instantiate the matplotlib Figure
self.fig = Figure()
self.axes = self.fig.add_subplot(111)
# Initialize parent class and set parent
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
        # Set size policy so the canvas can expand and resize with the frame
FigureCanvas.setSizePolicy(self, QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
return
def plot(self, x, y):
""" Plot a set of data
Argument:
- x: numpy array X
- y: numpy array Y
"""
self.x = x
self.y = y
self.axes.plot(self.x, self.y)
return
def getPlot(self):
""" reture figure's axes to expose the matplotlib figure to PyQt client
"""
return self.axes
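# Illustrative usage (added note; assumes an existing QWidget ``parent`` and
# numpy imported as ``np``):
#     canvas = MplFigureCanvas(parent)
#     canvas.plot(np.arange(10), np.arange(10) ** 2)
#     axes = canvas.getPlot()   # direct access to the underlying matplotlib Axes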
| gpl-3.0 |
miguelotemagno/imagestion | imagestion_1.0/test-train-pyBrain.py | 1 | 5972 | import scipy
from scipy import ndimage
from scipy import stats
from scipy.misc import toimage
import math
import matplotlib.pyplot as plt
import numpy as np
import colorsys, sys, os
from PIL import Image, ImageDraw, ImageFont
from scipy.optimize import curve_fit
from scipy.misc import factorial
from Segmentation import *
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure import TanhLayer
from pybrain.structure import FeedForwardNetwork
import pickle
## from ANN import *
#-----------------------------------------------------------------------
def HSVColor(img):
if isinstance(img,Image.Image):
r,g,b = img.split()
Hdat = []
Sdat = []
Vdat = []
for rd,gn,bl in zip(r.getdata(),g.getdata(),b.getdata()) :
h,s,v = colorsys.rgb_to_hsv(rd/255.,gn/255.,bl/255.)
Hdat.append(int(h*255.))
Sdat.append(int(s*255.))
Vdat.append(int(v*255.))
r.putdata(Hdat)
g.putdata(Sdat)
b.putdata(Vdat)
return Image.merge('RGB',(r,g,b))
else:
return None
def plotHistogram(arr, b):
hist, bins = np.histogram(arr, bins=b)
#width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center') #, width=width)
plt.show()
# http://stackoverflow.com/questions/16373425/add-text-on-image-using-pil
def showImage(img, text):
draw = ImageDraw.Draw(img)
# font = ImageFont.truetype(<font-file>, <font-size>)
font = ImageFont.truetype("Pillow/Tests/fonts/FreeMono.ttf", 16)
# draw.text((x, y),"Sample Text",(r,g,b))
draw.text((10, 10),text) #,font=font,fill=(255,255,255))
img.show()
#-----------------------------------------------------------------------
# Referencias: http://www.scipy-lectures.org/advanced/image_processing/
# http://gis.stackexchange.com/questions/24827/how-to-smooth-the-polygons-in-a-contour-map
# http://scikit-image.org
# https://sites.google.com/site/bustosmerino/home/segmentacion---color-de-piel
imgFile = sys.argv[1]
dbFile = sys.argv[2]
print imgFile
print dbFile
shape1 = (2,2)
shape2 = (6,6)
seg = Segmentation(imgFile)
rgb = seg.rgb
rgb1 = np.array(seg.erodeRGB(shape1))
rgb2 = np.array(seg.dilateRGB(shape2))
diff = rgb2 - rgb1
seg.rgb2hsv()
seg.erodeHSV(shape2)
seg.dilateHSV(shape1)
seg.statisticalDispersionHSV()
hsv = seg.getHSV()
mask = seg.getHSVmask()
invMask = ~mask
mask[mask != 0xFF] = 0
piel = seg.applyMask2Rgb(mask)
invMask[invMask != 0xFF] = 0
fondo = seg.applyMask2Rgb(invMask)
seg.setRGB(piel)
img1 = np.array(seg.erodeRGB(shape1))
img2 = np.array(seg.dilateRGB(shape2))
diff2 = img2 - img1
## toimage(seg.maskH).show()
## toimage(seg.maskS).show()
## toimage(seg.maskV).show()
## toimage(hsv).show()
## toimage(mask).show()
## toimage(invMask).show()
toimage(piel).show()
## toimage(fondo).show()
## toimage(diff).show()
## toimage(diff2).show()
#-----------------------------------------------------------------------
#http://pybrain.org/docs/quickstart/dataset.html
ds = SupervisedDataSet(3, 1)
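# Added note: SupervisedDataSet(3, 1) declares 3 inputs (the normalised R, G, B
# values of a pixel) and 1 target (1 for skin pixels, 0 for background), which
# is how samples are added in the two loops below.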
hist = {}
muestra1 = []
muestra2 = []
i = 0
for y in range(seg.height):
## if i > 10:
## break
for x in range(seg.width):
r,g,b = piel.getpixel((x,y))
key = "x%02x%02x%02x" % (r, g, b)
hist[key] = hist[key] + 1 if key in hist else 0
if (r|g|b and hist[key] == 0) :
muestra1 = [float(r)/256, float(g)/256, float(b)/256]
print "%05d (%02x, %02x, %02x) => [1] %s" % (i,r,g,b, muestra1)
ds.addSample((muestra1[0], muestra1[1], muestra1[2]), (1))
## ds.append([muestra1, [1]])
i += 1
i = 0
for y in range(seg.height):
## if i > 10:
## break
for x in range(seg.width):
r,g,b = fondo.getpixel((x,y))
key = "x%02x%02x%02x" % (r, g, b)
hist[key] = hist[key] + 1 if key in hist else 0
if (r|g|b and hist[key] == 0) :
muestra2 = [float(r)/255, float(g)/255, float(b)/255]
print "%05d (%02x, %02x, %02x) => [0] %s" % (i,r,g,b, muestra2)
ds.addSample((muestra2[0], muestra2[1], muestra2[2]), (0))
## ds.append([muestra2, [0]])
i += 1
print '#########################'
#http://pybrain.org/docs/api/supervised/trainers.html#pybrain.supervised.trainers.BackpropTrainer
#http://pybrain.org/docs/quickstart/training.html
#http://pybrain.org/docs/tutorial/netmodcon.html#netmodcon
fileObject = open(dbFile, 'a+')
newFile = False
try:
if os.stat(dbFile).st_size == 0 :
raise Exception("File %s is empty, proceed to create network" % (dbFile))
print "Loading %s for feed new data" % (dbFile)
net = pickle.load(fileObject)
except Exception as e:
print e
net = buildNetwork(3, 3, 1, bias=True, hiddenclass=TanhLayer)
newFile = True
epochs = 500
threshold = 0.01
error = 1
trainer = BackpropTrainer(net, ds, threshold)
## if newFile == True :
## while error > threshold:
## error = trainer.train()
## epochs -= 1
## print "%d) e -> %f" % (epochs, error)
## if epochs <= 0:
## break
print "1.- Train Until Convergence #####################################"
trainer.trainUntilConvergence(ds, epochs, True)
print ("epochs:%d error:%f" % (epochs,error))
print net.activate([muestra1[0], muestra1[1], muestra1[2]])
print net.activate([muestra2[0], muestra2[1], muestra2[2]])
## net = ANN(3, 4, 1, 0.001)
## net.iniciar_perceptron()
## net.entrenar_perceptron(ds, 5000)
## net.clasificar(ds)
#http://stackoverflow.com/questions/6006187/how-to-save-and-recover-pybrain-training
pickle.dump(net, fileObject)
fileObject.close()
print "2.- Segmentation ################################################"
# http://effbot.org/imagingbook/image.htm
for y in range(seg.height):
for x in range(seg.width):
r,g,b = rgb.getpixel((x,y))
pixel = [float(r)/255, float(g)/255, float(b)/255]
test = net.activate(pixel)
if (test[0] < 0.5 ) :
rgb.putpixel((x,y), 0)
toimage(rgb).show()
| gpl-2.0 |
dialounke/pylayers | pylayers/simul/examples/ex_simulem_fur.py | 3 | 1157 | from pylayers.simul.simulem import *
from pylayers.signal.bsignal import *
from pylayers.measures.mesuwb import *
import matplotlib.pyplot as plt
from pylayers.gis.layout import *
#M=UWBMesure(173)
M=UWBMesure(13)
#M=UWBMesure(1)
cir=TUsignal()
cirf=TUsignal()
#cir.readcir("where2cir-tx001-rx145.mat","Tx001")
#cirf.readcir("where2-furcir-tx001-rx145.mat","Tx001")
cir.readcir("where2cir-tx002-rx012.mat","Tx002")
#cirf.readcir("where2-furcir-tx002-rx012.mat","Tx002")
#cir.readcir("where2cir-tx001-rx001.mat","Tx001")
#cirf.readcir("where2-furcir-tx001-rx001.mat","Tx001")
plt.ion()
fig = plt.figure()
fig.subplots_adjust(hspace=0.5)
ax1 = fig.add_subplot(411,title="points and layout")
L=Layout()
L.load('siradel-cut-fur.ini')
#L.build()
L.showGs(fig=fig,ax=ax1)
ax1.plot(M.tx[0],M.tx[1],'or')
#ax1.plot(M.rx[1][0],M.rx[1][1],'ob')
ax1.plot(M.rx[2][0],M.rx[2][1],'ob')
ax2 = fig.add_subplot(412,title="Measurement")
M.tdd.ch2.plot()
#ax3 = fig.add_subplot(413,title="Simulation with furniture",sharex=ax2,sharey=ax2)
#cirf.plot(col='red')
ax4 = fig.add_subplot(414,title="Simulation",sharex=ax2,sharey=ax2)
cir.plot(col='blue')
plt.show()
| mit |
CKPalk/MachineLearning | FinalProject/MachineLearning/Stacking/ConfusionMatrix/confusion_matrix.py | 1 | 1584 | ''' Work of Cameron Palk '''
import sys
import pandas as pd
def getDifferenceMatrix( csv ):
df = pd.read_csv( csv )
all_labels = sorted(df.Label.unique())
RF_matrix = { label: [0 for _ in all_labels] for label in all_labels }
P_matrix = { label: [0 for _ in all_labels] for label in all_labels }
KNN_matrix = { label: [0 for _ in all_labels] for label in all_labels }
for idx, row in df.iterrows():
real_idx = row.Label
RF_pred_idx = all_labels.index( row.Random_Forest )
P_pred_idx = all_labels.index( row.Perceptron )
KNN_pred_idx = all_labels.index( row.KNN )
RF_matrix [ real_idx ][ RF_pred_idx ] += 1
P_matrix [ real_idx ][ P_pred_idx ] += 1
KNN_matrix[ real_idx ][ KNN_pred_idx ] += 1
return ({ 'Random_Forest':RF_matrix,
'Perceptron':P_matrix,
'KNN':KNN_matrix }, all_labels )
#
def main( argv ):
try:
csv = argv[ 1 ]
except IndexError:
print( "Error ** Usage: \"python3 {} <csv>\"".format( argv[ 0 ] ) )
#
differenceMatrix = getDifferenceMatrix( csv )
for name, matrix in differenceMatrix[0].items():
print( "\nConfusion Matrix for", name )
df = pd.DataFrame( matrix, index=differenceMatrix[1] )
df.to_csv( "{}_confusion_matrix.csv".format( name ) )
print( "Saved confusion matrix for", name )
'''
s = [[str(e) for e in row] for row in matrix]
lens = [max(map(len, col))-1 for col in zip(*s)]
fmt = '\t'.join('{{:{}}}'.format(x) for x in lens)
table = [fmt.format(*row) for row in s]
print( '\n'.join(table) )
'''
print( "\nDone.\n" )
#
if __name__=='__main__':
main( sys.argv )
#
| mit |
iamkakadong/SparseRL | chain_walk_run.py | 1 | 4271 | import MDP.chain_walk as chain_walk
import MDP.chain_walk_policy as chain_walk_policy
import numpy as np
import td.fast_elastic_td as elastic_td
import td.elastic_td as elastic
import td.lstd as lstd
import matplotlib.pyplot as plt
if __name__ == '__main__':
gamma = 0.9
length = 20
# Define environment and policy
env = chain_walk.chain_walk(gamma, length)
policy = chain_walk_policy.chain_walk_policy(length)
    # Set policy to the optimal policy, i.e. move left if state < 10, move right if state >= 10 (state indices start at 0)
p_mat = np.zeros([20, 2]) #+ 0.5
p_mat[0:10, 0] = 1
p_mat[10::, 1] = 1
policy.set_policy(p_mat)
# Get true value function for the policy
vf = env.get_vf(policy)
# Generate a sequence of 1000 noisy samples with 20 irrelavent features from the environment
n_noisy = 800
n_samples = 1000
n_iter = 500 #n_samples / length
state_seq = []
next_state_seq = []
action_seq = []
reward_seq = []
for i in range(n_samples):
        # reset to a new start state every n_iter samples
if i % n_iter == 0:
env.set_cur_state(9 + i / n_iter)
state_seq.append(env.get_noisy_state(n_noisy))
else:
state_seq.append(sample[2])
# Each sample is a tuple (action, reward, next state)
sample = env.noisy_sample_corr(policy, n_noisy)
action_seq.append(sample[0])
reward_seq.append(sample[1])
next_state_seq.append(sample[2])
# running lstd
agent = lstd.lstd(0.0, 3 + n_noisy, gamma)
state_seq.append(next_state_seq[-1])
agent.set_start(state_seq[0])
prev_state = state_seq[0]
for i in range(len(reward_seq)):
if i == 500:
agent.set_start(state_seq[i])
prev_state = state_seq[i]
else:
agent.update_V(prev_state, state_seq[i + 1], reward_seq[i])
prev_state = state_seq[i + 1]
state_seq.pop()
theta = agent.get_theta()
print theta
# generate feature vectors for all states
x = np.arange(length)
phi_x = np.c_[np.ones(length), x, x ** 2]
# calculate the aproximated value function
beta_x = theta[0:3]
V_y = np.dot(phi_x, beta_x)
# parameters for Elastic_TD
# mu: parameter for augmented Lagrangian
    # epsilon: parameter for equality constraint
    # delta: parameter trading off the l1-norm and l2-norm penalties
# stop_ep: parameter for stopping criteria (ADMM)
mu = 10
epsilon = 0.01
stop_ep = 0.01
eta = 0.5
    # # running the plain Elastic_TD
# alg = elastic.Elastic_TD(n_samples, n_noisy + 3, gamma)
# beta_putong = alg.run(mu, epsilon, delta, stop_ep, np.array(state_seq), np.array(next_state_seq), np.array(reward_seq))
# print(beta_putong)
# # generate feature vectors for all states
# x = np.arange(length)
# phi_x = np.c_[np.ones(length), x, x ** 2]
# # calculate the aproximated value function
# beta_x = beta_putong[0:3]
# V_x = np.dot(phi_x, beta_x)
# # generate the stationary distribution
# D = np.diag(env.get_stationary(policy))
# # calculate the MSE
# v = V_x - vf[:,0]
# loss2 = np.dot(np.dot(v.T, D), v)
# print loss2
# # running l2
# delta = 0.0
# alg = elastic_td.Elastic_TD(n_samples, n_noisy + 3, gamma)
# beta_l2 = alg.run(mu, epsilon, delta, stop_ep, eta, np.array(state_seq), np.array(next_state_seq), np.array(reward_seq))
# print(beta_l2)
# # running Elastic_TD
# delta = 0.5
# alg = elastic_td.Elastic_TD(n_samples, n_noisy + 3, gamma)
# beta_elas = alg.run(mu, epsilon, delta, stop_ep, eta, np.array(state_seq), np.array(next_state_seq), np.array(reward_seq))
# print(beta_elas)
# running l1
delta = 1
alg = elastic_td.Elastic_TD(n_samples, n_noisy + 3, gamma)
beta_l1 = alg.run(mu, epsilon, delta, stop_ep, eta, np.array(state_seq), np.array(next_state_seq), np.array(reward_seq))
print(beta_l1)
mse, truth, pred = env.compute_mse(policy, theta, n_noisy, mc_iter=1000, restart=200)
mse_l1, truth, pred = env.compute_mse(policy, beta_l1, n_noisy, mc_iter=1000, restart=200)
print mse, mse_l1
V_x = np.dot(phi_x, beta_l1[0:3])
plt.plot(V_x)
plt.plot(V_y)
plt.plot(vf)
plt.show()
| gpl-3.0 |
andnovar/ggplot | ggplot/tests/test_reverse.py | 12 | 1028 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from six.moves import xrange
from nose.tools import assert_equal, assert_true, assert_raises
from ggplot.tests import image_comparison
from ggplot import *
import numpy as np
import pandas as pd
@image_comparison(baseline_images=['scale_without_reverse', 'scale_y_reverse', 'scale_x_reverse', 'scale_both_reverse'], extensions=["png"])
def test_scale_reverse():
df = pd.DataFrame({"x": np.arange(0, 100),
"y": np.arange(0, 100),
"z": np.arange(0, 100)})
df['cat'] = np.where(df.x*2 > 50, 'blah', 'blue')
df['cat'] = np.where(df.y > 50, 'hello', df.cat)
df['cat2'] = np.where(df.y < 15, 'one', 'two')
df['y'] = np.sin(df.y)
gg = ggplot(aes(x="x", y="y", shape="cat2", color="cat"), data=df) + geom_line()
print(gg)
print(gg + scale_y_reverse())
print(gg + scale_x_reverse())
print(gg + scale_x_reverse() + scale_y_reverse())
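    # Added note: the four figures printed above are compared, in order,
    # against the four baseline images listed in the image_comparison
    # decorator at the top of this test.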
| bsd-2-clause |
lbdreyer/iris | lib/iris/symbols.py | 5 | 7616 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
Contains symbol definitions for use with :func:`iris.plot.symbols`.
"""
import itertools
import math
from matplotlib.patches import PathPatch
from matplotlib.path import Path
import numpy as np
__all__ = ("CLOUD_COVER",)
# The thickness to use for lines, circles, etc.
_THICKNESS = 0.1
def _make_merged_patch(paths):
# Convert a list of Path instances into a single, black PathPatch.
# Prepare empty vertex/code arrays for the merged path.
# The vertex array is initially flat for convenient initialisation,
# but is then reshaped to (N, 2).
total_len = sum(len(path) for path in paths)
all_vertices = np.empty(total_len * 2)
all_codes = np.empty(total_len, dtype=Path.code_type)
# Copy vertex/code details from the source paths
all_segments = itertools.chain(*(path.iter_segments() for path in paths))
i_vertices = 0
i_codes = 0
for vertices, code in all_segments:
n_vertices = len(vertices)
all_vertices[i_vertices : i_vertices + n_vertices] = vertices
i_vertices += n_vertices
n_codes = n_vertices // 2
if code == Path.STOP:
code = Path.MOVETO
all_codes[i_codes : i_codes + n_codes] = code
i_codes += n_codes
all_vertices.shape = (total_len, 2)
return PathPatch(
Path(all_vertices, all_codes), facecolor="black", edgecolor="none"
)
def _ring_path():
# Returns a Path for a hollow ring.
# The outer radius is 1, the inner radius is 1 - _THICKNESS.
circle = Path.unit_circle()
inner_radius = 1.0 - _THICKNESS
vertices = np.concatenate(
[circle.vertices[:-1], circle.vertices[-2::-1] * inner_radius]
)
codes = np.concatenate([circle.codes[:-1], circle.codes[:-1]])
return Path(vertices, codes)
def _vertical_bar_path():
# Returns a Path for a vertical rectangle, with width _THICKNESS, that will
# nicely overlap the result of _ring_path().
width = _THICKNESS / 2.0
inner_radius = 1.0 - _THICKNESS
vertices = np.array(
[
[-width, -inner_radius],
[width, -inner_radius],
[width, inner_radius],
[-width, inner_radius],
[-width, inner_radius],
]
)
codes = np.array(
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
)
return Path(vertices, codes)
def _slot_path():
# Returns a Path for a filled unit circle with a vertical rectangle
# removed.
circle = Path.unit_circle()
vertical_bar = _vertical_bar_path()
vertices = np.concatenate(
[circle.vertices[:-1], vertical_bar.vertices[-2::-1]]
)
codes = np.concatenate([circle.codes[:-1], vertical_bar.codes[:-1]])
return Path(vertices, codes)
def _left_bar_path():
# Returns a Path for the left-hand side of a horizontal rectangle, with
# height _THICKNESS, that will nicely overlap the result of _ring_path().
inner_radius = 1.0 - _THICKNESS
height = _THICKNESS / 2.0
vertices = np.array(
[
[-inner_radius, -height],
[0, -height],
[0, height],
[-inner_radius, height],
[-inner_radius, height],
]
)
codes = np.array(
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
)
return Path(vertices, codes)
def _slash_path():
# Returns a Path for diagonal, bottom-left to top-right rectangle, with
# width _THICKNESS, that will nicely overlap the result of _ring_path().
half_width = _THICKNESS / 2.0
central_radius = 1.0 - half_width
cos45 = math.cos(math.radians(45))
end_point_offset = cos45 * central_radius
half_width_offset = cos45 * half_width
vertices = np.array(
[
[
-end_point_offset - half_width_offset,
-end_point_offset + half_width_offset,
],
[
-end_point_offset + half_width_offset,
-end_point_offset - half_width_offset,
],
[
end_point_offset + half_width_offset,
end_point_offset - half_width_offset,
],
[
end_point_offset - half_width_offset,
end_point_offset + half_width_offset,
],
[
-end_point_offset - half_width_offset,
-end_point_offset + half_width_offset,
],
]
)
codes = np.array(
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
)
return Path(vertices, codes)
def _backslash_path():
# Returns a Path for diagonal, top-left to bottom-right rectangle, with
# width _THICKNESS, that will nicely overlap the result of _ring_path().
half_width = _THICKNESS / 2.0
central_radius = 1.0 - half_width
cos45 = math.cos(math.radians(45))
end_point_offset = cos45 * central_radius
half_width_offset = cos45 * half_width
vertices = np.array(
[
[
-end_point_offset - half_width_offset,
end_point_offset - half_width_offset,
],
[
end_point_offset - half_width_offset,
-end_point_offset - half_width_offset,
],
[
end_point_offset + half_width_offset,
-end_point_offset + half_width_offset,
],
[
-end_point_offset + half_width_offset,
end_point_offset + half_width_offset,
],
[
-end_point_offset - half_width_offset,
end_point_offset - half_width_offset,
],
]
)
codes = np.array(
[Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
)
return Path(vertices, codes)
def _wedge_fix(wedge_path):
"""
Fixes the problem with Path.wedge where it doesn't initialise the first,
and last two vertices.
This fix should not have any side-effects once Path.wedge has been fixed,
but will then be redundant and should be removed.
This is fixed in MPL v1.3, raising a RuntimeError. A check is performed to
allow for backward compatibility with MPL v1.2.x.
"""
if wedge_path.vertices.flags.writeable:
wedge_path.vertices[0] = 0
wedge_path.vertices[-2:] = 0
return wedge_path
CLOUD_COVER = {
0: [_ring_path()],
1: [_ring_path(), _vertical_bar_path()],
2: [_ring_path(), _wedge_fix(Path.wedge(0, 90))],
3: [_ring_path(), _wedge_fix(Path.wedge(0, 90)), _vertical_bar_path()],
4: [_ring_path(), Path.unit_circle_righthalf()],
5: [_ring_path(), Path.unit_circle_righthalf(), _left_bar_path()],
6: [_ring_path(), _wedge_fix(Path.wedge(-180, 90))],
7: [_slot_path()],
8: [Path.unit_circle()],
9: [_ring_path(), _slash_path(), _backslash_path()],
}
"""
A dictionary mapping WMO cloud cover codes to their corresponding symbol.
See http://www.wmo.int/pages/prog/www/DPFS/documents/485_Vol_I_en_colour.pdf
Part II, Appendix II.4, Graphical Representation of Data, Analyses
and Forecasts
"""
def _convert_paths_to_patches():
# Convert the symbols defined as lists-of-paths into patches.
for code, symbol in CLOUD_COVER.items():
CLOUD_COVER[code] = _make_merged_patch(symbol)
_convert_paths_to_patches()
| lgpl-3.0 |
zjonke/EImotif | simulations/stp/show_figure.py | 1 | 1803 | from eim.settings_loader import GeneralSettings, DataSettings
from eim.common import DictClass
from eim.data import loadData
from eim.spike_train import train_sec2ms
from figure_helper import plotSTPFig
import matplotlib.pyplot as plt
########## PLOTTING SETTINGS ##########
train_start_time_ms = 0
train_end_time_ms = 2500
max_nrns_to_plot = 200
#################################################
########## LOAD DATA ##########
ter = DictClass(loadData('results/testing_short.shelf'))
ted = DictClass(loadData('data/testing_short.shelf'))
anr = DictClass(loadData('results/analysis.shelf'))
#################################################
# LOAD SETTINGS
gs = GeneralSettings()
ds = DataSettings(gs.dataPath + gs.dataSettings)
# PREPARE
IDs = ds.patternIDs
train = ted.train
pd = train.pd
patlen = train.patlen
pc = [(0.0, 0.3, 1.0), (0.0, 0.8, 0.0)]
# input neurons spikes
spikesIN_ms = train_sec2ms(ter.spikes['in'])[::2] # take every 2nd
# excitatory neurons spikes
spikesE_ms = train_sec2ms(ter.spikes['e'])
# pattern preferred neurons - spikes
P1_spikesE_ms = [spikesE_ms[i] for i in anr.nrns_inds_P1]
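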
P2_spikesE_ms = [spikesE_ms[i] for i in anr.nrns_inds_P2]
# non-preferred neurons - subset
rest_spikesE_ms = [spikesE_ms[i] for i in anr.nrns_nondist][:max_nrns_to_plot - len(P1_spikesE_ms)-len(P2_spikesE_ms)]
# inhibitory neurons spikes
spikesI_ms = train_sec2ms(ter.spikes['i'])[::2] # take every 2nd
# PLOT FIGURE
plotSTPFig(train, pd, patlen, ted.pg,
train_start_time_ms, train_end_time_ms, IDs, pc,
spikesIN_ms, P1_spikesE_ms, P2_spikesE_ms, rest_spikesE_ms, spikesI_ms,
anr.nrntracesP1_P1, anr.nrntracesP1_P2, anr.nrntracesP2_P1, anr.nrntracesP2_P2)
# SAVE FIGURE
plt.savefig('plots/fig.png', dpi=600)
plt.savefig('plots/fig.eps')
| mit |
vibhorag/scikit-learn | sklearn/linear_model/logistic.py | 57 | 65098 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (DataConversionWarning,
check_X_y, NotFittedError)
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
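# Added note (not part of the upstream scikit-learn source): with
# z_i = y_i * (x_i . w + c) and per-sample weights s_i, the value returned by
# _logistic_loss_and_grad is
#     sum_i s_i * log(1 + exp(-z_i)) + 0.5 * alpha * ||w||^2
# and its gradient with respect to w is
#     X.T.dot(s * (sigmoid(z) - 1) * y) + alpha * w,
# which is exactly the z0 / grad computation above.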
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
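# Added note: for the binary logistic loss above, the Hessian has the closed
# form H = X.T.dot(diag(s_i * p_i * (1 - p_i))).dot(X) + alpha * I, where
# p_i = sigmoid(x_i . w + c) and s_i are the sample weights; the closure
# Hs(v) returns H.dot(v) without forming H explicitly, with an extra
# row/column handling the intercept term when it is being fitted.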
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
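# Added note: with P = softmax(X.dot(W.T) + intercept) and Y the one-hot
# encoded labels, the loss returned above is the weighted cross-entropy
#     -sum_i s_i * sum_k Y_ik * log(P_ik) + 0.5 * alpha * ||W||_F^2,
# computed through log-sum-exp for numerical stability before P is
# exponentiated in place.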
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
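# Added note: for a direction V (one row per class), writing
# A = X.dot(V.T) + intercept_terms, the Hessian-vector product per class k is
#     (H V)_k = X.T.dot(s * P_k * (A_k - sum_j P_j * A_j)) + alpha * V_k,
# which is what hessp() assembles via r_yhat and hessProd above (plus the
# corresponding intercept rows when fit_intercept is True).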
def _check_solver_option(solver, multi_class, penalty, dual, sample_weight):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg, lbfgs and sag solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver in ['liblinear', 'sag']:
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
if solver == 'liblinear' and sample_weight is not None:
raise ValueError("Solver %s does not support "
"sample weights." % solver)
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=False,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
copy : bool, default False
Whether or not to produce a copy of the data. A copy is not required
anymore. This parameter is deprecated and will be removed in 0.19.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
    You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if copy:
warnings.warn("A copy is not required anymore. The 'copy' parameter "
"is deprecated and will be removed in 0.19.",
DeprecationWarning)
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual, sample_weight)
# Preprocessing.
if check_input or copy:
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0])
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg or sag solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.
else:
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
if Y_bin.shape[1] == 1:
Y_bin = np.hstack([1 - Y_bin, Y_bin])
w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
order='F')
mask_classes = classes
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
w0 = w0.ravel()
target = Y_bin
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
coefs = list()
warm_start_sag = {'coef': w0}
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
try:
n_iter_i = info['nit'] - 1
            except KeyError:
                # older versions of scipy do not report 'nit'
                n_iter_i = info['funcalls'] - 1
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver == 'sag':
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, 'log', 1. / C, max_iter, tol,
verbose, random_state, False, max_squared_sum,
warm_start_sag)
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return coefs, np.array(Cs), n_iter
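# Illustrative sketch (not part of scikit-learn): a minimal way to drive
# logistic_regression_path directly on synthetic data. The data shape and the
# three C values are assumptions chosen for demonstration only; the function
# returns one coefficient vector per C.
def _demo_logistic_regression_path():
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=200, n_features=10,
                                         random_state=0)
    coefs, Cs_out, n_iter_out = logistic_regression_path(
        X_demo, y_demo, Cs=np.logspace(-2, 2, 3), fit_intercept=True,
        solver='lbfgs')
    # len(coefs) == len(Cs_out) == 3; each entry has n_features + 1 values
    # because fit_intercept=True appends the intercept.
    return coefs, Cs_out, n_iter_out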
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='ovr', random_state=None,
max_squared_sum=None, sample_weight=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
        'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
Actual number of iteration for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual, sample_weight)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = sample_weight[train]
coefs, Cs, n_iter = logistic_regression_path(
X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
solver=solver, max_iter=max_iter, class_weight=class_weight,
pos_class=pos_class, multi_class=multi_class,
tol=tol, verbose=verbose, dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling, random_state=random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
# To deal with object dtypes, we need to convert into an array of floats.
y_test = check_array(y_test, dtype=np.float64, ensure_2d=False)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
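# Illustrative sketch (not part of scikit-learn): _log_reg_scoring_path is the
# per-fold workhorse behind LogisticRegressionCV and can be exercised by
# passing explicit train/test index arrays. The 80/20 split and pos_class=1
# below are assumptions used only for demonstration.
def _demo_log_reg_scoring_path():
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=100, random_state=0)
    train_idx = np.arange(80)
    test_idx = np.arange(80, 100)
    coefs, Cs_out, scores, n_iter_out = _log_reg_scoring_path(
        X_demo, y_demo, train_idx, test_idx, pos_class=1, Cs=5,
        fit_intercept=True)
    # scores holds one accuracy value (the default scorer) per C.
    return scores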
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
        When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
max_iter : int
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg' and 'lbfgs' handle
multinomial loss; 'sag' and 'liblinear' are limited to
one-versus-rest schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
        and 'newton-cg' solvers.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
    to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual, sample_weight)
if self.solver == 'liblinear':
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state)
self.n_iter_ = np.array([n_iter_])
return self
max_squared_sum = get_max_squared_sum(X) if self.solver == 'sag' \
else None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver, copy=False,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if self.multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
        Else use a one-vs-rest approach, i.e. calculate the probability
        of each class assuming it to be positive using the logistic function,
        and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
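# Illustrative sketch (not part of scikit-learn): minimal end-to-end use of the
# estimator defined above. The iris data and the parameter values are
# assumptions for demonstration; with multi_class='multinomial' the
# probabilities come from the softmax branch of predict_proba, otherwise from
# the normalised one-vs-rest branch.
def _demo_logistic_regression():
    from sklearn.datasets import load_iris
    iris = load_iris()
    clf = LogisticRegression(solver='lbfgs', multi_class='multinomial',
                             C=1.0, max_iter=200)
    clf.fit(iris.data, iris.target)
    proba = clf.predict_proba(iris.data[:5])
    # Each row of proba sums to 1 across the three iris classes.
    return proba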
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
    This class implements logistic regression using liblinear, newton-cg, sag
    or lbfgs optimizers. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
we warm start along the path i.e guess the initial coefficients of the
present fit to be the coefficients got after convergence in the previous
fit, so it is supposed to be faster for high-dimensional dense data.
For a multiclass problem, the hyperparameters for each class are computed
using the best scores got by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.cross_validation` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg' and 'lbfgs' handle
multinomial loss; 'sag' and 'liblinear' are limited to
one-versus-rest schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for 'lbfgs' and
'newton-cg' solvers.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
It is available only when parameter intercept is set to True
and is of shape(1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr',
random_state=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual, sample_weight)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
max_squared_sum = get_max_squared_sum(X) if self.solver == 'sag' \
else None
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in
['balanced', 'auto']):
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=self.class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
len(self.Cs_)))
else:
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
len(self.Cs_)))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty, copy=False,
class_weight=self.class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
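# Illustrative sketch (not part of scikit-learn): the cross-validated estimator
# above searches a grid of C values per class. The synthetic data, the number
# of folds and the size of the Cs grid are assumptions chosen for
# demonstration.
def _demo_logistic_regression_cv():
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=300, n_features=20,
                                         random_state=0)
    clf = LogisticRegressionCV(Cs=5, cv=3, solver='lbfgs', refit=True)
    clf.fit(X_demo, y_demo)
    # clf.C_ holds the selected C per class; clf.scores_ the fold-by-C grid.
    return clf.C_, clf.scores_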
| bsd-3-clause |
nguyentu1602/statsmodels | statsmodels/tsa/vector_ar/plotting.py | 31 | 7291 | from statsmodels.compat.python import lrange, range
import numpy as np
import statsmodels.tsa.vector_ar.util as util
class MPLConfigurator(object):
def __init__(self):
self._inverse_actions = []
def revert(self):
for action in self._inverse_actions:
action()
def set_fontsize(self, size):
import matplotlib as mpl
old_size = mpl.rcParams['font.size']
mpl.rcParams['font.size'] = size
def revert():
mpl.rcParams['font.size'] = old_size
self._inverse_actions.append(revert)
#-------------------------------------------------------------------------------
# Plotting functions
def plot_mts(Y, names=None, index=None):
"""
Plot multiple time series
"""
import matplotlib.pyplot as plt
k = Y.shape[1]
rows, cols = k, 1
plt.figure(figsize=(10, 10))
for j in range(k):
ts = Y[:, j]
ax = plt.subplot(rows, cols, j+1)
if index is not None:
ax.plot(index, ts)
else:
ax.plot(ts)
if names is not None:
ax.set_title(names[j])
def plot_var_forc(prior, forc, err_upper, err_lower,
index=None, names=None, plot_stderr=True):
import matplotlib.pyplot as plt
n, k = prior.shape
rows, cols = k, 1
fig = plt.figure(figsize=(10, 10))
prange = np.arange(n)
rng_f = np.arange(n - 1, n + len(forc))
rng_err = np.arange(n, n + len(forc))
for j in range(k):
ax = plt.subplot(rows, cols, j+1)
p1 = ax.plot(prange, prior[:, j], 'k', label='Observed')
p2 = ax.plot(rng_f, np.r_[prior[-1:, j], forc[:, j]], 'k--',
label='Forecast')
if plot_stderr:
p3 = ax.plot(rng_err, err_upper[:, j], 'k-.',
label='Forc 2 STD err')
ax.plot(rng_err, err_lower[:, j], 'k-.')
if names is not None:
ax.set_title(names[j])
ax.legend(loc='upper right')
def plot_with_error(y, error, x=None, axes=None, value_fmt='k',
error_fmt='k--', alpha=0.05, stderr_type = 'asym'):
"""
Make plot with optional error bars
Parameters
----------
    y : array-like
        Values to plot.
    error : array or None
        Symmetric standard errors (``stderr_type='asym'``) or a pair of
        lower/upper error bands; if None, no error bars are drawn.
"""
import matplotlib.pyplot as plt
if axes is None:
axes = plt.gca()
x = x if x is not None else lrange(len(y))
plot_action = lambda y, fmt: axes.plot(x, y, fmt)
plot_action(y, value_fmt)
#changed this
if error is not None:
if stderr_type == 'asym':
q = util.norm_signif_level(alpha)
plot_action(y - q * error, error_fmt)
plot_action(y + q * error, error_fmt)
if stderr_type in ('mc','sz1','sz2','sz3'):
plot_action(error[0], error_fmt)
plot_action(error[1], error_fmt)
def plot_full_acorr(acorr, fontsize=8, linewidth=8, xlabel=None,
err_bound=None):
"""
Parameters
----------
"""
import matplotlib.pyplot as plt
config = MPLConfigurator()
config.set_fontsize(fontsize)
k = acorr.shape[1]
fig, axes = plt.subplots(k, k, figsize=(10, 10), squeeze=False)
for i in range(k):
for j in range(k):
ax = axes[i][j]
acorr_plot(acorr[:, i, j], linewidth=linewidth,
xlabel=xlabel, ax=ax)
if err_bound is not None:
ax.axhline(err_bound, color='k', linestyle='--')
ax.axhline(-err_bound, color='k', linestyle='--')
adjust_subplots()
config.revert()
return fig
def acorr_plot(acorr, linewidth=8, xlabel=None, ax=None):
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
if xlabel is None:
xlabel = np.arange(len(acorr))
ax.vlines(xlabel, [0], acorr, lw=linewidth)
ax.axhline(0, color='k')
ax.set_ylim([-1, 1])
# hack?
ax.set_xlim([-1, xlabel[-1] + 1])
def plot_acorr_with_error():
pass
def adjust_subplots(**kwds):
import matplotlib.pyplot as plt
passed_kwds = dict(bottom=0.05, top=0.925,
left=0.05, right=0.95,
hspace=0.2)
passed_kwds.update(kwds)
plt.subplots_adjust(**passed_kwds)
#-------------------------------------------------------------------------------
# Multiple impulse response (cum_effects, etc.) cplots
def irf_grid_plot(values, stderr, impcol, rescol, names, title,
signif=0.05, hlines=None, subplot_params=None,
plot_params=None, figsize=(10,10), stderr_type='asym'):
"""
Reusable function to make flexible grid plots of impulse responses and
    cumulative effects
values : (T + 1) x k x k
stderr : T x k x k
hlines : k x k
"""
import matplotlib.pyplot as plt
if subplot_params is None:
subplot_params = {}
if plot_params is None:
plot_params = {}
nrows, ncols, to_plot = _get_irf_plot_config(names, impcol, rescol)
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True,
squeeze=False, figsize=figsize)
# fill out space
adjust_subplots()
fig.suptitle(title, fontsize=14)
subtitle_temp = r'%s$\rightarrow$%s'
k = len(names)
rng = lrange(len(values))
for (j, i, ai, aj) in to_plot:
ax = axes[ai][aj]
# HACK?
if stderr is not None:
if stderr_type == 'asym':
sig = np.sqrt(stderr[:, j * k + i, j * k + i])
plot_with_error(values[:, i, j], sig, x=rng, axes=ax,
alpha=signif, value_fmt='b', stderr_type=stderr_type)
if stderr_type in ('mc','sz1','sz2','sz3'):
errs = stderr[0][:, i, j], stderr[1][:, i, j]
plot_with_error(values[:, i, j], errs, x=rng, axes=ax,
alpha=signif, value_fmt='b', stderr_type=stderr_type)
else:
plot_with_error(values[:, i, j], None, x=rng, axes=ax,
value_fmt='b')
ax.axhline(0, color='k')
if hlines is not None:
ax.axhline(hlines[i,j], color='k')
sz = subplot_params.get('fontsize', 12)
ax.set_title(subtitle_temp % (names[j], names[i]), fontsize=sz)
def _get_irf_plot_config(names, impcol, rescol):
nrows = ncols = k = len(names)
if impcol is not None and rescol is not None:
# plot one impulse-response pair
nrows = ncols = 1
j = util.get_index(names, impcol)
i = util.get_index(names, rescol)
to_plot = [(j, i, 0, 0)]
elif impcol is not None:
# plot impacts of impulse in one variable
ncols = 1
j = util.get_index(names, impcol)
to_plot = [(j, i, i, 0) for i in range(k)]
elif rescol is not None:
# plot only things having impact on particular variable
ncols = 1
i = util.get_index(names, rescol)
to_plot = [(j, i, j, 0) for j in range(k)]
else:
# plot everything
to_plot = [(j, i, i, j) for i in range(k) for j in range(k)]
return nrows, ncols, to_plot
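# Illustrative sketch (not part of statsmodels): the helpers above are normally
# driven by VAR results objects, but plot_mts accepts plain arrays as well.
# The random two-series data below is an assumption used purely for
# demonstration.
def _demo_plot_mts():
    import matplotlib.pyplot as plt
    data = np.random.randn(100, 2).cumsum(axis=0)
    plot_mts(data, names=['series A', 'series B'])
    plt.close('all')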
#-------------------------------------------------------------------------------
# Forecast error variance decomposition
| bsd-3-clause |
akaszynski/vtkInterface | setup.py | 1 | 2456 | """
Installation file for python pyvista module
"""
import os
import platform
import sys
import warnings
from io import open as io_open
from setuptools import setup
package_name = 'pyvista'
__version__ = None
filepath = os.path.dirname(__file__)
version_file = os.path.join(filepath, package_name, '_version.py')
with io_open(version_file, mode='r') as fd:
exec(fd.read())
# pre-compiled vtk available for python3
install_requires = ['numpy',
'imageio',
'appdirs',
'scooby>=0.5.1',
'meshio>=4.0.3, <5.0',
]
# add vtk if not windows and 2.7
py_ver = int(sys.version[0])
if os.name == 'nt' and (py_ver < 3 or '64' not in platform.architecture()[0]):
warnings.warn('\nYou will need to install VTK manually.'
' Try using Anaconda. See:\n'
'https://anaconda.org/anaconda/vtk')
else:
    # append the requirement as a plain string, not a nested list
    install_requires.append('vtk')
readme_file = os.path.join(filepath, 'README.rst')
setup(
name=package_name,
packages=[package_name, 'pyvista.examples', 'pyvista.core',
'pyvista.plotting', 'pyvista.utilities'],
version=__version__,
description='Easier Pythonic interface to VTK',
long_description=io_open(readme_file, encoding="utf-8").read(),
author='PyVista Developers',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Information Analysis',
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
url='https://github.com/pyvista/pyvista',
keywords='vtk numpy plotting mesh',
package_data={'pyvista.examples': ['airplane.ply', 'ant.ply', 'channels.vti',
'hexbeam.vtk', 'sphere.ply',
'uniform.vtk', 'rectilinear.vtk',
'globe.vtk', '2k_earth_daymap.jpg']},
python_requires='>=3.5.*',
install_requires=install_requires,
extras_require={
'colormaps': ['matplotlib', 'colorcet', 'cmocean']
},
)
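# Illustrative sketch (not part of this setup script): the version string used
# above comes from executing pyvista/_version.py, which can be reproduced in
# isolation as below; the helper name and the namespace dict are assumptions.
def _read_version(package_dir=package_name):
    namespace = {}
    with io_open(os.path.join(filepath, package_dir, '_version.py'),
                 mode='r') as fd:
        exec(fd.read(), namespace)
    return namespace.get('__version__')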
| mit |
ifuding/Kaggle | SVPC/Code/philly/lgb_predict.py | 3 | 2602 | import pandas as pd
import time
import numpy as np
import gc
from feature_engineer import gen_features
from feature_engineer import timer
import keras_train
from nfold_train import nfold_train, models_eval
import tensorflow as tf
import os
import shutil
from lcc_sample import neg_sample
from sklearn import metrics
import lightgbm as lgb
from main import *
DENSE_FEATURE_TYPE = keras_train.DENSE_FEATURE_TYPE
def find_best_iteration_search(bst):
"""
"""
valide_df = load_valide_data()
valide_data = valide_df[keras_train.USED_FEATURE_LIST].values.astype(DENSE_FEATURE_TYPE)
valide_label = valide_df['is_attributed'].values.astype(np.uint8)
del valide_df
gc.collect()
if FLAGS.stacking:
valide_data = gen_stacking_data(valide_data)
pos_cnt = valide_label.sum()
neg_cnt = len(valide_label) - pos_cnt
print ("valide type: {0} valide size: {1} valide data pos: {2} neg: {3}".format(
valide_data.dtype, len(valide_data), pos_cnt, neg_cnt))
with timer("finding best iteration..."):
search_iterations = [int(ii.strip()) for ii in FLAGS.search_iterations.split(',')]
for i in range(search_iterations[0], search_iterations[1], search_iterations[2]):
y_pred = bst.predict(valide_data, num_iteration=i)
score = metrics.roc_auc_score(valide_label, y_pred)
loss = metrics.log_loss(valide_label, y_pred)
print ("Iteration: {0} AUC: {1} Logloss: {2}".format(i, score, loss))
def predict_test(bst):
test_df = load_test_data()
test_data = test_df[keras_train.USED_FEATURE_LIST].values.astype(DENSE_FEATURE_TYPE)
test_id = test_df['click_id'].values #.astype(np.uint32)
print ("test type {0}".format(test_data.dtype))
del test_df
gc.collect()
if FLAGS.stacking:
test_data = gen_stacking_data(test_data)
with timer("predicting test data"):
print('predicting test data...')
sub_re = pd.DataFrame(test_id, columns = ['click_id'])
sub_re['is_attributed'] = bst.predict(test_data, num_iteration=FLAGS.best_iteration)
time_label = time.strftime('_%Y_%m_%d_%H_%M_%S', time.gmtime())
sub_name = FLAGS.output_model_path + "sub" + time_label + ".csv"
sub_re.to_csv(sub_name, index=False)
if __name__ == "__main__":
# load model to predict
bst = lgb.Booster(model_file= FLAGS.input_previous_model_path + '/model.txt')
if FLAGS.search_best_iteration:
find_best_iteration_search(bst)
else:
predict_test(bst) | apache-2.0 |
turi-code/SFrame | oss_src/unity/python/sframe/data_structures/sframe.py | 5 | 231056 | from __future__ import print_function
"""
This module defines the SFrame class which provides the
ability to create, access and manipulate a remote scalable dataframe object.
SFrame acts similarly to pandas.DataFrame, but the data is completely immutable
and is stored column wise on the GraphLab Server side.
"""
'''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from .. import connect as _mt
from ..connect import main as glconnect
from ..cython.cy_flexible_type import infer_type_of_list
from ..cython.context import debug_trace as cython_context
from ..cython.cy_sframe import UnitySFrameProxy
from ..util import _is_non_string_iterable, _make_internal_url
from ..util import infer_dbapi2_types
from ..util import get_module_from_object, pytype_to_printf
from .sarray import SArray, _create_sequential_sarray
from .. import aggregate
from .image import Image as _Image
from ..deps import pandas, HAS_PANDAS, HAS_NUMPY
from .grouped_sframe import GroupedSFrame
import array
from prettytable import PrettyTable
from textwrap import wrap
import datetime
import time
import itertools
import logging as _logging
import os
import subprocess
import uuid
import platform
import numbers
import sys
import six
import csv
__all__ = ['SFrame']
__LOGGER__ = _logging.getLogger(__name__)
SFRAME_GARBAGE_COLLECTOR = []
SFRAME_GRAPHLABUTIL_REF = None
FOOTER_STRS = ['Note: Only the head of the SFrame is printed.',
'You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns.']
LAZY_FOOTER_STRS = ['Note: Only the head of the SFrame is printed. This SFrame is lazily evaluated.',
'You can use sf.materialize() to force materialization.']
root_package_name = __import__(__name__.split('.')[0]).__name__
SFRAME_ROOTS = [# Binary/lib location in production egg
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)), '..')),
# Build tree location of SFrame binaries
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)),
'..', '..', '..', '..','..','oss_src','sframe')),
# Location of python sources
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)),
'..', '..', '..', '..', 'unity', 'python', root_package_name)),
# Build tree dependency location
os.path.abspath(os.path.join(os.path.dirname(
os.path.realpath(__file__)),
'..', '..', '..', '..', '..', '..', 'deps', 'local', 'lib'))
]
SPARK_UNITY = "spark_unity"
HDFS_LIB = "libhdfs.so"
RDD_JAR_FILE = "spark_unity.jar"
RDD_SUPPORT_INITED = False
BINARY_PATHS = {}
STAGING_DIR = None
RDD_SUPPORT = True
PRODUCTION_RUN = False
REMOTE_OS = None
SPARK_SUPPORT_NAMES = {'RDD_JAR_PATH': 'spark_unity.jar'}
first = True
for i in SFRAME_ROOTS:
for key,val in SPARK_SUPPORT_NAMES.items():
tmp_path = os.path.join(i, val)
if key not in BINARY_PATHS and os.path.isfile(tmp_path):
BINARY_PATHS[key] = tmp_path
if all(name in BINARY_PATHS for name in list(SPARK_SUPPORT_NAMES.keys())):
if first:
PRODUCTION_RUN = True
break
first = False
for name in SPARK_SUPPORT_NAMES.keys():
if (name not in BINARY_PATHS):
if sys.platform != 'win32':
__LOGGER__.warn("GraphLab engine cannot find %s" % SPARK_SUPPORT_NAMES[name])
if not all(name in BINARY_PATHS for name in SPARK_SUPPORT_NAMES.keys()):
RDD_SUPPORT = False
if sys.version_info.major > 2:
long = int
def get_spark_integration_jar_path():
"""
The absolute path of the jar file required to enable GraphLab Create's
integration with Apache Spark.
"""
if 'RDD_JAR_PATH' not in BINARY_PATHS:
raise RuntimeError("Could not find a spark integration jar. "\
"Does your version of GraphLab Create support Spark Integration (is it >= 1.0)?")
return BINARY_PATHS['RDD_JAR_PATH']
def __rdd_support_init__(sprk_ctx,graphlab_util_ref):
global REMOTE_OS
global RDD_SUPPORT_INITED
global STAGING_DIR
global BINARY_PATHS
if not RDD_SUPPORT or RDD_SUPPORT_INITED:
return
sprk_ctx._jsc.addJar(BINARY_PATHS['RDD_JAR_PATH'])
# Make sure our GraphLabUtil scala functions are accessible from the driver
try:
graphlab_util_ref.getBinaryName()
except:
raise RuntimeError("Could not execute RDD translation functions. "\
"This means either SparkContext is not initialized correctly "\
"or %s is not accessible.\n "\
"jar file path: %s" % (RDD_JAR_FILE,BINARY_PATHS['RDD_JAR_PATH']))
dummy_rdd = sprk_ctx.parallelize([1])
if PRODUCTION_RUN and (sprk_ctx.master.startswith('yarn-client') or sprk_ctx.master.startswith('spark://')):
# Get cluster operating system
os_rdd = dummy_rdd.map(lambda x: platform.system())
REMOTE_OS = os_rdd.collect()[0]
# Set binary path
for i in BINARY_PATHS.keys():
s = BINARY_PATHS[i]
if REMOTE_OS == 'Linux':
BINARY_PATHS[i] = os.path.join(os.path.dirname(s),os.path.basename(s))
elif REMOTE_OS == 'Darwin':
BINARY_PATHS[i] = os.path.join(os.path.dirname(s),os.path.basename(s))
else:
raise RuntimeError("YARN cluster has unsupported operating system "\
"(something other than Linux or Mac OS X). "\
"Cannot convert RDDs on this cluster to SFrame.")
# Create staging directory
staging_dir = '.graphlabStaging'
if sprk_ctx.master.startswith('yarn-client') or sprk_ctx.master.startswith('spark://'):
# Get that staging directory's full name
tmp_loc = graphlab_util_ref.getHadoopNameNode()
STAGING_DIR = os.path.join(tmp_loc, "user", sprk_ctx.sparkUser(), staging_dir)
if STAGING_DIR is None:
raise RuntimeError("Failed to create a staging directory on HDFS. "\
"Do your cluster nodes have a working hdfs client?")
# Actually create the staging dir
unity = glconnect.get_unity()
unity.__mkdir__(STAGING_DIR)
unity.__chmod__(STAGING_DIR, 0o777)
elif sprk_ctx.master[0:5] == 'local':
# Save the output sframes to the same temp workspace this engine is
# using
#TODO: Consider cases where server and client aren't on the same machine
unity = glconnect.get_unity()
STAGING_DIR = unity.get_current_cache_file_location()
if STAGING_DIR is None:
raise RuntimeError("Could not retrieve local staging directory!")
else:
raise RuntimeError("Your spark context's master is '" +
str(sprk_ctx.master) +
"'. Only 'local' and 'yarn-client' are supported.")
RDD_SUPPORT_INITED = True
def load_sframe(filename):
"""
Load an SFrame. The filename extension is used to determine the format
automatically. This function is particularly useful for SFrames previously
saved in binary format. For CSV imports the ``SFrame.read_csv`` function
provides greater control. If the SFrame is in binary format, ``filename`` is
actually a directory, created when the SFrame is saved.
Parameters
----------
filename : string
Location of the file to load. Can be a local path or a remote URL.
Returns
-------
out : SFrame
See Also
--------
SFrame.save, SFrame.read_csv
Examples
--------
>>> sf = graphlab.SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf.save('my_sframe') # 'my_sframe' is a directory
>>> sf_loaded = graphlab.load_sframe('my_sframe')
"""
sf = SFrame(data=filename)
return sf
def _get_global_dbapi_info(dbapi_module, conn):
"""
Fetches all needed information from the top-level DBAPI module,
guessing at the module if it wasn't passed as a parameter. Returns a
dictionary of all the needed variables. This is put in one place to
make sure the error message is clear if the module "guess" is wrong.
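Illustrative sketch using the standard-library sqlite3 module (the exact
values depend on the DBAPI2 driver in use):
>>> import sqlite3
>>> conn = sqlite3.connect(':memory:')
>>> info = _get_global_dbapi_info(sqlite3, conn)
>>> info['apilevel'], info['paramstyle']
('2.0', 'qmark')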
"""
module_given_msg = "The DBAPI2 module given ({0}) is missing the global\n"+\
"variable '{1}'. Please make sure you are supplying a module that\n"+\
"conforms to the DBAPI 2.0 standard (PEP 0249)."
module_not_given_msg = "Hello! I gave my best effort to find the\n"+\
"top-level module that the connection object you gave me came from.\n"+\
"I found '{0}' which doesn't have the global variable '{1}'.\n"+\
"To avoid this confusion, you can pass the module as a parameter using\n"+\
"the 'dbapi_module' argument to either from_sql or to_sql."
if dbapi_module is None:
dbapi_module = get_module_from_object(conn)
module_given = False
else:
module_given = True
module_name = dbapi_module.__name__ if hasattr(dbapi_module, '__name__') else None
needed_vars = ['apilevel','paramstyle','Error','DATETIME','NUMBER','ROWID']
ret_dict = {}
ret_dict['module_name'] = module_name
for i in needed_vars:
tmp = None
try:
tmp = getattr(dbapi_module, i)
except AttributeError as e:
# Some DBs don't actually care about types, so they won't define
# the types. These are the ACTUALLY needed variables though
if i not in ['apilevel','paramstyle','Error']:
pass
elif module_given:
raise AttributeError(module_given_msg.format(module_name, i))
else:
raise AttributeError(module_not_given_msg.format(module_name, i))
ret_dict[i] = tmp
try:
if ret_dict['apilevel'][0:3] != "2.0":
raise NotImplementedError("Unsupported API version " +\
str(ret_dict['apilevel']) + ". Only DBAPI 2.0 is supported.")
except TypeError:
raise TypeError("Module's 'apilevel' value is invalid.")
acceptable_paramstyles = ['qmark','numeric','named','format','pyformat']
try:
if ret_dict['paramstyle'] not in acceptable_paramstyles:
raise TypeError("Module's 'paramstyle' value is invalid.")
except TypeError as e:
raise TypeError("Module's 'paramstyle' value is invalid.")
return ret_dict
# Expects list of tuples
def _force_cast_sql_types(data, result_types, force_cast_cols):
if len(force_cast_cols) == 0:
return data
ret_data = []
for row in data:
row = list(row)
for idx in force_cast_cols:
if row[idx] is not None and result_types[idx] != datetime.datetime:
row[idx] = result_types[idx](row[idx])
ret_data.append(row)
return ret_data
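# Illustrative sketch of the helper above with hypothetical inputs: only the
# column indices listed in force_cast_cols are coerced to the hinted type,
# and datetime columns are always left untouched.
#   _force_cast_sql_types([('1', 2.5)], [int, float], {0})  ->  [[1, 2.5]]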
class SFrame(object):
"""
A tabular, column-mutable dataframe object that can scale to big data. The
data in SFrame is stored column-wise on the GraphLab Server side, and is
stored on persistent storage (e.g. disk) to avoid being constrained by
memory size. Each column in an SFrame is a size-immutable
:class:`~graphlab.SArray`, but SFrames are mutable in that columns can be
added and subtracted with ease. An SFrame essentially acts as an ordered
dict of SArrays.
Currently, we support constructing an SFrame from the following data
formats:
* csv file (comma separated value)
* sframe directory archive (A directory where an sframe was saved
previously)
* general text file (with csv parsing options, See :py:meth:`read_csv()`)
* a Python dictionary
* pandas.DataFrame
* JSON
* Apache Avro
* PySpark RDD
and from the following sources:
* your local file system
* the GraphLab Server's file system
* HDFS
* Amazon S3
* HTTP(S).
Only basic examples of construction are covered here. For more information
and examples, please see the `User Guide <https://turi.com/learn/user
guide/index.html#Working_with_data_Tabular_data>`_, `API Translator
<https://turi.com/learn/translator>`_, `How-Tos
<https://turi.com/learn/how-to>`_, and data science `Gallery
<https://turi.com/learn/gallery>`_.
Parameters
----------
data : array | pandas.DataFrame | string | dict, optional
The actual interpretation of this field is dependent on the ``format``
parameter. If ``data`` is an array or Pandas DataFrame, the contents are
stored in the SFrame. If ``data`` is a string, it is interpreted as a
file. Files can be read from local file system or urls (local://,
hdfs://, s3://, http://).
format : string, optional
Format of the data. The default, "auto" will automatically infer the
input data format. The inference rules are simple: If the data is an
array or a dataframe, it is associated with 'array' and 'dataframe'
respectively. If the data is a string, it is interpreted as a file, and
the file extension is used to infer the file format. The explicit
options are:
- "auto"
- "array"
- "dict"
- "sarray"
- "dataframe"
- "csv"
- "tsv"
- "sframe".
See Also
--------
read_csv:
Create a new SFrame from a csv file. Preferred for text and CSV formats,
because it has a lot more options for controlling the parser.
save : Save an SFrame for later use.
Notes
-----
- When reading from HDFS on Linux we must guess the location of your java
installation. By default, we will use the location pointed to by the
JAVA_HOME environment variable. If this is not set, we check many common
installation paths. You may use two environment variables to override
this behavior. GRAPHLAB_JAVA_HOME allows you to specify a specific java
installation and overrides JAVA_HOME. GRAPHLAB_LIBJVM_DIRECTORY
overrides all and expects the exact directory that your preferred
libjvm.so file is located. Use this ONLY if you'd like to use a
non-standard JVM.
Examples
--------
>>> import graphlab
>>> from graphlab import SFrame
**Construction**
Construct an SFrame from a dataframe. This transfers the dataframe object
across the network.
>>> df = pandas.DataFrame()
>>> sf = SFrame(data=df)
Construct an SFrame from a local csv file (only works for local server).
>>> sf = SFrame(data='~/mydata/foo.csv')
Construct an SFrame from a csv file on Amazon S3. This requires the
environment variables: *AWS_ACCESS_KEY_ID* and *AWS_SECRET_ACCESS_KEY* to be
set before the Python session is started. Alternatively, you can use
:py:func:`graphlab.aws.set_credentials()` to set the credentials after
python is started and :py:func:`graphlab.aws.get_credentials()` to verify
these environment variables.
>>> sf = SFrame(data='s3://mybucket/foo.csv')
Read from HDFS using a specific java installation (environment variable
only applies when using Linux)
>>> import os
>>> os.environ['GRAPHLAB_JAVA_HOME'] = '/my/path/to/java'
>>> from graphlab import SFrame
>>> sf = SFrame("hdfs://mycluster.example.com:8020/user/myname/coolfile.txt")
An SFrame can be constructed from a dictionary of values or SArrays:
>>> sf = gl.SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
Or equivalently:
>>> ids = SArray([1,2,3])
>>> vals = SArray(['A','B','C'])
>>> sf = SFrame({'id':ids,'val':vals})
It can also be constructed from an array of SArrays in which case column
names are automatically assigned.
>>> ids = SArray([1,2,3])
>>> vals = SArray(['A','B','C'])
>>> sf = SFrame([ids, vals])
>>> sf
Columns:
X1 int
X2 str
Rows: 3
Data:
X1 X2
0 1 A
1 2 B
2 3 C
If the SFrame is constructed from a list of values, an SFrame of a single
column is constructed.
>>> sf = SFrame([1,2,3])
>>> sf
Columns:
X1 int
Rows: 3
Data:
X1
0 1
1 2
2 3
**Parsing**
The :py:func:`graphlab.SFrame.read_csv()` method is quite powerful and can
be used to import a variety of row-based formats.
First, some simple cases:
>>> !cat ratings.csv
user_id,movie_id,rating
10210,1,1
10213,2,5
10217,2,2
10102,1,3
10109,3,4
10117,5,2
10122,2,4
10114,1,5
10125,1,1
>>> gl.SFrame.read_csv('ratings.csv')
Columns:
user_id int
movie_id int
rating int
Rows: 9
Data:
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 10210 | 1 | 1 |
| 10213 | 2 | 5 |
| 10217 | 2 | 2 |
| 10102 | 1 | 3 |
| 10109 | 3 | 4 |
| 10117 | 5 | 2 |
| 10122 | 2 | 4 |
| 10114 | 1 | 5 |
| 10125 | 1 | 1 |
+---------+----------+--------+
[9 rows x 3 columns]
A delimiter other than "," can be specified, for instance the space
character ' ' in this case. Only single-character delimiters are supported.
>>> !cat ratings.csv
user_id movie_id rating
10210 1 1
10213 2 5
10217 2 2
10102 1 3
10109 3 4
10117 5 2
10122 2 4
10114 1 5
10125 1 1
>>> gl.SFrame.read_csv('ratings.csv', delimiter=' ')
By default, "NA" or a missing element are interpreted as missing values.
>>> !cat ratings2.csv
user,movie,rating
"tom",,1
harry,5,
jack,2,2
bill,,
>>> gl.SFrame.read_csv('ratings2.csv')
Columns:
user str
movie int
rating int
Rows: 4
Data:
+---------+-------+--------+
| user | movie | rating |
+---------+-------+--------+
| tom | None | 1 |
| harry | 5 | None |
| jack | 2 | 2 |
| bill | None | None |
+---------+-------+--------+
[4 rows x 3 columns]
Furthermore, thanks to the dictionary and list types, read_csv can handle
parsing of JSON-like formats.
>>> !cat ratings3.csv
business, categories, ratings
"Restaurant 1", [1 4 9 10], {"funny":5, "cool":2}
"Restaurant 2", [], {"happy":2, "sad":2}
"Restaurant 3", [2, 11, 12], {}
>>> gl.SFrame.read_csv('ratings3.csv')
Columns:
business str
categories array
ratings dict
Rows: 3
Data:
+--------------+--------------------------------+-------------------------+
| business | categories | ratings |
+--------------+--------------------------------+-------------------------+
| Restaurant 1 | array('d', [1.0, 4.0, 9.0, ... | {'funny': 5, 'cool': 2} |
| Restaurant 2 | array('d') | {'sad': 2, 'happy': 2} |
| Restaurant 3 | array('d', [2.0, 11.0, 12.0]) | {} |
+--------------+--------------------------------+-------------------------+
[3 rows x 3 columns]
The list and dictionary parsers are quite flexible and can absorb a
variety of loosely formatted inputs. Also, note that the list and dictionary
types are recursive, allowing arbitrary values to be nested.
All these are valid lists:
>>> !cat interesting_lists.csv
list
[]
[1,2,3]
[1;2,3]
[1 2 3]
[{a:b}]
["c",d, e]
[[a]]
>>> gl.SFrame.read_csv('interesting_lists.csv')
Columns:
list list
Rows: 7
Data:
+-----------------+
| list |
+-----------------+
| [] |
| [1, 2, 3] |
| [1, 2, 3] |
| [1, 2, 3] |
| [{'a': 'b'}] |
| ['c', 'd', 'e'] |
| [['a']] |
+-----------------+
[7 rows x 1 columns]
All these are valid dicts:
>>> !cat interesting_dicts.csv
dict
{"classic":1,"dict":1}
{space:1 seperated:1}
{emptyvalue:}
{}
{:}
{recursive1:[{a:b}]}
{:[{:[a]}]}
>>> gl.SFrame.read_csv('interesting_dicts.csv')
Columns:
dict dict
Rows: 7
Data:
+------------------------------+
| dict |
+------------------------------+
| {'dict': 1, 'classic': 1} |
| {'seperated': 1, 'space': 1} |
| {'emptyvalue': None} |
| {} |
| {None: None} |
| {'recursive1': [{'a': 'b'}]} |
| {None: [{None: array('d')}]} |
+------------------------------+
[7 rows x 1 columns]
**Saving**
Save and load the sframe in native format.
>>> sf.save('mysframedir')
>>> sf2 = graphlab.load_sframe('mysframedir')
**Column Manipulation**
An SFrame is composed of a collection of columns of SArrays, and individual
SArrays can be extracted easily. For instance given an SFrame:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
The "id" column can be extracted using:
>>> sf["id"]
dtype: int
Rows: 3
[1, 2, 3]
And can be deleted using:
>>> del sf["id"]
Multiple columns can be selected by passing a list of column names:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C'],'val2':[5,6,7]})
>>> sf
Columns:
id int
val str
val2 int
Rows: 3
Data:
id val val2
0 1 A 5
1 2 B 6
2 3 C 7
>>> sf2 = sf[['id','val']]
>>> sf2
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
You can also select columns using types or a list of types:
>>> sf2 = sf[int]
>>> sf2
Columns:
id int
val2 int
Rows: 3
Data:
id val2
0 1 5
1 2 6
2 3 7
Or a mix of types and names:
>>> sf2 = sf[['id', str]]
>>> sf2
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
The same mechanism can be used to re-order columns:
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
>>> sf = sf[['val','id']]
>>> sf
Columns:
val str
id int
Rows: 3
Data:
val id
0 A 1
1 B 2
2 C 3
**Element Access and Slicing**
SFrames can be accessed by integer keys just like a regular python list.
Such operations may not be fast on large datasets so looping over an SFrame
should be avoided.
>>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']})
>>> sf[0]
{'id': 1, 'val': 'A'}
>>> sf[2]
{'id': 3, 'val': 'C'}
>>> sf[5]
IndexError: SFrame index out of range
Negative indices can be used to access elements from the tail of the array
>>> sf[-1] # returns the last element
{'id': 3, 'val': 'C'}
>>> sf[-2] # returns the second to last element
{'id': 2, 'val': 'B'}
The SFrame also supports the full range of python slicing operators:
>>> sf[1000:] # Returns an SFrame containing rows 1000 to the end
>>> sf[:1000] # Returns an SFrame containing rows 0 to row 999 inclusive
>>> sf[0:1000:2] # Returns an SFrame containing every second row from row 0 up to (but not including) row 1000
>>> sf[-100:] # Returns an SFrame containing last 100 rows
>>> sf[-100:len(sf):2] # Returns an SFrame containing last 100 rows in steps of 2
**Logical Filter**
An SFrame can be filtered using
>>> sframe[binary_filter]
where sframe is an SFrame and binary_filter is an SArray of the same length.
The result is a new SFrame containing only the rows of the original SFrame
whose matching row in the binary_filter is non-zero.
This permits the use of boolean operators that can be used to perform
logical filtering operations. For instance, given an SFrame
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
>>> sf[(sf['id'] >= 1) & (sf['id'] <= 2)]
Columns:
id int
val str
Rows: 2
Data:
id val
0 1 A
1 2 B
See :class:`~graphlab.SArray` for more details on the use of the logical
filter.
This can also be used more generally to provide filtering capability which
is otherwise not expressible with simple boolean functions. For instance:
>>> sf[sf['id'].apply(lambda x: math.log(x) <= 1)]
Columns:
id int
val str
Rows: 2
Data:
id val
0 1 A
1 2 B
Or alternatively:
>>> sf[sf.apply(lambda x: math.log(x['id']) <= 1)]
Create an SFrame from a Python dictionary.
>>> from graphlab import SFrame
>>> sf = SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf
Columns:
id int
val str
Rows: 3
Data:
id val
0 1 A
1 2 B
2 3 C
"""
__slots__ = ['_proxy', '_cache']
__construct_ctr = int(time.time()) % 1000
def __init__(self, data=None,
format='auto',
_proxy=None):
"""__init__(data=list(), format='auto')
Construct a new SFrame from a url or a pandas.DataFrame.
"""
# emit metrics for num_rows, num_columns, and type (local://, s3, hdfs, http)
SFrame.__construct_ctr += 1
if SFrame.__construct_ctr % 1000 == 0:
_mt._get_metric_tracker().track('sframe.init1000')
if (_proxy):
self.__proxy__ = _proxy
else:
self.__proxy__ = UnitySFrameProxy(glconnect.get_client())
_format = None
if (format == 'auto'):
if (HAS_PANDAS and isinstance(data, pandas.DataFrame)):
_format = 'dataframe'
_mt._get_metric_tracker().track('sframe.location.memory', value=1)
elif (isinstance(data, str) or
(sys.version_info.major < 3 and isinstance(data, unicode))):
if data.find('://') == -1:
suffix = 'local'
else:
suffix = data.split('://')[0]
if data.endswith(('.csv', '.csv.gz')):
_format = 'csv'
elif data.endswith(('.tsv', '.tsv.gz')):
_format = 'tsv'
elif data.endswith(('.txt', '.txt.gz')):
print("Assuming file is csv. For other delimiters, " + \
"please use `SFrame.read_csv`.")
_format = 'csv'
else:
_format = 'sframe'
elif type(data) == SArray:
_format = 'sarray'
elif isinstance(data, SFrame):
_format = 'sframe_obj'
elif isinstance(data, dict):
_format = 'dict'
elif _is_non_string_iterable(data):
_format = 'array'
elif data is None:
_format = 'empty'
else:
raise ValueError('Cannot infer input type for data ' + str(data))
else:
_format = format
with cython_context():
if (_format == 'dataframe'):
for c in data.columns.values:
self.add_column(SArray(data[c].values), str(c))
elif (_format == 'sframe_obj'):
for col in data.column_names():
self.__proxy__.add_column(data[col].__proxy__, col)
elif (_format == 'sarray'):
self.__proxy__.add_column(data.__proxy__, '')
elif (_format == 'array'):
if len(data) > 0:
unique_types = set([type(x) for x in data if x is not None])
if len(unique_types) == 1 and SArray in unique_types:
for arr in data:
self.add_column(arr)
elif SArray in unique_types:
raise ValueError("Cannot create SFrame from mix of regular values and SArrays")
else:
self.__proxy__.add_column(SArray(data).__proxy__, '')
elif (_format == 'dict'):
# Validate that every column is the same length.
if len(set(len(value) for value in data.values())) > 1:
# probably should be a value error. But we used to raise
# runtime error here...
raise RuntimeError("All column should be of the same length")
# split into SArray values and other iterable values.
# We convert the iterable values in bulk, and then add the sarray values as columns
sarray_keys = sorted(key for key,value in six.iteritems(data) if isinstance(value, SArray))
self.__proxy__.load_from_dataframe({key:value for key,value in six.iteritems(data) if not isinstance(value, SArray)})
for key in sarray_keys:
self.__proxy__.add_column(data[key].__proxy__, key)
elif (_format == 'csv'):
url = data
tmpsf = SFrame.read_csv(url, delimiter=',', header=True)
self.__proxy__ = tmpsf.__proxy__
elif (_format == 'tsv'):
url = data
tmpsf = SFrame.read_csv(url, delimiter='\t', header=True)
self.__proxy__ = tmpsf.__proxy__
elif (_format == 'sframe'):
url = _make_internal_url(data)
self.__proxy__.load_from_sframe_index(url)
elif (_format == 'empty'):
pass
else:
raise ValueError('Unknown input type: ' + format)
sframe_size = -1
if self.__has_size__():
sframe_size = self.num_rows()
@staticmethod
def _infer_column_types_from_lines(first_rows):
if (len(first_rows.column_names()) < 1):
print("Insufficient number of columns to perform type inference")
raise RuntimeError("Insufficient columns ")
if len(first_rows) < 1:
print("Insufficient number of rows to perform type inference")
raise RuntimeError("Insufficient rows")
# gets all the values column-wise
all_column_values_transposed = [list(first_rows[col])
for col in first_rows.column_names()]
# transpose
all_column_values = [list(x) for x in list(zip(*all_column_values_transposed))]
all_column_type_hints = [[type(t) for t in vals] for vals in all_column_values]
# collect the hints
# if the rows do not all agree on the number of columns, fall back to str
if len(set(len(x) for x in all_column_type_hints)) != 1:
print("Unable to infer column types. Defaulting to str")
return str
import types
column_type_hints = all_column_type_hints[0]
# now perform type combining across rows
for i in range(1, len(all_column_type_hints)):
currow = all_column_type_hints[i]
for j in range(len(column_type_hints)):
# combine types
d = set([currow[j], column_type_hints[j]])
if (len(d) == 1):
# easy case. both agree on the type
continue
if (((long in d) or (int in d)) and (float in d)):
# one is an int, one is a float. it's a float
column_type_hints[j] = float
elif ((array.array in d) and (list in d)):
# one is an array, one is a list. it's a list
column_type_hints[j] = list
elif type(None) in d:
# one is a NoneType. assign to other type
if currow[j] != type(None):
column_type_hints[j] = currow[j]
else:
column_type_hints[j] = str
# final pass. everything which is still NoneType is now a str
for i in range(len(column_type_hints)):
if column_type_hints[i] == type(None):
column_type_hints[i] = str
return column_type_hints
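# Worked example of the combining rules above (hypothetical two-row sample):
# hints from rows [1, 'a'] and [2.5, None] combine to [float, str], since an
# int/float mix promotes to float and a NoneType hint defers to the other
# row's type.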
@classmethod
def _read_csv_impl(cls,
url,
delimiter=',',
header=True,
error_bad_lines=False,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
line_terminator="\n",
usecols=[],
nrows=None,
skiprows=0,
verbose=True,
store_errors=True,
nrows_to_infer=100,
**kwargs):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs, and
returns a pair containing the SFrame and, when store_errors=True, a dict
mapping each filename to an SArray of the lines in that file that failed
to parse.
Parameters
----------
store_errors : bool
If true, the output errors dict will be filled.
See `read_csv` for the rest of the parameters.
"""
# Pandas argument compatibility
if "sep" in kwargs:
delimiter = kwargs['sep']
del kwargs['sep']
if "quotechar" in kwargs:
quote_char = kwargs['quotechar']
del kwargs['quotechar']
if "doublequote" in kwargs:
double_quote = kwargs['doublequote']
del kwargs['doublequote']
if "comment" in kwargs:
comment_char = kwargs['comment']
del kwargs['comment']
if comment_char is None:
comment_char = ''
if "lineterminator" in kwargs:
line_terminator = kwargs['lineterminator']
del kwargs['lineterminator']
if len(kwargs) > 0:
raise TypeError("Unexpected keyword arguments " + str(kwargs.keys()))
parsing_config = dict()
parsing_config["delimiter"] = delimiter
parsing_config["use_header"] = header
parsing_config["continue_on_failure"] = not error_bad_lines
parsing_config["comment_char"] = comment_char
parsing_config["escape_char"] = escape_char
parsing_config["double_quote"] = double_quote
parsing_config["quote_char"] = quote_char
parsing_config["skip_initial_space"] = skip_initial_space
parsing_config["store_errors"] = store_errors
parsing_config["line_terminator"] = line_terminator
parsing_config["output_columns"] = usecols
parsing_config["skip_rows"] =skiprows
if type(na_values) is str:
na_values = [na_values]
if na_values is not None and len(na_values) > 0:
parsing_config["na_values"] = na_values
if nrows is not None:
parsing_config["row_limit"] = nrows
proxy = UnitySFrameProxy(glconnect.get_client())
internal_url = _make_internal_url(url)
# Attempt to automatically detect the column types. Either produce a
# list of types; otherwise default to all str types.
column_type_inference_was_used = False
if column_type_hints is None:
try:
# Get the first nrows_to_infer rows (using all the desired arguments).
first_rows = SFrame.read_csv(url, nrows=nrows_to_infer,
column_type_hints=type(None),
header=header,
delimiter=delimiter,
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
na_values=na_values,
line_terminator=line_terminator,
usecols=usecols,
skiprows=skiprows,
verbose=verbose)
column_type_hints = SFrame._infer_column_types_from_lines(first_rows)
typelist = '[' + ','.join(t.__name__ for t in column_type_hints) + ']'
if verbose != False:
print("------------------------------------------------------")
print("Inferred types from first %d line(s) of file as " % nrows_to_infer)
print("column_type_hints="+ typelist)
print("If parsing fails due to incorrect types, you can correct")
print("the inferred type list above and pass it to read_csv in")
print( "the column_type_hints argument")
print("------------------------------------------------------")
column_type_inference_was_used = True
except RuntimeError as e:
if type(e) == RuntimeError and ("cancel" in str(e.args[0]) or "Cancel" in str(e.args[0])):
raise e
# If the above fails, default back to str for all columns.
column_type_hints = str
if verbose != False:
print('Could not detect types. Using str for each column.')
if type(column_type_hints) is type:
type_hints = {'__all_columns__': column_type_hints}
elif type(column_type_hints) is list:
type_hints = dict(list(zip(['__X%d__' % i for i in range(len(column_type_hints))], column_type_hints)))
elif type(column_type_hints) is dict:
# we need to fill in a potentially incomplete dictionary
try:
# Get the first nrows_to_infer rows (using all the desired arguments).
first_rows = SFrame.read_csv(url, nrows=nrows_to_infer,
column_type_hints=type(None),
header=header,
delimiter=delimiter,
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
na_values=na_values,
line_terminator=line_terminator,
usecols=usecols,
skiprows=skiprows,
verbose=verbose)
inferred_types = SFrame._infer_column_types_from_lines(first_rows)
# make a dict of column_name to type
inferred_types = dict(list(zip(first_rows.column_names(), inferred_types)))
# overwrite with the user's specified types
for key in column_type_hints:
inferred_types[key] = column_type_hints[key]
column_type_hints = inferred_types
except RuntimeError as e:
if type(e) == RuntimeError and ("cancel" in e.message or "Cancel" in e.message):
raise e
# If the above fails, default back to str for unmatched columns
if verbose != False:
print('Could not detect types. Using str for all unspecified columns.')
type_hints = column_type_hints
else:
raise TypeError("Invalid type for column_type_hints. Must be a dictionary, list or a single type.")
suffix=''
if url.find('://') == -1:
suffix = 'local'
else:
suffix = url.split('://')[0]
try:
if (not verbose):
glconnect.get_server().set_log_progress(False)
with cython_context():
errors = proxy.load_from_csvs(internal_url, parsing_config, type_hints)
except Exception as e:
if type(e) == RuntimeError and "CSV parsing cancelled" in str(e.args[0]):
raise e
if column_type_inference_was_used:
# try again
if verbose != False:
print("Unable to parse the file with automatic type inference.")
print("Defaulting to column_type_hints=str")
type_hints = {'__all_columns__': str}
try:
with cython_context():
errors = proxy.load_from_csvs(internal_url, parsing_config, type_hints)
except:
glconnect.get_server().set_log_progress(True)
raise
else:
glconnect.get_server().set_log_progress(True)
raise
glconnect.get_server().set_log_progress(True)
return (cls(_proxy=proxy), { f: SArray(_proxy = es) for (f, es) in errors.items() })
@classmethod
def read_csv_with_errors(cls,
url,
delimiter=',',
header=True,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
line_terminator='\n',
usecols = [],
nrows=None,
skiprows=0,
verbose=True,
nrows_to_infer=100,
**kwargs):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs, and
returns a pair containing the SFrame and a dict mapping each filename to
an SArray of the lines in that file that failed to parse.
Parameters
----------
url : string
Location of the CSV file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
delimiter : string, optional
This describes the delimiter used for parsing csv files.
header : bool, optional
If true, uses the first row as the column names. Otherwise use the
default column names: 'X1, X2, ...'.
comment_char : string, optional
The character which denotes that the
remainder of the line is a comment.
escape_char : string, optional
Character which begins a C escape sequence
double_quote : bool, optional
If True, two consecutive quotes in a string are parsed to a single
quote.
quote_char : string, optional
Character sequence that indicates a quote.
skip_initial_space : bool, optional
Ignore extra spaces at the start of a field
column_type_hints : None, type, list[type], dict[string, type], optional
This provides type hints for each column. By default, this method
attempts to detect the type of each column automatically.
Supported types are int, float, str, list, dict, and array.array.
* If a single type is provided, the type will be
applied to all columns. For instance, column_type_hints=float
will force all columns to be parsed as float.
* If a list of types is provided, the types applies
to each column in order, e.g.[int, float, str]
will parse the first column as int, second as float and third as
string.
* If a dictionary of column name to type is provided,
each type value in the dictionary is applied to the key it
belongs to.
For instance {'user':int} will hint that the column called "user"
should be parsed as an integer, and the rest will be type inferred.
na_values : str | list of str, optional
A string or list of strings to be interpreted as missing values.
line_terminator : str, optional
A string to be interpreted as the line terminator. Defaults to "\\n"
which will also correctly match Mac, Linux and Windows line endings
("\\r", "\\n" and "\\r\\n" respectively)
usecols : list of str, optional
A subset of column names to output. If unspecified (default),
all columns will be read. This can provide performance gains if the
number of columns are large. If the input file has no headers,
usecols=['X1','X3'] will read columns 1 and 3.
nrows : int, optional
If set, only this many rows will be read from the file.
skiprows : int, optional
If set, this number of rows at the start of the file are skipped.
verbose : bool, optional
If True, print the progress.
Returns
-------
out : tuple
The first element is the SFrame with good data. The second element
is a dictionary mapping each filename to an SArray of the lines in that
file that failed to parse.
See Also
--------
read_csv, SFrame
Examples
--------
>>> bad_url = 'https://static.turi.com/datasets/bad_csv_example.csv'
>>> (sf, bad_lines) = graphlab.SFrame.read_csv_with_errors(bad_url)
>>> sf
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[98 rows x 3 columns]
>>> bad_lines
{'https://static.turi.com/datasets/bad_csv_example.csv': dtype: str
Rows: 1
['x,y,z,a,b,c']}
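The per-file SArrays of bad lines can be inspected like any other SArray;
a sketch based on the output above:
>>> sum(len(lines) for lines in bad_lines.values())
1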
"""
return cls._read_csv_impl(url,
delimiter=delimiter,
header=header,
error_bad_lines=False, # we are storing errors,
# thus we must not fail
# on bad lines
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
column_type_hints=column_type_hints,
na_values=na_values,
line_terminator=line_terminator,
usecols=usecols,
nrows=nrows,
verbose=verbose,
skiprows=skiprows,
store_errors=True,
nrows_to_infer=nrows_to_infer,
**kwargs)
@classmethod
def read_csv(cls,
url,
delimiter=',',
header=True,
error_bad_lines=False,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
line_terminator='\n',
usecols=[],
nrows=None,
skiprows=0,
verbose=True,
nrows_to_infer=100,
**kwargs):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs.
Parameters
----------
url : string
Location of the CSV file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
delimiter : string, optional
This describes the delimiter used for parsing csv files.
header : bool, optional
If true, uses the first row as the column names. Otherwise use the
default column names: 'X1, X2, ...'.
error_bad_lines : bool
If true, will fail upon encountering a bad line. If false, will
continue parsing skipping lines which fail to parse correctly.
A sample of the first 10 encountered bad lines will be printed.
comment_char : string, optional
The character which denotes that the remainder of the line is a
comment.
escape_char : string, optional
Character which begins a C escape sequence
double_quote : bool, optional
If True, two consecutive quotes in a string are parsed to a single
quote.
quote_char : string, optional
Character sequence that indicates a quote.
skip_initial_space : bool, optional
Ignore extra spaces at the start of a field
column_type_hints : None, type, list[type], dict[string, type], optional
This provides type hints for each column. By default, this method
attempts to detect the type of each column automatically.
Supported types are int, float, str, list, dict, and array.array.
* If a single type is provided, the type will be
applied to all columns. For instance, column_type_hints=float
will force all columns to be parsed as float.
* If a list of types is provided, the types applies
to each column in order, e.g.[int, float, str]
will parse the first column as int, second as float and third as
string.
* If a dictionary of column name to type is provided,
each type value in the dictionary is applied to the key it
belongs to.
For instance {'user':int} will hint that the column called "user"
should be parsed as an integer, and the rest will be type inferred.
na_values : str | list of str, optional
A string or list of strings to be interpreted as missing values.
line_terminator : str, optional
A string to be interpreted as the line terminator. Defaults to "\\n"
which will also correctly match Mac, Linux and Windows line endings
("\\r", "\\n" and "\\r\\n" respectively)
usecols : list of str, optional
A subset of column names to output. If unspecified (default),
all columns will be read. This can provide performance gains if the
number of columns are large. If the input file has no headers,
usecols=['X1','X3'] will read columns 1 and 3.
nrows : int, optional
If set, only this many rows will be read from the file.
skiprows : int, optional
If set, this number of rows at the start of the file are skipped.
verbose : bool, optional
If True, print the progress.
Returns
-------
out : SFrame
See Also
--------
read_csv_with_errors, SFrame
Examples
--------
Read a regular csv file, with all default options, automatically
determine types:
>>> url = 'https://static.turi.com/datasets/rating_data_example.csv'
>>> sf = graphlab.SFrame.read_csv(url)
>>> sf
Columns:
user_id int
movie_id int
rating int
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Read only the first 100 lines of the csv file:
>>> sf = graphlab.SFrame.read_csv(url, nrows=100)
>>> sf
Columns:
user_id int
movie_id int
rating int
Rows: 100
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[100 rows x 3 columns]
Read all columns as str type
>>> sf = graphlab.SFrame.read_csv(url, column_type_hints=str)
>>> sf
Columns:
user_id str
movie_id str
rating str
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Specify types for a subset of columns and leave the rest to be str.
>>> sf = graphlab.SFrame.read_csv(url,
... column_type_hints={
... 'user_id':int, 'rating':float
... })
>>> sf
Columns:
user_id str
movie_id str
rating float
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3.0 |
| 25907 | 1663 | 3.0 |
| 25923 | 1663 | 3.0 |
| 25924 | 1663 | 3.0 |
| 25928 | 1663 | 2.0 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Do not treat the first line as a header:
>>> sf = graphlab.SFrame.read_csv(url, header=False)
>>> sf
Columns:
X1 str
X2 str
X3 str
Rows: 10001
+---------+----------+--------+
| X1 | X2 | X3 |
+---------+----------+--------+
| user_id | movie_id | rating |
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10001 rows x 3 columns]
Treat '3' as a missing value:
>>> sf = graphlab.SFrame.read_csv(url, na_values=['3'], column_type_hints=str)
>>> sf
Columns:
user_id str
movie_id str
rating str
Rows: 10000
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | None |
| 25907 | 1663 | None |
| 25923 | 1663 | None |
| 25924 | 1663 | None |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
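Read only a subset of the columns and skip the first 100 data rows (a
sketch using the usecols and skiprows options described above):
>>> sf = graphlab.SFrame.read_csv(url, usecols=['user_id', 'rating'], skiprows=100)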
Throw error on parse failure:
>>> bad_url = 'https://static.turi.com/datasets/bad_csv_example.csv'
>>> sf = graphlab.SFrame.read_csv(bad_url, error_bad_lines=True)
RuntimeError: Runtime Exception. Unable to parse line "x,y,z,a,b,c"
Set error_bad_lines=False to skip bad lines
"""
return cls._read_csv_impl(url,
delimiter=delimiter,
header=header,
error_bad_lines=error_bad_lines,
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
column_type_hints=column_type_hints,
na_values=na_values,
line_terminator=line_terminator,
usecols=usecols,
nrows=nrows,
skiprows=skiprows,
verbose=verbose,
store_errors=False,
nrows_to_infer=nrows_to_infer,
**kwargs)[0]
@classmethod
def read_json(cls,
url,
orient='records'):
"""
Reads a JSON file representing a table into an SFrame.
Parameters
----------
url : string
Location of the JSON file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
orient : string, optional. Either "records" or "lines"
If orient="records" the file is expected to contain a single JSON
array, where each array element is a dictionary. If orient="lines",
the file is expected to contain a JSON element per line.
Examples
--------
The orient parameter describes the expected input format of the JSON
file.
If orient="records", the JSON file is expected to contain a single
JSON Array where each array element is a dictionary describing the row.
For instance:
>>> !cat input.json
[{'a':1,'b':1}, {'a':2,'b':2}, {'a':3,'b':3}]
>>> SFrame.read_json('input.json', orient='records')
Columns:
a int
b int
Rows: 3
Data:
+---+---+
| a | b |
+---+---+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
+---+---+
If orient="lines", the JSON file is expected to contain a JSON element
per line. If each line contains a dictionary, it is automatically
unpacked.
>>> !cat input.json
{'a':1,'b':1}
{'a':2,'b':2}
{'a':3,'b':3}
>>> g = SFrame.read_json('input.json', orient='lines')
Columns:
a int
b int
Rows: 3
Data:
+---+---+
| a | b |
+---+---+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
+---+---+
If the lines are not dictionaries, the original format is maintained.
>>> !cat input.json
['a','b','c']
['d','e','f']
['g','h','i']
[1,2,3]
>>> g = SFrame.read_json('input.json', orient='lines')
Columns:
X1 list
Rows: 3
Data:
+-----------+
| X1 |
+-----------+
| [a, b, c] |
| [d, e, f] |
| [g, h, i] |
+-----------+
[3 rows x 1 columns]
"""
if orient == "records":
g = cls.read_csv(url, line_terminator='',
header=False, column_type_hints=list)
if g.num_cols() != 1:
raise RuntimeError("Input JSON not of expected format")
return g.stack('X1').unpack('X1','')
elif orient == "lines":
g = cls.read_csv(url, header=False)
if g.num_cols() != 1:
raise RuntimeError("Input JSON not of expected format")
if g['X1'].dtype() == dict:
return g.unpack('X1','')
else:
return g
else:
raise ValueError("Invalid value for orient parameter (" + str(orient) + ")")
@classmethod
def __get_graphlabutil_reference_on_spark_unity_jar(cls,sc):
'''
A utility function to get a handle to the GraphLabUtil object in the jar
file. It dynamically loads the GraphLabUtil class and calls the factory
method ``getUtil()``, which returns a GraphLabUtil object.
Parameters
----------
sc : SparkContext
Current SparkContext.
'''
global SFRAME_GRAPHLABUTIL_REF
if SFRAME_GRAPHLABUTIL_REF is None:
jar_path = get_spark_integration_jar_path()
jar_file = sc._jvm.java.io.File(jar_path)
jar_url = jar_file.toURL()
url_array = sc._gateway.new_array(sc._jvm.java.net.URL,1)
url_array[0] = jar_url
child = sc._jvm.java.net.URLClassLoader(url_array)
load_class = child.loadClass("org.graphlab.create.GraphLabUtil")
method = load_class.getDeclaredMethod("getUtil",None)
SFRAME_GRAPHLABUTIL_REF = method.invoke(load_class,None)
return SFRAME_GRAPHLABUTIL_REF
def to_spark_dataframe(self,sc,sql,number_of_partitions=4):
"""
Convert the current SFrame to a Spark DataFrame.
Parameters
----------
sc : SparkContext
sc is an existing SparkContext.
sql : SQLContext
sql is an existing SQLContext.
number_of_partitions : int
number of partitions for the output rdd
Returns
----------
out: pyspark.sql.DataFrame
Notes
-----
- Look at to_rdd()
- Look at from_rdd()
Examples
--------
>>> from pyspark import SparkContext, SQLContext
>>> from graphlab import SFrame
>>> from pyspark.sql import SQLContext
>>> sc = SparkContext('local')
>>> sql = SQLContext(sc)
>>> sf = SFrame({'x': [1,2,3], 'y': ['fish', 'chips', 'salad']})
>>> df = sf.to_spark_dataframe(sc, sql)
>>> df.show()
x y
1 fish
2 chips
3 salad
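The resulting DataFrame can be used with Spark SQL as usual (a sketch
against the Spark 1.x API this method targets; the table name is
hypothetical):
>>> df.registerTempTable('sf_table')
>>> counts = sql.sql('SELECT y, COUNT(*) AS n FROM sf_table GROUP BY y')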
"""
def homogeneous_type(seq):
if seq is None or len(seq) == 0:
return True
iseq = iter(seq)
first_type = type(next(iseq))
return all((type(x) is first_type) for x in iseq)
if len(self) == 0:
raise ValueError("SFrame is empty")
column_names = self.column_names()
first_row = self.head(1)[0]
for name in column_names:
if _is_non_string_iterable(first_row[name]) and not homogeneous_type(first_row[name]):
raise TypeError("Support for translation to Spark DataFrame not enabled for heterogeneous iterable type (column: %s). Use SFrame.to_rdd()." % name)
rdd = self.to_rdd(sc, number_of_partitions)
from pyspark.sql import Row
rowRdd = rdd.map(lambda x: Row(**x))
if(hasattr(sql,'createDataFrame')):
return sql.createDataFrame(rowRdd)
else:
return sql.inferSchema(rowRdd)
def to_rdd(self, sc, number_of_partitions=4):
"""
Convert the current SFrame to a Spark RDD.
Parameters
----------
sc : SparkContext
sc is an existing SparkContext.
number_of_partitions: int
number of partitions for the output rdd
Returns
----------
out: pyspark.rdd.RDD
Notes
----------
- Look at from_rdd().
- Look at to_spark_dataframe().
Examples
--------
>>> from pyspark import SparkContext
>>> from graphlab import SFrame
>>> sc = SparkContext('local')
>>> sf = SFrame({'x': [1,2,3], 'y': ['fish', 'chips', 'salad']})
>>> rdd = sf.to_rdd(sc)
>>> rdd.collect()
[{'x': 1L, 'y': 'fish'}, {'x': 2L, 'y': 'chips'}, {'x': 3L, 'y': 'salad'}]
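The result behaves like any other PySpark RDD of dictionaries (sketch):
>>> rdd.map(lambda row: row['y'].upper()).collect()
['FISH', 'CHIPS', 'SALAD']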
"""
_mt._get_metric_tracker().track('sframe.to_rdd')
if not RDD_SUPPORT:
raise Exception("Support for translation to Spark RDDs not enabled.")
for _type in self.column_types():
if(_type.__name__ == 'Image'):
raise TypeError("Support for translation to Spark RDDs not enabled for Image type.")
if number_of_partitions is None:
number_of_partitions = sc.defaultParallelism
if type(number_of_partitions) is not int:
raise ValueError("number_of_partitions parameter expects an integer type")
if number_of_partitions == 0:
raise ValueError("number_of_partitions can not be initialized to zero")
# get a handle to GraphLabUtil java object
graphlab_util_ref = self.__get_graphlabutil_reference_on_spark_unity_jar(sc)
# Save SFrame in a temporary place
tmp_loc = self.__get_staging_dir__(sc,graphlab_util_ref)
sf_loc = os.path.join(tmp_loc, str(uuid.uuid4()))
# The following substring replacement is required to make
# to_rdd() work on Azure blob storage accounts.
if sf_loc.startswith("wasb://"):
sf_loc = sf_loc.replace(graphlab_util_ref.getHadoopNameNode(),"hdfs://")
self.save(sf_loc)
print(sf_loc)
# Keep track of the temporary sframe that was saved; we need to delete it eventually.
dummysf = load_sframe(sf_loc)
dummysf.__proxy__.delete_on_close()
SFRAME_GARBAGE_COLLECTOR.append(dummysf)
# Run the spark job
javaRDD = graphlab_util_ref.pySparkToRDD(
sc._jsc.sc(),sf_loc,number_of_partitions,"")
from pyspark import RDD
import pyspark
output_rdd = RDD(javaRDD,sc,pyspark.serializers.PickleSerializer())
return output_rdd
@classmethod
def __get_staging_dir__(cls,cur_sc,graphlab_util_ref):
if not RDD_SUPPORT_INITED:
__rdd_support_init__(cur_sc,graphlab_util_ref)
return STAGING_DIR
@classmethod
def from_rdd(cls, rdd, cur_sc):
"""
Convert a Spark RDD into an SFrame.
Parameters
----------
rdd : pyspark.rdd.RDD or pyspark.sql.DataFrame
The input Spark RDD that is going to be converted to an SFrame.
cur_sc : SparkContext
An instance object of an SparkContext.
Returns
-------
out : SFrame
Notes
----------
- look at to_rdd().
- look at to_spark_dataframe().
Examples
--------
>>> from pyspark import SparkContext
>>> from graphlab import SFrame
>>> sc = SparkContext('local')
>>> rdd = sc.parallelize([1,2,3])
>>> sf = SFrame.from_rdd(rdd, sc)
>>> sf
Data:
+-----+
| X1 |
+-----+
| 1.0 |
| 2.0 |
| 3.0 |
+-----+
[3 rows x 1 columns]
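A Spark DataFrame can be converted the same way (a sketch; assumes an
existing SQLContext named ``sql``):
>>> df = sql.createDataFrame([(1, 'fish'), (2, 'chips')], ['x', 'y'])
>>> sf = SFrame.from_rdd(df, sc)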
"""
_mt._get_metric_tracker().track('sframe.from_rdd')
if not RDD_SUPPORT:
raise Exception("Support for translation to Spark RDDs not enabled.")
jrdd_deserializer = None
from pyspark import RDD
from pyspark.sql import DataFrame
if isinstance(rdd,DataFrame):
jrdd_deserializer = rdd.rdd._jrdd_deserializer
elif isinstance(rdd,RDD):
jrdd_deserializer = rdd._jrdd_deserializer
else:
raise RuntimeError("RDD type " + rdd.__class__.__name__ + " is currently unsupported.")
# Determine the encoding of the RDD
encoding = "batch" # default encoding in spark is batch
if(jrdd_deserializer.__class__.__name__ == 'PickleSerializer'):
encoding = "pickle"
elif(jrdd_deserializer.__class__.__name__ == 'UTF8Deserializer'):
encoding = "utf8"
# get a handle to GraphLabUtil java object
graphlab_util_ref = SFrame.__get_graphlabutil_reference_on_spark_unity_jar(cur_sc)
# Prep the sframe environment
tmp_loc = SFrame.__get_staging_dir__(cur_sc,graphlab_util_ref)
if tmp_loc is None:
raise RuntimeError("Could not determine staging directory for SFrame files.")
# directory to stage the SFrame
outputDir = tmp_loc
# prefix for the final SFrame name. This would normally be a meaningful name
# however since this sframe will immediately be opened we use a random name
finalSFramePrefix = str(uuid.uuid4())
# run the spark job which returns the filename of the final sframe index
if isinstance(rdd,DataFrame):
# if it's a DataFrame, use the special java code path.
df = rdd._jdf
finalSFrameFilename = graphlab_util_ref.toSFrame(
df, tmp_loc, finalSFramePrefix)
else:
if encoding == 'utf8':
## TODO: This is a temporary solution. We completely bypass the
## toSFrame() code path when the encoding is 'utf8', because of a
## Spark 1.5 closure-cleaning issue with deeply nested functions.
def f(iterator):
for obj in iterator:
yield obj.encode("utf-8")
rdd = rdd.mapPartitions(f)
encoding = "batch"
if(rdd._jrdd_deserializer.__class__.__name__ == 'PickleSerializer'):
encoding = "pickle"
#finalSFrameFilename = graphlab_util_ref.toSFrame(
# rdd._jrdd.rdd(),tmp_loc, finalSFramePrefix)
#else:
# Prep the additional arguments to feed into the pySparkToSFrame function in Java
# that will call the spark_unity binary which does the actual encoding
additional_args = (" --encoding=%s " % encoding) + " --type=rdd "
finalSFrameFilename = graphlab_util_ref.pySparkToSFrame(
rdd._jrdd, tmp_loc, finalSFramePrefix, additional_args)
# Load and return the sframe
sf = SFrame()
sf.__proxy__.load_from_sframe_index(_make_internal_url(finalSFrameFilename))
return sf
@classmethod
def from_odbc(cls, db, sql, verbose=False):
"""
Convert a table or query from a database to an SFrame.
This function does not do any checking on the given SQL query, and
cannot know what effect it will have on the database. Any side effects
from the query will be reflected on the database. If no result
rows are returned, an empty SFrame is created.
Keep in mind the default letter case in which your database stores table
names. In some cases, you may need to add quotation marks (or whatever
character your database uses to quote identifiers), especially if you
created the table using `to_odbc`.
Parameters
----------
db : `graphlab.extensions._odbc_connection.unity_odbc_connection`
An ODBC connection object. This can only be obtained by calling
`graphlab.connect_odbc`. Check that documentation for how to create
this object.
sql : str
A SQL query. The query must be acceptable by the ODBC driver used by
`graphlab.extensions._odbc_connection.unity_odbc_connection`.
Returns
-------
out : SFrame
Notes
-----
This functionality is only supported when using GraphLab Create
entirely on your local machine. Therefore, GraphLab Create's EC2 and
Hadoop execution modes will not be able to use ODBC. Note that this
does not apply to the machine your database is running on, which can (and
often will) be a separate machine.
Examples
--------
>>> db = graphlab.connect_odbc("DSN=my_awesome_dsn;UID=user;PWD=mypassword")
>>> a_table = graphlab.SFrame.from_odbc(db, "SELECT * FROM a_table")
>>> join_result = graphlab.SFrame.from_odbc(db, 'SELECT * FROM "MyTable" a, "AnotherTable" b WHERE a.id=b.id')
"""
_mt._get_metric_tracker().track('sframe.from_odbc')
result = db.execute_query(sql)
if not isinstance(result, SFrame):
raise RuntimeError("Cannot create an SFrame for query. No result set.")
return result
def to_odbc(self, db, table_name, append_if_exists=False, verbose=True):
"""
Convert an SFrame to a table in a database.
By default, searches for a table in the database with the given name.
If found, this will attempt to append all the rows of the SFrame to the
end of the table. If not, this will create a new table with the given
name. This behavior is toggled with the `append_if_exists` flag.
When creating a new table, GraphLab Create uses a heuristic approach to
pick a corresponding type for each column in the SFrame using the type
information supplied by the database's ODBC driver. Your driver must
support giving this type information for GraphLab Create to support
writing to the database.
To allow more expressive and accurate naming, `to_odbc` puts quotes
around each identifier (table names and column names). Depending on
your database, you may need to refer to the created table with quote
characters around the name. This character is not the same for all
databases, but '"' is the most common.
Parameters
----------
db : `graphlab.extensions._odbc_connection.unity_odbc_connection`
An ODBC connection object. This can only be obtained by calling
`graphlab.connect_odbc`. Check that documentation for how to create
this object.
table_name : str
The name of the table you would like to create/append to.
append_if_exists : bool
If True, this will attempt to append to the table named `table_name`
if it is found to exist in the database.
verbose : bool
Print progress updates on the insertion process.
Notes
-----
This functionality is only supported when using GraphLab Create
entirely on your local machine. Therefore, GraphLab Create's EC2 and
Hadoop execution modes will not be able to use ODBC. Note that this
"local machine" rule does not apply to the machine your database is
running on, which can (and often will) be running on a separate
machine.
Examples
--------
>>> db = graphlab.connect_odbc("DSN=my_awesome_dsn;UID=user;PWD=mypassword")
>>> sf = graphlab.SFrame({'a':[1,2,3],'b':['hi','pika','bye']})
>>> sf.to_odbc(db, 'a_cool_table')
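Subsequent calls can append to the table created above (sketch):
>>> more_rows = graphlab.SFrame({'a':[4], 'b':['chu']})
>>> more_rows.to_odbc(db, 'a_cool_table', append_if_exists=True)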
"""
_mt._get_metric_tracker().track('sframe.to_odbc')
if (not verbose):
glconnect.get_server().set_log_progress(False)
db._insert_sframe(self, table_name, append_if_exists)
if (not verbose):
glconnect.get_server().set_log_progress(True)
@classmethod
def from_sql(cls, conn, sql_statement, params=None, type_inference_rows=100,
dbapi_module=None, column_type_hints=None, cursor_arraysize=128):
"""
Convert the result of a SQL database query to an SFrame.
Parameters
----------
conn : dbapi2.Connection
A DBAPI2 connection object. Any connection object originating from
the 'connect' method of a DBAPI2-compliant package can be used.
sql_statement : str
The query to be sent to the database through the given connection.
No checks are performed on the `sql_statement`. Any side effects from
the query will be reflected on the database. If no result rows are
returned, an empty SFrame is created.
params : iterable | dict, optional
Parameters to substitute for any parameter markers in the
`sql_statement`. Be aware that the style of parameters may vary
between different DBAPI2 packages.
type_inference_rows : int, optional
The maximum number of rows to use for determining the column types of
the SFrame. These rows are held in Python until all column types are
determined or the maximum is reached.
dbapi_module : module | package, optional
The top-level DBAPI2 module/package that constructed the given
connection object. By default, a best guess of which module the
connection came from is made. In the event that this guess is wrong,
this will need to be specified.
column_type_hints : dict | list | type, optional
Specifies the types of the output SFrame. If a dict is given, it must
have result column names as keys, but need not have all of the result
column names. If a list is given, the length of the list must match
the number of result columns. If a single type is given, all columns
in the output SFrame will be this type. If the result type is
incompatible with the types given in this argument, a casting error
will occur.
cursor_arraysize : int, optional
The number of rows to fetch from the database at one time.
Returns
-------
out : SFrame
Examples
--------
>>> import sqlite3
>>> conn = sqlite3.connect('example.db')
>>> graphlab.SFrame.from_sql(conn, "SELECT * FROM foo")
Columns:
a int
b int
Rows: 1
Data:
+---+---+
| a | b |
+---+---+
| 1 | 2 |
+---+---+
[1 rows x 2 columns]
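Query parameters can be supplied through ``params``; sqlite3 uses the
'qmark' marker style shown here, but other drivers may differ:
>>> sf = graphlab.SFrame.from_sql(conn, "SELECT * FROM foo WHERE a = ?", params=(1,))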
"""
# Mapping types is always the trickiest part about reading from a
# database, so the main complexity of this function concerns types.
# Much of the heavy-lifting of this is done by the DBAPI2 module, which
# holds the burden of the actual mapping from the database-specific
# type to a suitable Python type. The problem is that the type that the
# module chooses may not be supported by SFrame, and SFrame needs a
# list of types to be created, so we must resort to guessing the type
# of a column if the query result returns lots of NULL values. The goal
# of these steps is to fail as little as possible first, and then
# preserve data as much as we can.
#
# Here is how the type for an SFrame column is chosen:
#
# 1. The column_type_hints parameter is checked.
#
# Each column specified in the parameter will be forced to the
# hinted type via a Python-side cast before it is given to the
# SFrame. Only int, float, and str are allowed to be hints.
#
# 2. The types returned from the cursor are checked.
#
# The first non-None result for each column is taken to be the type
# of that column. The type is checked for whether SFrame supports
# it, or whether it can convert to a supported type. If the type is
# supported, no Python-side cast takes place. If unsupported, the
# SFrame column is set to str and the values are casted in Python to
# str before being added to the SFrame.
#
# 3. DB type codes provided by module are checked
#
# This case happens for any column that only had None values in the
# first `type_inference_rows` rows. In this case we check the
# type_code in the cursor description for the columns missing types.
# These types often do not match up with an SFrame-supported Python
# type, so the utility of this step is limited. It can only result
# in labeling datetime.datetime, float, or str. If a suitable
# mapping isn't found, we fall back to str.
mod_info = _get_global_dbapi_info(dbapi_module, conn)
_mt._get_metric_tracker().track('sframe.from_sql',
properties={'module_name':mod_info['module_name']})
from .sframe_builder import SFrameBuilder
c = conn.cursor()
try:
if params is None:
c.execute(sql_statement)
else:
c.execute(sql_statement, params)
except mod_info['Error'] as e:
# The rollback method is considered optional by DBAPI2, but some
# modules that do implement it won't work again unless it is called
# if an error happens on a cursor.
if hasattr(conn, 'rollback'):
conn.rollback()
raise e
c.arraysize = cursor_arraysize
result_desc = c.description
result_names = [i[0] for i in result_desc]
result_types = [None for i in result_desc]
cols_to_force_cast = set()
temp_vals = []
# Set any types that are given to us
col_name_to_num = {result_names[i]:i for i in range(len(result_names))}
if column_type_hints is not None:
if type(column_type_hints) is dict:
for k,v in column_type_hints.items():
col_num = col_name_to_num[k]
cols_to_force_cast.add(col_num)
result_types[col_num] = v
elif type(column_type_hints) is list:
if len(column_type_hints) != len(result_names):
__LOGGER__.warn("If column_type_hints is specified as a "+\
"list, it must be of the same size as the result "+\
"set's number of columns. Ignoring (use dict instead).")
else:
result_types = column_type_hints
cols_to_force_cast.update(range(len(result_desc)))
elif type(column_type_hints) is type:
result_types = [column_type_hints for i in result_desc]
cols_to_force_cast.update(range(len(result_desc)))
# Since we will be casting whatever we receive to the types given
# before submitting the values to the SFrame, we need to make sure that
# these are types for which a "cast" makes sense, and that we're not calling
# a constructor that expects specific input (e.g. datetime.datetime),
# since we could receive many different kinds of input
hintable_types = [int,float,str]
if not all([i in hintable_types or i is None for i in result_types]):
raise TypeError("Only " + str(hintable_types) + " can be provided as type hints!")
# Perform type inference by checking to see what python types are
# returned from the cursor
if not all(result_types):
# Only test the first fetch{one,many} command since the only way it
# will raise an exception is if execute didn't produce a result set
try:
row = c.fetchone()
except mod_info['Error'] as e:
if hasattr(conn, 'rollback'):
conn.rollback()
raise e
while row is not None:
# Assumes that things like dicts are not a "single sequence"
temp_vals.append(row)
val_count = 0
for val in row:
if result_types[val_count] is None and val is not None:
result_types[val_count] = type(val)
val_count += 1
if all(result_types) or len(temp_vals) >= type_inference_rows:
break
row = c.fetchone()
# This will be true if some columns have all missing values up to this
# point. Try using DBAPI2 type_codes to pick a suitable type. If this
# doesn't work, fall back to string.
if not all(result_types):
missing_val_cols = [i for i,v in enumerate(result_types) if v is None]
cols_to_force_cast.update(missing_val_cols)
inferred_types = infer_dbapi2_types(c, mod_info)
cnt = 0
for i in result_types:
if i is None:
result_types[cnt] = inferred_types[cnt]
cnt += 1
sb = SFrameBuilder(result_types, column_names=result_names)
unsupported_cols = [i for i,v in enumerate(sb.column_types()) if v is type(None)]
if len(unsupported_cols) > 0:
cols_to_force_cast.update(unsupported_cols)
for i in unsupported_cols:
result_types[i] = str
sb = SFrameBuilder(result_types, column_names=result_names)
sb.append_multiple(_force_cast_sql_types(temp_vals, result_types, cols_to_force_cast))
rows = c.fetchmany()
while len(rows) > 0:
sb.append_multiple(_force_cast_sql_types(rows, result_types, cols_to_force_cast))
rows = c.fetchmany()
cls = sb.close()
try:
c.close()
except mod_info['Error'] as e:
if hasattr(conn, 'rollback'):
conn.rollback()
raise e
return cls
def to_sql(self, conn, table_name, dbapi_module=None,
use_python_type_specifiers=False, use_exact_column_names=True):
"""
Convert an SFrame to a single table in a SQL database.
This function does not attempt to create the table or check if a table
named `table_name` exists in the database. It simply assumes that
`table_name` exists in the database and appends to it.
`to_sql` can be thought of as a convenience wrapper around
parameterized SQL insert statements.
Parameters
----------
conn : dbapi2.Connection
A DBAPI2 connection object. Any connection object originating from
the 'connect' method of a DBAPI2-compliant package can be used.
table_name : str
The name of the table to append the data in this SFrame.
dbapi_module : module | package, optional
The top-level DBAPI2 module/package that constructed the given
connection object. By default, a best guess of which module the
connection came from is made. In the event that this guess is wrong,
this will need to be specified.
use_python_type_specifiers : bool, optional
If the DBAPI2 module's parameter marker style is 'format' or
'pyformat', attempt to use accurate type specifiers for each value
('s' for string, 'd' for integer, etc.). Many DBAPI2 modules simply
use 's' for all types if they use these parameter markers, so this is
False by default.
use_exact_column_names : bool, optional
Specify the column names of the SFrame when inserting its contents
into the DB. If the specified table does not have the exact same
column names as the SFrame, inserting the data will fail. If False,
the columns in the SFrame are inserted in order, regardless of the
schema of the DB table. True by default.
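Examples
--------
A minimal illustrative sketch; the sqlite3 module, database file, and
table below are assumptions for illustration only, and the table must
already exist with a compatible schema:
>>> import sqlite3
>>> conn = sqlite3.connect('example.db')
>>> conn.execute('CREATE TABLE IF NOT EXISTS my_table (id INTEGER, val TEXT)')
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf.to_sql(conn, 'my_table')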
"""
mod_info = _get_global_dbapi_info(dbapi_module, conn)
_mt._get_metric_tracker().track('sframe.to_sql',
properties={'module_name':mod_info['module_name']})
c = conn.cursor()
col_info = list(zip(self.column_names(), self.column_types()))
if not use_python_type_specifiers:
pytype_to_printf = lambda x: 's'
# DBAPI2 standard allows for five different ways to specify parameters
sql_param = {
'qmark' : lambda name,col_num,col_type: '?',
'numeric' : lambda name,col_num,col_type:':'+str(col_num+1),
'named' : lambda name,col_num,col_type:':'+str(name),
'format' : lambda name,col_num,col_type:'%'+pytype_to_printf(col_type),
'pyformat': lambda name,col_num,col_type:'%('+str(name)+')'+pytype_to_printf(col_type),
}
get_sql_param = sql_param[mod_info['paramstyle']]
# form insert string
ins_str = "INSERT INTO " + str(table_name)
value_str = " VALUES ("
col_str = " ("
count = 0
for i in col_info:
col_str += i[0]
value_str += get_sql_param(i[0],count,i[1])
if count < len(col_info)-1:
col_str += ","
value_str += ","
count += 1
col_str += ")"
value_str += ")"
if use_exact_column_names:
ins_str += col_str
ins_str += value_str
# Some formats require values in an iterable, some a dictionary
if (mod_info['paramstyle'] == 'named' or\
mod_info['paramstyle'] == 'pyformat'):
prepare_sf_row = lambda x:x
else:
col_names = self.column_names()
prepare_sf_row = lambda x: [x[i] for i in col_names]
for i in self:
try:
c.execute(ins_str, prepare_sf_row(i))
except mod_info['Error'] as e:
if hasattr(conn, 'rollback'):
conn.rollback()
raise e
conn.commit()
c.close()
def __hash__(self):
'''
Because we override `__eq__` we need to implement this function in Python 3.
Just make it match default behavior in Python 2.
'''
return id(self) // 16
def __repr__(self):
"""
Returns a string description of the frame
"""
ret = self.__get_column_description__()
(is_empty, data_str) = self.__str_impl__()
if is_empty:
data_str = "\t[]"
if self.__has_size__():
ret = ret + "Rows: " + str(len(self)) + "\n\n"
else:
ret = ret + "Rows: Unknown" + "\n\n"
ret = ret + "Data:\n"
ret = ret + data_str
return ret
def __get_column_description__(self):
colnames = self.column_names()
coltypes = self.column_types()
ret = "Columns:\n"
if len(colnames) > 0:
for i in range(len(colnames)):
ret = ret + "\t" + colnames[i] + "\t" + coltypes[i].__name__ + "\n"
ret = ret + "\n"
else:
ret = ret + "\tNone\n\n"
return ret
def __get_pretty_tables__(self, wrap_text=False, max_row_width=80,
max_column_width=30, max_columns=20,
max_rows_to_display=60):
"""
Returns a list of pretty print tables representing the current SFrame.
If the number of columns is larger than max_columns, the last pretty
table will contain an extra column of "...".
Parameters
----------
wrap_text : bool, optional
If True, wrap long cell values (at most two lines per cell) instead
of simply truncating them.
max_row_width : int, optional
Max number of characters per table.
max_column_width : int, optional
Max number of characters per column.
max_columns : int, optional
Max number of columns per table.
max_rows_to_display : int, optional
Max number of rows to display.
Returns
-------
out : list[PrettyTable]
"""
if (len(self) <= max_rows_to_display):
headsf = self.__copy__()
else:
headsf = self.head(max_rows_to_display)
if headsf.shape == (0, 0):
return [PrettyTable()]
# convert array.array column to list column so they print like [...]
# and not array('d', ...)
for col in headsf.column_names():
if headsf[col].dtype() is array.array:
headsf[col] = headsf[col].astype(list)
def _value_to_str(value):
if (type(value) is array.array):
return str(list(value))
elif (type(value) is list):
return '[' + ", ".join(_value_to_str(x) for x in value) + ']'
else:
return str(value)
def _escape_space(s):
if sys.version_info.major == 3:
return "".join([ch.encode('unicode_escape').decode() if ch.isspace() else ch for ch in s])
return "".join([ch.encode('string_escape') if ch.isspace() else ch for ch in s])
def _truncate_respect_unicode(s, max_length):
if (len(s) <= max_length):
return s
else:
if sys.version_info.major < 3:
u = unicode(s, 'utf-8', errors='replace')
return u[:max_length].encode('utf-8')
else:
return s[:max_length]
def _truncate_str(s, wrap_str=False):
"""
Truncate and optionally wrap the input string as unicode; replace
any unconvertible characters with the unicode replacement character.
"""
s = _escape_space(s)
if len(s) <= max_column_width:
if sys.version_info.major < 3:
return unicode(s, 'utf-8', errors='replace')
else:
return s
else:
ret = ''
# if wrap_str is true, wrap the text and take at most 2 rows
if wrap_str:
wrapped_lines = wrap(s, max_column_width)
if len(wrapped_lines) == 1:
return wrapped_lines[0]
last_line = wrapped_lines[1]
if len(last_line) >= max_column_width:
last_line = _truncate_respect_unicode(last_line, max_column_width - 4)
ret = wrapped_lines[0] + '\n' + last_line + ' ...'
else:
ret = _truncate_respect_unicode(s, max_column_width - 4) + '...'
if sys.version_info.major < 3:
return unicode(ret, 'utf-8', errors='replace')
else:
return ret
columns = self.column_names()[:max_columns]
columns.reverse() # reverse the order of columns and we will pop from the end
num_column_of_last_table = 0
row_of_tables = []
# let's build a list of tables with max_columns
# each table should satisfy, max_row_width, and max_column_width
while len(columns) > 0:
tbl = PrettyTable()
table_width = 0
num_column_of_last_table = 0
while len(columns) > 0:
col = columns.pop()
# check the max length of element in the column
if len(headsf) > 0:
col_width = min(max_column_width, max(len(str(x)) for x in headsf[col]))
else:
col_width = max_column_width
if (table_width + col_width < max_row_width):
# truncate the header if necessary
header = _truncate_str(col, wrap_text)
tbl.add_column(header, [_truncate_str(_value_to_str(x), wrap_text) for x in headsf[col]])
table_width = str(tbl).find('\n')
num_column_of_last_table += 1
else:
# the column does not fit in the current table, push it back to columns
columns.append(col)
break
tbl.align = 'c'
row_of_tables.append(tbl)
# add a column of all "..." if there are more columns than displayed
if self.num_cols() > max_columns:
row_of_tables[-1].add_column('...', ['...'] * len(headsf))
num_column_of_last_table += 1
# add a row of all "..." if there are more rows than displayed
if self.__has_size__() and self.num_rows() > headsf.num_rows():
row_of_tables[-1].add_row(['...'] * num_column_of_last_table)
return row_of_tables
def print_rows(self, num_rows=10, num_columns=40, max_column_width=30,
max_row_width=80, output_file=None):
"""
Print the first M rows and N columns of the SFrame in human readable
format.
Parameters
----------
num_rows : int, optional
Number of rows to print.
num_columns : int, optional
Number of columns to print.
max_column_width : int, optional
Maximum width of a column. Columns use fewer characters if possible.
max_row_width : int, optional
Maximum width of a printed row. Columns beyond this width wrap to a
new line. `max_row_width` is automatically reset to be the
larger of itself and `max_column_width`.
output_file: file, optional
The stream or file that receives the output. By default the output
goes to sys.stdout, but it can also be redirected to a file or a
string (using an object of type StringIO).
See Also
--------
head, tail
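Examples
--------
A short illustrative sketch (output omitted; the column names are
arbitrary):
>>> sf = graphlab.SFrame({'id': range(100), 'val': range(100)})
>>> sf.print_rows(num_rows=5, num_columns=2)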
"""
if output_file is None:
output_file = sys.stdout
max_row_width = max(max_row_width, max_column_width + 1)
printed_sf = self._imagecols_to_stringcols(num_rows)
row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=False,
max_rows_to_display=num_rows,
max_columns=num_columns,
max_column_width=max_column_width,
max_row_width=max_row_width)
footer = "[%d rows x %d columns]\n" % self.shape
print('\n'.join([str(tb) for tb in row_of_tables]) + "\n" + footer, file=output_file)
def _imagecols_to_stringcols(self, num_rows=10):
# A list of column types
types = self.column_types()
# A list of indexable column names
names = self.column_names()
# Constructing names of sframe columns that are of image type
image_column_names = [names[i] for i in range(len(names)) if types[i] == _Image]
# If there are image-type columns, copy the SFrame and cast the top MAX_NUM_ROWS_TO_DISPLAY of those columns to string
printed_sf = self.__copy__()
if len(image_column_names) > 0:
for t in names:
if t in image_column_names:
printed_sf[t] = self[t].astype(str)
return printed_sf.head(num_rows)
def __str_impl__(self, num_rows=10, footer=True):
"""
Returns a string containing the first num_rows elements of the frame, along
with a description of the frame.
"""
MAX_ROWS_TO_DISPLAY = num_rows
printed_sf = self._imagecols_to_stringcols(MAX_ROWS_TO_DISPLAY)
row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=False, max_rows_to_display=MAX_ROWS_TO_DISPLAY)
is_empty = len(printed_sf) == 0
if (not footer):
return (is_empty, '\n'.join([str(tb) for tb in row_of_tables]))
if self.__has_size__():
footer = '[%d rows x %d columns]\n' % self.shape
if (self.num_rows() > MAX_ROWS_TO_DISPLAY):
footer += '\n'.join(FOOTER_STRS)
else:
footer = '[? rows x %d columns]\n' % self.num_columns()
footer += '\n'.join(LAZY_FOOTER_STRS)
return (is_empty, '\n'.join([str(tb) for tb in row_of_tables]) + "\n" + footer)
def __str__(self, num_rows=10, footer=True):
"""
Returns a string containing the first 10 elements of the frame, along
with a description of the frame.
"""
return self.__str_impl__(num_rows, footer)[1]
def _repr_html_(self):
MAX_ROWS_TO_DISPLAY = 10
printed_sf = self._imagecols_to_stringcols(MAX_ROWS_TO_DISPLAY)
row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=True,
max_row_width=120,
max_columns=40,
max_column_width=25,
max_rows_to_display=MAX_ROWS_TO_DISPLAY)
if self.__has_size__():
footer = '[%d rows x %d columns]<br/>' % self.shape
if (self.num_rows() > MAX_ROWS_TO_DISPLAY):
footer += '<br/>'.join(FOOTER_STRS)
else:
footer = '[? rows x %d columns]<br/>' % self.num_columns()
footer += '<br/>'.join(LAZY_FOOTER_STRS)
begin = '<div style="max-height:1000px;max-width:1500px;overflow:auto;">'
end = '\n</div>'
return begin + '\n'.join([tb.get_html_string(format=True) for tb in row_of_tables]) + "\n" + footer + end
def __nonzero__(self):
"""
Returns true if the frame is not empty.
"""
return self.num_rows() != 0
def __len__(self):
"""
Returns the number of rows of the sframe.
"""
return self.num_rows()
def __copy__(self):
"""
Returns a shallow copy of the sframe.
"""
return self.select_columns(self.column_names())
def __deepcopy__(self, memo):
"""
Returns a deep copy of the sframe. As the data in an SFrame is
immutable, this is identical to __copy__.
"""
return self.__copy__()
def copy(self):
"""
Returns a shallow copy of the sframe.
"""
return self.__copy__()
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
raise NotImplementedError
def _row_selector(self, other):
"""
Where `other` is an SArray of the same length as the current SFrame,
this returns the subset of rows in the current SFrame whose
corresponding entry in the selector is non-zero.
"""
if type(other) is SArray:
if self.__has_size__() and other.__has_size__() and len(other) != len(self):
raise IndexError("Cannot perform logical indexing on arrays of different length.")
with cython_context():
return SFrame(_proxy=self.__proxy__.logical_filter(other.__proxy__))
def dtype(self):
"""
The type of each column.
Returns
-------
out : list[type]
Column types of the SFrame.
See Also
--------
column_types
"""
return self.column_types()
def num_rows(self):
"""
The number of rows in this SFrame.
Returns
-------
out : int
Number of rows in the SFrame.
See Also
--------
num_columns
"""
return self.__proxy__.num_rows()
def num_cols(self):
"""
The number of columns in this SFrame.
Returns
-------
out : int
Number of columns in the SFrame.
See Also
--------
num_columns, num_rows
"""
return self.__proxy__.num_columns()
def num_columns(self):
"""
The number of columns in this SFrame.
Returns
-------
out : int
Number of columns in the SFrame.
See Also
--------
num_cols, num_rows
"""
return self.__proxy__.num_columns()
def column_names(self):
"""
The name of each column in the SFrame.
Returns
-------
out : list[string]
Column names of the SFrame.
See Also
--------
rename
"""
return self.__proxy__.column_names()
def column_types(self):
"""
The type of each column in the SFrame.
Returns
-------
out : list[type]
Column types of the SFrame.
See Also
--------
dtype
"""
return self.__proxy__.dtype()
def head(self, n=10):
"""
The first n rows of the SFrame.
Parameters
----------
n : int, optional
The number of rows to fetch.
Returns
-------
out : SFrame
A new SFrame which contains the first n rows of the current SFrame
See Also
--------
tail, print_rows
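Examples
--------
A short illustrative sketch (output omitted):
>>> sf = graphlab.SFrame({'id': range(100)})
>>> sf.head(3)  # SFrame with the first 3 rows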
"""
return SFrame(_proxy=self.__proxy__.head(n))
def to_dataframe(self):
"""
Convert this SFrame to pandas.DataFrame.
This operation will construct a pandas.DataFrame in memory. Care must
be taken when the returned object is large.
Returns
-------
out : pandas.DataFrame
The dataframe which contains all rows of SFrame
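Examples
--------
A short illustrative sketch; it assumes pandas is installed and the
column names are arbitrary:
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> df = sf.to_dataframe()   # a pandas.DataFrame with columns 'id' and 'val'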
"""
assert HAS_PANDAS, 'pandas is not installed.'
df = pandas.DataFrame()
for i in range(self.num_columns()):
column_name = self.column_names()[i]
df[column_name] = list(self[column_name])
if len(df[column_name]) == 0:
df[column_name] = df[column_name].astype(self.column_types()[i])
return df
def to_numpy(self):
"""
Converts this SFrame to a numpy array
This operation will construct a numpy array in memory. Care must
be taken when the returned object is large.
Returns
-------
out : numpy.ndarray
A Numpy Array containing all the values of the SFrame
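Examples
--------
A short illustrative sketch; it assumes numpy is installed and the
column names are arbitrary:
>>> sf = graphlab.SFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
>>> sf.to_numpy()   # a 3x2 numpy.ndarray, one row per SFrame row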
"""
assert HAS_NUMPY, 'numpy is not installed.'
import numpy
return numpy.transpose(numpy.asarray([self[x] for x in self.column_names()]))
def tail(self, n=10):
"""
The last n rows of the SFrame.
Parameters
----------
n : int, optional
The number of rows to fetch.
Returns
-------
out : SFrame
A new SFrame which contains the last n rows of the current SFrame
See Also
--------
head, print_rows
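Examples
--------
A short illustrative sketch (output omitted):
>>> sf = graphlab.SFrame({'id': range(100)})
>>> sf.tail(3)  # SFrame with the last 3 rows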
"""
return SFrame(_proxy=self.__proxy__.tail(n))
def apply(self, fn, dtype=None, seed=None):
"""
Transform each row to an :class:`~graphlab.SArray` according to a
specified function. Returns a new SArray of ``dtype`` where each element
in this SArray is transformed by `fn(x)` where `x` is a single row in
the sframe represented as a dictionary. The ``fn`` should return
exactly one value which can be cast into type ``dtype``. If ``dtype`` is
not specified, the first 100 rows of the SFrame are used to make a guess
of the target data type.
Parameters
----------
fn : function
The function to transform each row of the SFrame. The return
type should be convertible to `dtype` if `dtype` is not None.
This can also be a toolkit extension function which is compiled
as a native shared library using SDK.
dtype : dtype, optional
The dtype of the new SArray. If None, the first 100
elements of the array are used to guess the target
data type.
seed : int, optional
Used as the seed if a random number generator is included in `fn`.
Returns
-------
out : SArray
The SArray transformed by fn. Each element of the SArray is of
type ``dtype``
Examples
--------
Concatenate strings from several columns:
>>> sf = graphlab.SFrame({'user_id': [1, 2, 3], 'movie_id': [3, 3, 6],
'rating': [4, 5, 1]})
>>> sf.apply(lambda x: str(x['user_id']) + str(x['movie_id']) + str(x['rating']))
dtype: str
Rows: 3
['134', '235', '361']
Using native toolkit extension function:
.. code-block:: c++
#include <graphlab/sdk/toolkit_function_macros.hpp>
double mean(const std::map<flexible_type, flexible_type>& dict) {
double sum = 0.0;
for (const auto& kv: dict) sum += (double)kv.second;
return sum / dict.size();
}
BEGIN_FUNCTION_REGISTRATION
REGISTER_FUNCTION(mean, "row");
END_FUNCTION_REGISTRATION
compiled into example.so
>>> import example
>>> sf = graphlab.SFrame({'x0': [1, 2, 3], 'x1': [2, 3, 1],
... 'x2': [3, 1, 2]})
>>> sf.apply(example.mean)
dtype: float
Rows: 3
[2.0,2.0,2.0]
"""
assert callable(fn), "Input must be callable"
test_sf = self[:10]
dryrun = [fn(row) for row in test_sf]
if dtype is None:
dtype = SArray(dryrun).dtype()
if seed is None:
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
nativefn = None
try:
from .. import extensions as extensions
nativefn = extensions._build_native_function_call(fn)
except:
pass
if nativefn is not None:
# this is a toolkit lambda. We can do something about it
with cython_context():
return SArray(_proxy=self.__proxy__.transform_native(nativefn, dtype, seed))
with cython_context():
return SArray(_proxy=self.__proxy__.transform(fn, dtype, seed))
def flat_map(self, column_names, fn, column_types='auto', seed=None):
"""
Map each row of the SFrame to multiple rows in a new SFrame via a
function.
The output of `fn` must have type List[List[...]]. Each inner list
will be a single row in the new output, and the collection of these
rows within the outer list make up the data for the output SFrame.
All rows must have the same length and the same order of types to
make sure the result columns are homogeneously typed. For example, if
the first element emitted into the outer list by `fn` is
[43, 2.3, 'string'], then all other elements emitted into the outer
list must be a list with three elements, where the first is an int,
second is a float, and third is a string. If column_types is not
specified, the first 10 rows of the SFrame are used to determine the
column types of the returned sframe.
Parameters
----------
column_names : list[str]
The column names for the returned SFrame.
fn : function
The function that maps each of the sframe row into multiple rows,
returning List[List[...]]. All outputted rows must have the same
length and order of types.
column_types : list[type], optional
The column types of the output SFrame. Default value will be
automatically inferred by running `fn` on the first 10 rows of the
input. If the types cannot be inferred from the first 10 rows, an
error is raised.
seed : int, optional
Used as the seed if a random number generator is included in `fn`.
Returns
-------
out : SFrame
A new SFrame containing the results of the flat_map of the
original SFrame.
Examples
---------
Repeat each row according to the value in the 'number' column.
>>> sf = graphlab.SFrame({'letter': ['a', 'b', 'c'],
... 'number': [1, 2, 3]})
>>> sf.flat_map(['number', 'letter'],
... lambda x: [list(x.itervalues()) for i in range(0, x['number'])])
+--------+--------+
| number | letter |
+--------+--------+
| 1 | a |
| 2 | b |
| 2 | b |
| 3 | c |
| 3 | c |
| 3 | c |
+--------+--------+
[6 rows x 2 columns]
"""
assert callable(fn), "Input must be callable"
if seed is None:
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
# determine the column_types
if column_types == 'auto':
types = set()
sample = self[0:10]
results = [fn(row) for row in sample]
for rows in results:
if type(rows) is not list:
raise TypeError("Output type of the lambda function must be a list of lists")
# note: this skips empty lists
for row in rows:
if type(row) is not list:
raise TypeError("Output type of the lambda function must be a list of lists")
types.add(tuple([type(v) for v in row]))
if len(types) == 0:
raise TypeError(
"Could not infer output column types from the first ten rows " +\
"of the SFrame. Please use the 'column_types' parameter to " +\
"set the types.")
if len(types) > 1:
raise TypeError("Mapped rows must have the same length and types")
column_types = list(types.pop())
assert type(column_types) is list, "'column_types' must be a list."
assert len(column_types) == len(column_names), "Number of output columns must match the size of column names"
with cython_context():
return SFrame(_proxy=self.__proxy__.flat_map(fn, column_names, column_types, seed))
def sample(self, fraction, seed=None):
"""
Sample the current SFrame's rows.
Parameters
----------
fraction : float
Approximate fraction of the rows to fetch. Must be between 0 and 1.
The number of rows returned is approximately the fraction times the
number of rows.
seed : int, optional
Seed for the random number generator used to sample.
Returns
-------
out : SFrame
A new SFrame containing sampled rows of the current SFrame.
Examples
--------
Suppose we have an SFrame with 6,145 rows.
>>> import random
>>> sf = SFrame({'id': range(0, 6145)})
Retrieve about 30% of the SFrame rows with repeatable results by
setting the random seed.
>>> len(sf.sample(.3, seed=5))
1783
"""
if seed is None:
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
if (fraction > 1 or fraction < 0):
raise ValueError('Invalid sampling rate: ' + str(fraction))
if (self.num_rows() == 0 or self.num_cols() == 0):
return self
else:
with cython_context():
return SFrame(_proxy=self.__proxy__.sample(fraction, seed))
def random_split(self, fraction, seed=None):
"""
Randomly split the rows of an SFrame into two SFrames. The first SFrame
contains *M* rows, sampled uniformly (without replacement) from the
original SFrame. *M* is approximately the fraction times the original
number of rows. The second SFrame contains the remaining rows of the
original SFrame.
Parameters
----------
fraction : float
Approximate fraction of the rows to fetch for the first returned
SFrame. Must be between 0 and 1.
seed : int, optional
Seed for the random number generator used to split.
Returns
-------
out : tuple [SFrame]
Two new SFrames.
Examples
--------
Suppose we have an SFrame with 1,024 rows and we want to randomly split
it into training and testing datasets with about a 90%/10% split.
>>> sf = graphlab.SFrame({'id': range(1024)})
>>> sf_train, sf_test = sf.random_split(.9, seed=5)
>>> print(len(sf_train), len(sf_test))
922 102
"""
if (fraction > 1 or fraction < 0):
raise ValueError('Invalid sampling rate: ' + str(fraction))
if (self.num_rows() == 0 or self.num_cols() == 0):
return (SFrame(), SFrame())
if seed is None:
# Include the nanosecond component as well.
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
# The server side requires this to be an int, so cast if we can
try:
seed = int(seed)
except ValueError:
raise ValueError('The \'seed\' parameter must be of type int.')
with cython_context():
proxy_pair = self.__proxy__.random_split(fraction, seed)
return (SFrame(data=[], _proxy=proxy_pair[0]), SFrame(data=[], _proxy=proxy_pair[1]))
def topk(self, column_name, k=10, reverse=False):
"""
Get the top k rows according to the given column. The result is
sorted by `column_name` in the given order (default is descending).
When `k` is small, `topk` is more efficient than `sort`.
Parameters
----------
column_name : string
The column to sort on
k : int, optional
The number of rows to return
reverse : bool, optional
If True, return the top k rows in ascending order, otherwise, in
descending order.
Returns
-------
out : SFrame
an SFrame containing the top k rows sorted by column_name.
See Also
--------
sort
Examples
--------
>>> sf = graphlab.SFrame({'id': range(1000)})
>>> sf['value'] = -sf['id']
>>> sf.topk('id', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 999 | -999 |
| 998 | -998 |
| 997 | -997 |
+--------+--------+
[3 rows x 2 columns]
>>> sf.topk('value', k=3)
+--------+--------+
| id | value |
+--------+--------+
| 1 | -1 |
| 2 | -2 |
| 3 | -3 |
+--------+--------+
[3 rows x 2 columns]
"""
if type(column_name) is not str:
raise TypeError("column_name must be a string")
sf = self[self[column_name].topk_index(k, reverse)]
return sf.sort(column_name, ascending=reverse)
def save(self, filename, format=None):
"""
Save the SFrame to a file system for later use.
Parameters
----------
filename : string
The location to save the SFrame. Either a local directory or a
remote URL. If the format is 'binary', a directory will be created
at the location which will contain the sframe.
format : {'binary', 'csv', 'json'}, optional
Format in which to save the SFrame. Binary saved SFrames can be
loaded much faster and without any format conversion losses. If not
given, the format is inferred from the filename: if it ends with
'.csv' or '.csv.gz', the SFrame is saved in 'csv' format, otherwise
it is saved in 'binary' format.
See export_csv for more csv saving options.
See Also
--------
load_sframe, SFrame
Examples
--------
>>> # Save the sframe into binary format
>>> sf.save('data/training_data_sframe')
>>> # Save the sframe into csv format
>>> sf.save('data/training_data.csv', format='csv')
"""
if format is None:
if filename.endswith(('.csv', '.csv.gz')):
format = 'csv'
else:
format = 'binary'
else:
if format == 'csv':
if not filename.endswith(('.csv', '.csv.gz')):
filename = filename + '.csv'
elif format != 'binary' and format != 'json':
raise ValueError("Invalid format: {}. Supported formats are 'csv', 'binary', and 'json'.".format(format))
## Save the SFrame
url = _make_internal_url(filename)
with cython_context():
if format == 'binary':
self.__proxy__.save(url)
elif format == 'csv':
assert filename.endswith(('.csv', '.csv.gz'))
self.__proxy__.save_as_csv(url, {})
elif format == 'json':
self.export_json(url)
else:
raise ValueError("Unsupported format: {}".format(format))
def export_csv(self, filename, delimiter=',', line_terminator='\n',
header=True, quote_level=csv.QUOTE_NONNUMERIC, double_quote=True,
escape_char='\\', quote_char='\"', na_rep='',
file_header='', file_footer='', line_prefix='',
_no_prefix_on_first_value=False, **kwargs):
"""
Writes an SFrame to a CSV file.
Parameters
----------
filename : string
The location to save the CSV.
delimiter : string, optional
This describes the delimiter used for writing csv files.
line_terminator: string, optional
The newline character
header : bool, optional
If true, the column names are emitted as a header.
quote_level: csv.QUOTE_ALL | csv.QUOTE_NONE | csv.QUOTE_NONNUMERIC, optional
The quoting level. If csv.QUOTE_ALL, every field is quoted.
If csv.QUOTE_NONE, no field is quoted. If csv.QUOTE_NONNUMERIC, only
non-numeric fields are quoted. csv.QUOTE_MINIMAL is interpreted as
csv.QUOTE_NONNUMERIC.
double_quote : bool, optional
If True, quotes are escaped as two consecutive quotes
escape_char : string, optional
Character which begins a C escape sequence
quote_char: string, optional
Character used to quote fields
na_rep: string, optional
The value used to denote a missing value.
file_header: string, optional
A string printed to the start of the file
file_footer: string, optional
A string printed to the end of the file
line_prefix: string, optional
A string printed at the start of each value line
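Examples
--------
A minimal illustrative sketch; the file paths and column names are
arbitrary:
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf.export_csv('data/output.csv')
>>> # Tab-delimited, with a custom missing-value marker
>>> sf.export_csv('data/output.tsv', delimiter='\t', na_rep='NA')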
"""
# Pandas argument compatibility
if "sep" in kwargs:
delimiter = kwargs['sep']
del kwargs['sep']
if "quotechar" in kwargs:
quote_char = kwargs['quotechar']
del kwargs['quotechar']
if "doublequote" in kwargs:
double_quote = kwargs['doublequote']
del kwargs['doublequote']
if "lineterminator" in kwargs:
line_terminator = kwargs['lineterminator']
del kwargs['lineterminator']
if len(kwargs) > 0:
raise TypeError("Unexpected keyword arguments " + str(list(kwargs.keys())))
write_csv_options = {}
write_csv_options['delimiter'] = delimiter
write_csv_options['escape_char'] = escape_char
write_csv_options['double_quote'] = double_quote
write_csv_options['quote_char'] = quote_char
if quote_level == csv.QUOTE_MINIMAL:
write_csv_options['quote_level'] = 0
elif quote_level == csv.QUOTE_ALL:
write_csv_options['quote_level'] = 1
elif quote_level == csv.QUOTE_NONNUMERIC:
write_csv_options['quote_level'] = 2
elif quote_level == csv.QUOTE_NONE:
write_csv_options['quote_level'] = 3
write_csv_options['header'] = header
write_csv_options['line_terminator'] = line_terminator
write_csv_options['na_value'] = na_rep
write_csv_options['file_header'] = file_header
write_csv_options['file_footer'] = file_footer
write_csv_options['line_prefix'] = line_prefix
# undocumented option. Disables line prefix on the first value line
write_csv_options['_no_prefix_on_first_value'] = _no_prefix_on_first_value
url = _make_internal_url(filename)
self.__proxy__.save_as_csv(url, write_csv_options)
def export_json(self,
filename,
orient='records'):
"""
Writes an SFrame to a JSON file.
Parameters
----------
filename : string
The location to save the JSON file.
orient : string, optional. Either "records" or "lines"
If orient="records", the file is saved as a single JSON array.
If orient="lines", the file is saved with one JSON value per line.
Examples
--------
The orient parameter describes the expected input format of the JSON
file.
If orient="records", the output will be a single JSON Array where
each array element is a dictionary describing the row.
>>> g
Columns:
a int
b int
Rows: 3
Data:
+---+---+
| a | b |
+---+---+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
+---+---+
>>> g.export_json('output.json', orient='records')
>>> !cat output.json
[
{'a':1,'b':1},
{'a':2,'b':2},
{'a':3,'b':3},
]
If orient="lines", each row will be emitted as a JSON dictionary on
its own line.
>>> g
Columns:
a int
b int
Rows: 3
Data:
+---+---+
| a | b |
+---+---+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
+---+---+
>>> g.export_json('output.json', orient='lines')
>>> !cat output.json
{'a':1,'b':1}
{'a':2,'b':2}
{'a':3,'b':3}
"""
if orient == "records":
self.pack_columns(dtype=dict).export_csv(
filename, file_header='[', file_footer=']',
header=False, double_quote=False,
quote_level=csv.QUOTE_NONE,
line_prefix=',',
_no_prefix_on_first_value=True)
elif orient == "lines":
self.pack_columns(dtype=dict).export_csv(
filename, header=False, double_quote=False, quote_level=csv.QUOTE_NONE)
else:
raise ValueError("Invalid value for orient parameter (" + str(orient) + ")")
def _save_reference(self, filename):
"""
Performs an incomplete save of an existing SFrame into a directory.
This saved SFrame may reference SFrames in other locations in the same
filesystem for certain resources.
Parameters
----------
filename : string
The location to save the SFrame. Either a local directory or a
remote URL.
See Also
--------
load_sframe, SFrame
Examples
--------
>>> # Save the sframe into binary format
>>> sf._save_reference('data/training_data_sframe')
"""
## Save the SFrame
url = _make_internal_url(filename)
with cython_context():
self.__proxy__.save_reference(url)
def select_column(self, key):
"""
Get a reference to the :class:`~graphlab.SArray` that corresponds with
the given key. Throws an exception if the key is something other than a
string or if the key is not found.
Parameters
----------
key : str
The column name.
Returns
-------
out : SArray
The SArray that is referred by ``key``.
See Also
--------
select_columns
Examples
--------
>>> sf = graphlab.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie']})
>>> # This line is equivalent to `sa = sf['user_name']`
>>> sa = sf.select_column('user_name')
>>> sa
dtype: str
Rows: 3
['alice', 'bob', 'charlie']
"""
if not isinstance(key, str):
raise TypeError("Invalid key type: must be str")
with cython_context():
return SArray(data=[], _proxy=self.__proxy__.select_column(key))
def select_columns(self, keylist):
"""
Selects all columns where the name of the column or the type of column
is included in the keylist. An exception is raised if duplicate columns
are selected (e.g. sf.select_columns(['a','a'])), or if non-existent
columns are selected.
Throws an exception for all other input types.
Parameters
----------
keylist : list[str or type]
The list of column names or a list of types.
Returns
-------
out : SFrame
A new SFrame that is made up of the columns referred to in
``keylist`` from the current SFrame.
See Also
--------
select_column
Examples
--------
>>> sf = graphlab.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie'],
... 'zipcode': [98101, 98102, 98103]
... })
>>> # This line is equivalent to `sf2 = sf[['user_id', 'zipcode']]`
>>> sf2 = sf.select_columns(['user_id', 'zipcode'])
>>> sf2
+---------+---------+
| user_id | zipcode |
+---------+---------+
| 1 | 98101 |
| 2 | 98102 |
| 3 | 98103 |
+---------+---------+
[3 rows x 2 columns]
"""
if not _is_non_string_iterable(keylist):
raise TypeError("keylist must be an iterable")
if not (all([isinstance(x, str) or isinstance(x, type) or isinstance(x, bytes)
for x in keylist])):
raise TypeError("Invalid key type: must be str, bytes or type")
column_names_set = set(self.column_names())
# quick validation to make sure all selected string columns exist
requested_str_columns = [s for s in keylist if isinstance(s, str)]
for i in requested_str_columns:
if i not in column_names_set:
raise RuntimeError("Column name " + i + " does not exist")
# Make sure there are no duplicate keys
from collections import Counter
keylist_counter = Counter(keylist)
if (len(keylist)) != len(keylist_counter):
for key in keylist_counter:
if keylist_counter[key] > 1:
raise ValueError("There are duplicate keys in key list: '" + key + "'")
colnames_and_types = list(zip(self.column_names(), self.column_types()))
# OK: we want the string columns in the order given by the argument,
# followed by all the type-selected columns.
selected_columns = requested_str_columns
typelist = [s for s in keylist if isinstance(s, type)]
# next the type selection columns
# loop through all the columns, adding all columns with types in
# typelist. But don't add a column if it has already been added.
for i in colnames_and_types:
if i[1] in typelist and i[0] not in selected_columns:
selected_columns += [i[0]]
with cython_context():
return SFrame(data=[], _proxy=self.__proxy__.select_columns(selected_columns))
def add_column(self, data, name=""):
"""
Add a column to this SFrame. The number of elements in the data given
must match the length of every other column of the SFrame. This
operation modifies the current SFrame in place and returns self. If no
name is given, a default name is chosen.
Parameters
----------
data : SArray
The 'column' of data to add.
name : string, optional
The name of the column. If no name is given, a default name is
chosen.
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
add_columns
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sa = graphlab.SArray(['cat', 'dog', 'fossa'])
>>> # This line is equivalent to `sf['species'] = sa`
>>> sf.add_column(sa, name='species')
>>> sf
+----+-----+---------+
| id | val | species |
+----+-----+---------+
| 1 | A | cat |
| 2 | B | dog |
| 3 | C | fossa |
+----+-----+---------+
[3 rows x 3 columns]
"""
# Check type for pandas dataframe or SArray?
if not isinstance(data, SArray):
raise TypeError("Must give column as SArray")
if not isinstance(name, str):
raise TypeError("Invalid column name: must be str")
with cython_context():
self.__proxy__.add_column(data.__proxy__, name)
self._cache = None
return self
def add_columns(self, data, namelist=None):
"""
Adds multiple columns to this SFrame. The number of elements in all
columns must match the length of every other column of the SFrame. This
operation modifies the current SFrame in place and returns self.
Parameters
----------
data : list[SArray] or SFrame
The columns to add.
namelist : list of string, optional
A list of column names. All names must be specified. ``namelist`` is
ignored if data is an SFrame.
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
add_column
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf2 = graphlab.SFrame({'species': ['cat', 'dog', 'fossa'],
... 'age': [3, 5, 9]})
>>> sf.add_columns(sf2)
>>> sf
+----+-----+-----+---------+
| id | val | age | species |
+----+-----+-----+---------+
| 1 | A | 3 | cat |
| 2 | B | 5 | dog |
| 3 | C | 9 | fossa |
+----+-----+-----+---------+
[3 rows x 4 columns]
"""
datalist = data
if isinstance(data, SFrame):
other = data
datalist = [other.select_column(name) for name in other.column_names()]
namelist = other.column_names()
my_columns = set(self.column_names())
for name in namelist:
if name in my_columns:
raise ValueError("Column '" + name + "' already exists in current SFrame")
else:
if not _is_non_string_iterable(datalist):
raise TypeError("datalist must be an iterable")
if not _is_non_string_iterable(namelist):
raise TypeError("namelist must be an iterable")
if not all([isinstance(x, SArray) for x in datalist]):
raise TypeError("Must give column as SArray")
if not all([isinstance(x, str) for x in namelist]):
raise TypeError("Invalid column name in list : must all be str")
with cython_context():
self.__proxy__.add_columns([x.__proxy__ for x in datalist], namelist)
self._cache = None
return self
def remove_column(self, name):
"""
Remove a column from this SFrame. This operation modifies the current
SFrame in place and returns self.
Parameters
----------
name : string
The name of the column to remove.
Returns
-------
out : SFrame
The SFrame with given column removed.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> # This is equivalent to `del sf['val']`
>>> sf.remove_column('val')
>>> sf
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns]
"""
name = str(name)
if name not in self.column_names():
raise KeyError('Cannot find column %s' % name)
colid = self.column_names().index(name)
with cython_context():
self.__proxy__.remove_column(colid)
self._cache = None
return self
def remove_columns(self, column_names):
"""
Remove one or more columns from this SFrame. This operation modifies the current
SFrame in place and returns self.
Parameters
----------
column_names : list or iterable
A list or iterable of column names.
Returns
-------
out : SFrame
The SFrame with given columns removed.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val1': ['A', 'B', 'C'], 'val2' : [10, 11, 12]})
>>> sf.remove_columns(['val1', 'val2'])
>>> sf
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns]
"""
column_names = list(column_names)
existing_columns = dict((k, i) for i, k in enumerate(self.column_names()))
for name in column_names:
if name not in existing_columns:
raise KeyError('Cannot find column %s' % name)
# Delete it going backwards so we don't invalidate indices
deletion_indices = sorted(existing_columns[name] for name in column_names)
for colid in reversed(deletion_indices):
with cython_context():
self.__proxy__.remove_column(colid)
self._cache = None
return self
def swap_columns(self, column_1, column_2):
"""
Swap the columns with the given names. This operation modifies the
current SFrame in place and returns self.
Parameters
----------
column_1 : string
Name of column to swap
column_2 : string
Name of other column to swap
Returns
-------
out : SFrame
The SFrame with swapped columns.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf.swap_columns('id', 'val')
>>> sf
+-----+-----+
| val | id |
+-----+-----+
| A | 1 |
| B | 2 |
| C | 3 |
+-----+-----+
[3 rows x 2 columns]
"""
colnames = self.column_names()
colid_1 = colnames.index(column_1)
colid_2 = colnames.index(column_2)
with cython_context():
self.__proxy__.swap_columns(colid_1, colid_2)
self._cache = None
return self
def rename(self, names):
"""
Rename the given columns. ``names`` is expected to be a dict specifying
the old and new names. This changes the names of the columns given as
the keys and replaces them with the names given as the values. This
operation modifies the current SFrame in place and returns self.
Parameters
----------
names : dict [string, string]
Dictionary of [old_name, new_name]
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
column_names
Examples
--------
>>> sf = SFrame({'X1': ['Alice','Bob'],
... 'X2': ['123 Fake Street','456 Fake Street']})
>>> sf.rename({'X1': 'name', 'X2':'address'})
>>> sf
+-------+-----------------+
| name | address |
+-------+-----------------+
| Alice | 123 Fake Street |
| Bob | 456 Fake Street |
+-------+-----------------+
[2 rows x 2 columns]
"""
if (type(names) is not dict):
raise TypeError('names must be a dictionary: oldname -> newname')
all_columns = set(self.column_names())
for k in names:
if not k in all_columns:
raise ValueError('Cannot find column %s in the SFrame' % k)
with cython_context():
for k in names:
colid = self.column_names().index(k)
self.__proxy__.set_column_name(colid, names[k])
self._cache = None
return self
def __getitem__(self, key):
"""
The behavior of this method depends on the type of `key`.
If `key` is:
* str
selects column with name 'key'
* type
selects all columns with types matching the type
* list of str or type
selects all columns with names or type in the list
* SArray
Performs a logical filter. Expects given SArray to be the same
length as all columns in current SFrame. Every row
corresponding with an entry in the given SArray that is
equivalent to False is filtered from the result.
* int
Returns a single row of the SFrame (the `key`th one) as a dictionary.
* slice
Returns an SFrame including only the sliced rows.
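Examples
--------
Illustrative sketches of each access pattern, assuming ``sf`` is an
SFrame with an int column 'id' and a str column 'val' (these names
are arbitrary):
>>> sf['id']           # column by name -> SArray
>>> sf[int]            # all int-typed columns -> SFrame
>>> sf[['id', 'val']]  # a list of columns -> SFrame
>>> sf[sf['id'] > 1]   # logical filter by an SArray -> SFrame
>>> sf[0]              # a single row -> dict
>>> sf[1:3]            # a slice of rows -> SFrame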
"""
if type(key) is SArray:
return self._row_selector(key)
elif type(key) is str:
return self.select_column(key)
elif type(key) is type:
return self.select_columns([key])
elif _is_non_string_iterable(key):
return self.select_columns(key)
elif isinstance(key, numbers.Integral):
sf_len = len(self)
if key < 0:
key = sf_len + key
if key >= sf_len:
raise IndexError("SFrame index out of range")
if not hasattr(self, '_cache') or self._cache is None:
self._cache = {}
try:
lb, ub, value_list = self._cache["getitem_cache"]
if lb <= key < ub:
return value_list[int(key - lb)]
except KeyError:
pass
# Not in the cache, so we need to fetch it. The block size is smaller
# here than for SArray; pick one that won't cause memory to blow up.
if not "getitem_cache_blocksize" in self._cache:
block_size = \
(8*1024) // sum( (2 if dt in [int, long, float] else 8) for dt in self.column_types())
block_size = max(16, block_size)
self._cache["getitem_cache_blocksize"] = block_size
else:
block_size = self._cache["getitem_cache_blocksize"]
block_num = int(key // block_size)
lb = block_num * block_size
ub = min(sf_len, lb + block_size)
val_list = list(SFrame(_proxy = self.__proxy__.copy_range(lb, 1, ub)))
self._cache["getitem_cache"] = (lb, ub, val_list)
return val_list[int(key - lb)]
elif type(key) is slice:
start = key.start
stop = key.stop
step = key.step
if start is None:
start = 0
if stop is None:
stop = len(self)
if step is None:
step = 1
# handle negative indices
if start < 0:
start = len(self) + start
if stop < 0:
stop = len(self) + stop
return SFrame(_proxy = self.__proxy__.copy_range(start, step, stop))
else:
raise TypeError("Invalid index type: must be SArray, list, int, or str")
def __setitem__(self, key, value):
"""
A wrapper around add_column(s). Key can be either a list or a str. If
value is an SArray, it is added to the SFrame as a column. If it is a
constant value (int, str, or float), then a column is created where
every entry is equal to the constant value. Existing columns can also
be replaced using this wrapper.
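Examples
--------
Illustrative sketches (the column names are arbitrary):
>>> sf = graphlab.SFrame({'id': [1, 2, 3]})
>>> sf['val'] = graphlab.SArray(['A', 'B', 'C'])  # add an SArray column
>>> sf['flag'] = 1                                # add a constant-valued column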
"""
if type(key) is list:
self.add_columns(value, key)
elif type(key) is str:
sa_value = None
if (type(value) is SArray):
sa_value = value
elif _is_non_string_iterable(value): # wrap list, array... to sarray
sa_value = SArray(value)
else: # create an sarray of constant value
sa_value = SArray.from_const(value, self.num_rows())
# set new column
if not key in self.column_names():
with cython_context():
self.add_column(sa_value, key)
else:
# Special case: replacing the only column.
# The server would fail the replacement if the new column has a
# different length than the current one, which doesn't make sense if
# we are replacing the only column. To support this, we first take the
# only column out and put it back if an exception happens.
single_column = (self.num_cols() == 1)
if (single_column):
tmpname = key
saved_column = self.select_column(key)
self.remove_column(key)
else:
# add the column to a unique column name.
tmpname = '__' + '-'.join(self.column_names())
try:
self.add_column(sa_value, tmpname)
except Exception as e:
if (single_column):
self.add_column(saved_column, key)
raise
if (not single_column):
# if add succeeded, remove the column name and rename tmpname->columnname.
self.swap_columns(key, tmpname)
self.remove_column(key)
self.rename({tmpname: key})
else:
raise TypeError('Cannot set column with key type ' + str(type(key)))
def __delitem__(self, key):
"""
Wrapper around remove_column.
"""
self.remove_column(key)
def materialize(self):
"""
For an SFrame that is lazily evaluated, force the persistence of the
SFrame to disk, committing all lazy evaluated operations.
"""
return self.__materialize__()
def __materialize__(self):
"""
For an SFrame that is lazily evaluated, force the persistence of the
SFrame to disk, committing all lazy evaluated operations.
"""
with cython_context():
self.__proxy__.materialize()
def is_materialized(self):
"""
Returns whether or not the SFrame has been materialized.
"""
return self.__is_materialized__()
def __is_materialized__(self):
"""
Returns whether or not the SFrame has been materialized.
"""
return self.__proxy__.is_materialized()
def __has_size__(self):
"""
Returns whether or not the size of the SFrame is known.
"""
return self.__proxy__.has_size()
def __query_plan_str__(self):
"""
Returns the query plan as a dot graph string
"""
return self.__proxy__.query_plan_string()
def __iter__(self):
"""
Provides an iterator to the rows of the SFrame.
"""
def generator():
elems_at_a_time = 262144
self.__proxy__.begin_iterator()
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
column_names = self.column_names()
while(True):
for j in ret:
yield dict(list(zip(column_names, j)))
if len(ret) == elems_at_a_time:
ret = self.__proxy__.iterator_get_next(elems_at_a_time)
else:
break
return generator()
def append(self, other):
"""
Add the rows of an SFrame to the end of this SFrame.
Both SFrames must have the same set of columns with the same column
names and column types.
Parameters
----------
other : SFrame
Another SFrame whose rows are appended to the current SFrame.
Returns
-------
out : SFrame
The result SFrame from the append operation.
Examples
--------
>>> sf = graphlab.SFrame({'id': [4, 6, 8], 'val': ['D', 'F', 'H']})
>>> sf2 = graphlab.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf = sf.append(sf2)
>>> sf
+----+-----+
| id | val |
+----+-----+
| 4 | D |
| 6 | F |
| 8 | H |
| 1 | A |
| 2 | B |
| 3 | C |
+----+-----+
[6 rows x 2 columns]
"""
if type(other) is not SFrame:
raise RuntimeError("SFrame append can only work with SFrame")
left_empty = len(self.column_names()) == 0
right_empty = len(other.column_names()) == 0
if (left_empty and right_empty):
return SFrame()
if (left_empty or right_empty):
non_empty_sframe = self if right_empty else other
return non_empty_sframe.__copy__()
my_column_names = self.column_names()
my_column_types = self.column_types()
other_column_names = other.column_names()
if (len(my_column_names) != len(other_column_names)):
raise RuntimeError("Two SFrames have to have the same number of columns")
# check if the order of column name is the same
column_name_order_match = True
for i in range(len(my_column_names)):
if other_column_names[i] != my_column_names[i]:
column_name_order_match = False
break
processed_other_frame = other
if not column_name_order_match:
# we allow the column order of the two sframes to differ, so we create a
# new sframe from the "other" sframe so that it has exactly the same shape
processed_other_frame = SFrame()
for i in range(len(my_column_names)):
col_name = my_column_names[i]
if(col_name not in other_column_names):
raise RuntimeError("Column " + my_column_names[i] + " does not exist in second SFrame")
other_column = other.select_column(col_name)
processed_other_frame.add_column(other_column, col_name)
# check column type
if my_column_types[i] != other_column.dtype():
raise RuntimeError("Column " + my_column_names[i] + " type is not the same in two SFrames, one is " + str(my_column_types[i]) + ", the other is " + str(other_column.dtype()))
with cython_context():
return SFrame(_proxy=self.__proxy__.append(processed_other_frame.__proxy__))
def groupby(self, key_columns, operations, *args):
"""
Perform a group on the key_columns followed by aggregations on the
columns listed in operations.
The operations parameter is a dictionary that indicates which
aggregation operators to use and which columns to use them on. The
available operators are SUM, MAX, MIN, COUNT, AVG, VAR, STDV, CONCAT,
SELECT_ONE, ARGMIN, ARGMAX, and QUANTILE. For convenience, aggregators
MEAN, STD, and VARIANCE are available as synonyms for AVG, STDV, and
VAR. See :mod:`~graphlab.aggregate` for more detail on the aggregators.
Parameters
----------
key_columns : string | list[string]
Column(s) to group by. Key columns can be of any type other than
dictionary.
operations : dict, list
Dictionary of columns and aggregation operations. Each key is an
output column name and each value is an aggregator. This can also
be a list of aggregators, in which case column names will be
automatically assigned.
*args
All other remaining arguments will be interpreted in the same
way as the operations argument.
Returns
-------
out_sf : SFrame
A new SFrame, with a column for each groupby column and each
aggregation operation.
See Also
--------
aggregate
Examples
--------
Suppose we have an SFrame with movie ratings by many users.
>>> import graphlab.aggregate as agg
>>> url = 'https://static.turi.com/datasets/rating_data_example.csv'
>>> sf = graphlab.SFrame.read_csv(url)
>>> sf
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| 25933 | 1663 | 4 |
| 25934 | 1663 | 4 |
| 25935 | 1663 | 4 |
| 25936 | 1663 | 5 |
| 25937 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[10000 rows x 3 columns]
Compute the number of occurrences of each user.
>>> user_count = sf.groupby(key_columns='user_id',
... operations={'count': agg.COUNT()})
>>> user_count
+---------+-------+
| user_id | count |
+---------+-------+
| 62361 | 1 |
| 30727 | 1 |
| 40111 | 1 |
| 50513 | 1 |
| 35140 | 1 |
| 42352 | 1 |
| 29667 | 1 |
| 46242 | 1 |
| 58310 | 1 |
| 64614 | 1 |
| ... | ... |
+---------+-------+
[9852 rows x 2 columns]
Compute the mean and standard deviation of ratings per user.
>>> user_rating_stats = sf.groupby(key_columns='user_id',
... operations={
... 'mean_rating': agg.MEAN('rating'),
... 'std_rating': agg.STD('rating')
... })
>>> user_rating_stats
+---------+-------------+------------+
| user_id | mean_rating | std_rating |
+---------+-------------+------------+
| 62361 | 5.0 | 0.0 |
| 30727 | 4.0 | 0.0 |
| 40111 | 2.0 | 0.0 |
| 50513 | 4.0 | 0.0 |
| 35140 | 4.0 | 0.0 |
| 42352 | 5.0 | 0.0 |
| 29667 | 4.0 | 0.0 |
| 46242 | 5.0 | 0.0 |
| 58310 | 2.0 | 0.0 |
| 64614 | 2.0 | 0.0 |
| ... | ... | ... |
+---------+-------------+------------+
[9852 rows x 3 columns]
Compute the movie with the minimum rating per user.
>>> chosen_movies = sf.groupby(key_columns='user_id',
... operations={
... 'worst_movies': agg.ARGMIN('rating','movie_id')
... })
>>> chosen_movies
+---------+-------------+
| user_id | worst_movies |
+---------+-------------+
| 62361 | 1663 |
| 30727 | 1663 |
| 40111 | 1663 |
| 50513 | 1663 |
| 35140 | 1663 |
| 42352 | 1663 |
| 29667 | 1663 |
| 46242 | 1663 |
| 58310 | 1663 |
| 64614 | 1663 |
| ... | ... |
+---------+-------------+
[9852 rows x 2 columns]
Compute the movie with the max rating per user and also the movie with
the maximum imdb-ranking per user.
>>> sf['imdb-ranking'] = sf['rating'] * 10
>>> chosen_movies = sf.groupby(key_columns='user_id',
... operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie_id')})
>>> chosen_movies
+---------+------------------+------------------------+
| user_id | max_rating_movie | max_imdb_ranking_movie |
+---------+------------------+------------------------+
| 62361 | 1663 | 16630 |
| 30727 | 1663 | 16630 |
| 40111 | 1663 | 16630 |
| 50513 | 1663 | 16630 |
| 35140 | 1663 | 16630 |
| 42352 | 1663 | 16630 |
| 29667 | 1663 | 16630 |
| 46242 | 1663 | 16630 |
| 58310 | 1663 | 16630 |
| 64614 | 1663 | 16630 |
| ... | ... | ... |
+---------+------------------+------------------------+
[9852 rows x 3 columns]
Compute the movie with the max rating per user.
>>> chosen_movies = sf.groupby(key_columns='user_id',
operations={'best_movies': agg.ARGMAX('rating','movie_id')})
Compute the movie with the max rating per user and also the movie with the maximum imdb-ranking per user.
>>> chosen_movies = sf.groupby(key_columns='user_id',
operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie_id')})
Compute the count, mean, and standard deviation of ratings per (user,
time), automatically assigning output column names.
>>> sf['time'] = sf.apply(lambda x: (x['user_id'] + x['movie_id']) % 11 + 2000)
>>> user_rating_stats = sf.groupby(['user_id', 'time'],
... [agg.COUNT(),
... agg.AVG('rating'),
... agg.STDV('rating')])
>>> user_rating_stats
+------+---------+-------+---------------+----------------+
| time | user_id | Count | Avg of rating | Stdv of rating |
+------+---------+-------+---------------+----------------+
| 2006 | 61285 | 1 | 4.0 | 0.0 |
| 2000 | 36078 | 1 | 4.0 | 0.0 |
| 2003 | 47158 | 1 | 3.0 | 0.0 |
| 2007 | 34446 | 1 | 3.0 | 0.0 |
| 2010 | 47990 | 1 | 3.0 | 0.0 |
| 2003 | 42120 | 1 | 5.0 | 0.0 |
| 2007 | 44940 | 1 | 4.0 | 0.0 |
| 2008 | 58240 | 1 | 4.0 | 0.0 |
| 2002 | 102 | 1 | 1.0 | 0.0 |
| 2009 | 52708 | 1 | 3.0 | 0.0 |
| ... | ... | ... | ... | ... |
+------+---------+-------+---------------+----------------+
[10000 rows x 5 columns]
The groupby function can take a variable-length list of aggregation
specifiers, so if we want the count and the 0.25 and 0.75 quantiles of
ratings:
>>> user_rating_stats = sf.groupby(['user_id', 'time'], agg.COUNT(),
... {'rating_quantiles': agg.QUANTILE('rating',[0.25, 0.75])})
>>> user_rating_stats
+------+---------+-------+------------------------+
| time | user_id | Count | rating_quantiles |
+------+---------+-------+------------------------+
| 2006 | 61285 | 1 | array('d', [4.0, 4.0]) |
| 2000 | 36078 | 1 | array('d', [4.0, 4.0]) |
| 2003 | 47158 | 1 | array('d', [3.0, 3.0]) |
| 2007 | 34446 | 1 | array('d', [3.0, 3.0]) |
| 2010 | 47990 | 1 | array('d', [3.0, 3.0]) |
| 2003 | 42120 | 1 | array('d', [5.0, 5.0]) |
| 2007 | 44940 | 1 | array('d', [4.0, 4.0]) |
| 2008 | 58240 | 1 | array('d', [4.0, 4.0]) |
| 2002 | 102 | 1 | array('d', [1.0, 1.0]) |
| 2009 | 52708 | 1 | array('d', [3.0, 3.0]) |
| ... | ... | ... | ... |
+------+---------+-------+------------------------+
[10000 rows x 4 columns]
To put all items a user rated into one list value by their star rating:
>>> user_rating_stats = sf.groupby(["user_id", "rating"],
... {"rated_movie_ids":agg.CONCAT("movie_id")})
>>> user_rating_stats
+--------+---------+----------------------+
| rating | user_id | rated_movie_ids |
+--------+---------+----------------------+
| 3 | 31434 | array('d', [1663.0]) |
| 5 | 25944 | array('d', [1663.0]) |
| 4 | 38827 | array('d', [1663.0]) |
| 4 | 51437 | array('d', [1663.0]) |
| 4 | 42549 | array('d', [1663.0]) |
| 4 | 49532 | array('d', [1663.0]) |
| 3 | 26124 | array('d', [1663.0]) |
| 4 | 46336 | array('d', [1663.0]) |
| 4 | 52133 | array('d', [1663.0]) |
| 5 | 62361 | array('d', [1663.0]) |
| ... | ... | ... |
+--------+---------+----------------------+
[9952 rows x 3 columns]
To put all items and rating of a given user together into a dictionary
value:
>>> user_rating_stats = sf.groupby("user_id",
... {"movie_rating":agg.CONCAT("movie_id", "rating")})
>>> user_rating_stats
+---------+--------------+
| user_id | movie_rating |
+---------+--------------+
| 62361 | {1663: 5} |
| 30727 | {1663: 4} |
| 40111 | {1663: 2} |
| 50513 | {1663: 4} |
| 35140 | {1663: 4} |
| 42352 | {1663: 5} |
| 29667 | {1663: 4} |
| 46242 | {1663: 5} |
| 58310 | {1663: 2} |
| 64614 | {1663: 2} |
| ... | ... |
+---------+--------------+
[9852 rows x 2 columns]
"""
# some basic checking first
# make sure key_columns is a list
if isinstance(key_columns, str):
key_columns = [key_columns]
# check that every column is a string, and is a valid column name
my_column_names = self.column_names()
key_columns_array = []
for column in key_columns:
if not isinstance(column, str):
raise TypeError("Column name must be a string")
if column not in my_column_names:
raise KeyError("Column " + column + " does not exist in SFrame")
if self[column].dtype() == dict:
raise TypeError("Cannot group on a dictionary column.")
key_columns_array.append(column)
group_output_columns = []
group_columns = []
group_ops = []
all_ops = [operations] + list(args)
for op_entry in all_ops:
# if it is not a dict, nor a list, it is just a single aggregator
# element (probably COUNT). wrap it in a list so we can reuse the
# list processing code
operation = op_entry
if not(isinstance(operation, list) or isinstance(operation, dict)):
operation = [operation]
if isinstance(operation, dict):
# now sweep the dict and add to group_columns and group_ops
for key in operation:
val = operation[key]
if type(val) is tuple:
(op, column) = val
if (op == '__builtin__avg__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__avg__'
if (op == '__builtin__sum__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__sum__'
if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and ((type(column[0]) is tuple) != (type(key) is tuple)):
raise TypeError("Output column(s) and aggregate column(s) for aggregate operation should be either all tuple or all string.")
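# For the multi-output form of ARGMAX/ARGMIN, column[0] is a tuple of
# aggregate columns and key is a matching tuple of output names; they are
# expanded into one op per pair, each returning its value from column[1].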
if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple:
for (col,output) in zip(column[0],key):
group_columns = group_columns + [[col,column[1]]]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [output]
else:
group_columns = group_columns + [column]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [key]
if (op == '__builtin__concat__dict__'):
key_column = column[0]
key_column_type = self.select_column(key_column).dtype()
if not key_column_type in (int, float, str):
raise TypeError('CONCAT key column must be int, float or str type')
elif val == aggregate.COUNT:
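# The bare aggregator class (agg.COUNT rather than agg.COUNT()) is accepted
# as a convenience; instantiate it so it unpacks into the usual
# (op, column) pair below.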
group_output_columns = group_output_columns + [key]
val = aggregate.COUNT()
(op, column) = val
group_columns = group_columns + [column]
group_ops = group_ops + [op]
else:
raise TypeError("Unexpected type in aggregator definition of output column: " + key)
elif isinstance(operation, list):
# we will be using automatically defined column names
for val in operation:
if type(val) is tuple:
(op, column) = val
if (op == '__builtin__avg__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__avg__'
if (op == '__builtin__sum__' and self[column[0]].dtype() is array.array):
op = '__builtin__vector__sum__'
if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple:
for col in column[0]:
group_columns = group_columns + [[col,column[1]]]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [""]
else:
group_columns = group_columns + [column]
group_ops = group_ops + [op]
group_output_columns = group_output_columns + [""]
if (op == '__builtin__concat__dict__'):
key_column = column[0]
key_column_type = self.select_column(key_column).dtype()
if not key_column_type in (int, float, str):
raise TypeError('CONCAT key column must be int, float or str type')
elif val == aggregate.COUNT:
group_output_columns = group_output_columns + [""]
val = aggregate.COUNT()
(op, column) = val
group_columns = group_columns + [column]
group_ops = group_ops + [op]
else:
raise TypeError("Unexpected type in aggregator definition.")
# let's validate group_columns and group_ops are valid
for (cols, op) in zip(group_columns, group_ops):
for col in cols:
if not isinstance(col, str):
raise TypeError("Column name must be a string")
if not isinstance(op, str):
raise TypeError("Operation type not recognized.")
if op != aggregate.COUNT()[0]:
for col in cols:
if col not in my_column_names:
raise KeyError("Column " + col + " does not exist in SFrame")
with cython_context():
return SFrame(_proxy=self.__proxy__.groupby_aggregate(key_columns_array,
group_columns,
group_output_columns,
group_ops))
def join(self, right, on=None, how='inner'):
"""
Merge two SFrames. Merges the current (left) SFrame with the given
(right) SFrame using a SQL-style equi-join operation by columns.
Parameters
----------
right : SFrame
The SFrame to join.
on : None | str | list | dict, optional
The column name(s) representing the set of join keys. Each row that
has the same value in this set of columns will be merged together.
* If 'None' is given, join will use all columns that have the same
name as the set of join keys.
* If a str is given, this is interpreted as a join using one column,
where both SFrames have the same column name.
* If a list is given, this is interpreted as a join using one or
more column names, where each column name given exists in both
SFrames.
* If a dict is given, each dict key is taken as a column name in the
left SFrame, and each dict value is taken as the column name in
right SFrame that will be joined together. e.g.
{'left_col_name':'right_col_name'}.
how : {'left', 'right', 'outer', 'inner'}, optional
The type of join to perform. 'inner' is default.
* inner: Equivalent to a SQL inner join. Result consists of the
rows from the two frames whose join key values match exactly,
merged together into one SFrame.
* left: Equivalent to a SQL left outer join. Result is the union
between the result of an inner join and the rest of the rows from
the left SFrame, merged with missing values.
* right: Equivalent to a SQL right outer join. Result is the union
between the result of an inner join and the rest of the rows from
the right SFrame, merged with missing values.
* outer: Equivalent to a SQL full outer join. Result is
the union between the result of a left outer join and a right
outer join.
Returns
-------
out : SFrame
Examples
--------
>>> animals = graphlab.SFrame({'id': [1, 2, 3, 4],
... 'name': ['dog', 'cat', 'sheep', 'cow']})
>>> sounds = graphlab.SFrame({'id': [1, 3, 4, 5],
... 'sound': ['woof', 'baa', 'moo', 'oink']})
>>> animals.join(sounds, how='inner')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
+----+-------+-------+
[3 rows x 3 columns]
>>> animals.join(sounds, on='id', how='left')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 2 | cat | None |
+----+-------+-------+
[4 rows x 3 columns]
>>> animals.join(sounds, on=['id'], how='right')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 5 | None | oink |
+----+-------+-------+
[4 rows x 3 columns]
>>> animals.join(sounds, on={'id':'id'}, how='outer')
+----+-------+-------+
| id | name | sound |
+----+-------+-------+
| 1 | dog | woof |
| 3 | sheep | baa |
| 4 | cow | moo |
| 5 | None | oink |
| 2 | cat | None |
+----+-------+-------+
[5 rows x 3 columns]
"""
available_join_types = ['left','right','outer','inner']
if not isinstance(right, SFrame):
raise TypeError("Can only join two SFrames")
if how not in available_join_types:
raise ValueError("Invalid join type")
if (self.num_cols() <= 0) or (right.num_cols() <= 0):
raise ValueError("Cannot join an SFrame with no columns.")
join_keys = dict()
if on is None:
left_names = self.column_names()
right_names = right.column_names()
common_columns = [name for name in left_names if name in right_names]
for name in common_columns:
join_keys[name] = name
elif type(on) is str:
join_keys[on] = on
elif type(on) is list:
for name in on:
if type(name) is not str:
raise TypeError("Join keys must each be a str.")
join_keys[name] = name
elif type(on) is dict:
join_keys = on
else:
raise TypeError("Must pass a str, list, or dict of join keys")
with cython_context():
return SFrame(_proxy=self.__proxy__.join(right.__proxy__, how, join_keys))
def filter_by(self, values, column_name, exclude=False):
"""
Filter an SFrame by values inside an iterable object. Result is an
SFrame that only includes (or excludes) the rows that have a column
with the given ``column_name`` which holds one of the values in the
given ``values`` :class:`~graphlab.SArray`. If ``values`` is not an
SArray, we attempt to convert it to one before filtering.
Parameters
----------
values : SArray | list | numpy.ndarray | pandas.Series | str
The values to use to filter the SFrame. The resulting SFrame will
only include rows that have one of these values in the given
column.
column_name : str
The column of the SFrame to match with the given `values`.
exclude : bool
If True, the result SFrame will contain all rows EXCEPT those that
have one of ``values`` in ``column_name``.
Returns
-------
out : SFrame
The filtered SFrame.
Examples
--------
>>> sf = graphlab.SFrame({'id': [1, 2, 3, 4],
... 'animal_type': ['dog', 'cat', 'cow', 'horse'],
... 'name': ['bob', 'jim', 'jimbob', 'bobjim']})
>>> household_pets = ['cat', 'hamster', 'dog', 'fish', 'bird', 'snake']
>>> sf.filter_by(household_pets, 'animal_type')
+-------------+----+------+
| animal_type | id | name |
+-------------+----+------+
| dog | 1 | bob |
| cat | 2 | jim |
+-------------+----+------+
[2 rows x 3 columns]
>>> sf.filter_by(household_pets, 'animal_type', exclude=True)
+-------------+----+--------+
| animal_type | id | name |
+-------------+----+--------+
| horse | 4 | bobjim |
| cow | 3 | jimbob |
+-------------+----+--------+
[2 rows x 3 columns]
"""
if type(column_name) is not str:
raise TypeError("Must pass a str as column_name")
existing_columns = self.column_names()
if column_name not in existing_columns:
raise KeyError("Column '" + column_name + "' not in SFrame.")
if type(values) is not SArray:
# If we were given a single element, try to put in list and convert
# to SArray
if not _is_non_string_iterable(values):
values = [values]
values = SArray(values)
value_sf = SFrame()
value_sf.add_column(values, column_name)
existing_type = self.column_types()[self.column_names().index(column_name)]
given_type = value_sf.column_types()[0]
if given_type != existing_type:
raise TypeError("Type of given values does not match type of column '" +
column_name + "' in SFrame.")
# Make sure the values list has unique values, or else join will not
# filter.
value_sf = value_sf.groupby(column_name, {})
with cython_context():
if exclude:
id_name = "id"
# Make sure this name is unique so we know what to remove in
# the result
while id_name in existing_columns:
id_name += "1"
value_sf = value_sf.add_row_number(id_name)
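# Left-join against the unique filter values, then keep only the rows
# where the join found no match (the added id column is None); those are
# exactly the rows whose value is not in `values`.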
tmp = SFrame(_proxy=self.__proxy__.join(value_sf.__proxy__,
'left',
{column_name:column_name}))
ret_sf = tmp[tmp[id_name] == None]
del ret_sf[id_name]
return ret_sf
else:
return SFrame(_proxy=self.__proxy__.join(value_sf.__proxy__,
'inner',
{column_name:column_name}))
def show(self, columns=None, view=None, x=None, y=None):
"""
show(columns=None, view=None, x=None, y=None)
Visualize the SFrame with GraphLab Create :mod:`~graphlab.canvas`. This function
starts Canvas if it is not already running. If the SFrame has already been plotted,
this function will update the plot.
Parameters
----------
view : str, optional
The name of the SFrame view to show. Can be one of:
- None: Use the default (depends on which Canvas target is set).
- 'Table': Show a scrollable, tabular view of the data in the
SFrame.
- 'Summary': Show a list of columns with some summary statistics
and plots for each column.
- 'Scatter Plot': Show a scatter plot of two numeric columns.
- 'Heat Map': Show a heat map of two numeric columns.
- 'Bar Chart': Show a bar chart of one numeric and one categorical
column.
- 'BoxWhisker Plot': Show a box and whisker plot of one numeric and one
categorical column.
- 'Line Chart': Show a line chart of one numeric and one
categorical column.
x : str, optional
The column to use for the X axis in a Scatter Plot, Heat Map, Bar
Chart, or Line Chart view. Must be the name of one of the columns
in this SFrame. For Scatter Plot and Heat Map, the column must be
numeric (int or float). If not set, defaults to the first available
valid column.
y : str, optional
The column to use for the Y axis in a Scatter Plot, Heat Map, Bar
Chart, or Line Chart view. Must be the name of one of the numeric
columns in this SFrame. If not set, defaults to the second
available numeric column.
Returns
-------
view : graphlab.canvas.view.View
An object representing the GraphLab Canvas view.
See Also
--------
canvas
Examples
--------
Suppose 'sf' is an SFrame, we can view it in GraphLab Canvas using:
>>> sf.show()
To choose a specific view of the SFrame:
>>> sf.show(view="Summary")
>>> sf.show(view="Table")
>>> sf.show(view="Bar Chart", x="col1", y="col2")
>>> sf.show(view="Line Chart", x="col1", y="col2")
>>> sf.show(view="Scatter Plot", x="col1", y="col2")
>>> sf.show(view="Heat Map", x="col1", y="col2")
"""
if columns is not None:
__LOGGER__.warn("Column selection for SFrame.show is deprecated. To show only certain columns, use the sf[['column1', 'column2']] syntax or construct a new SFrame with the desired columns.")
from ..visualization.show import show
show(self, view=view, x=x, y=y)
def pack_columns(self, columns=None, column_prefix=None, dtype=list,
fill_na=None, remove_prefix=True, new_column_name=None):
"""
Pack columns of the current SFrame into one single column. The result
is a new SFrame with the unaffected columns from the original SFrame
plus the newly created column.
The list of columns that are packed is chosen through either the
``columns`` or ``column_prefix`` parameter. Only one of the parameters
is allowed to be provided. ``columns`` explicitly specifies the list of
columns to pack, while ``column_prefix`` specifies that all columns that
have the given prefix are to be packed.
The type of the resulting column is decided by the ``dtype`` parameter.
Allowed values for ``dtype`` are dict, array.array and list:
- *dict*: pack to a dictionary SArray where column name becomes
dictionary key and column value becomes dictionary value
- *array.array*: pack all values from the packing columns into an array
- *list*: pack all values from the packing columns into a list.
Parameters
----------
columns : list[str], optional
A list of column names to be packed. If omitted and
`column_prefix` is not specified, all columns from current SFrame
are packed. This parameter is mutually exclusive with the
`column_prefix` parameter.
column_prefix : str, optional
Pack all columns with the given `column_prefix`.
This parameter is mutually exclusive with the `columns` parameter.
dtype : dict | array.array | list, optional
The resulting packed column type. If not provided, dtype is list.
fill_na : value, optional
Value to fill into packed column if missing value is encountered.
If packing to dictionary, `fill_na` is only applicable to dictionary
values; missing keys are not replaced.
remove_prefix : bool, optional
If True and `column_prefix` is specified, the dictionary key will
be constructed by removing the prefix from the column name.
This option is only applicable when packing to dict type.
new_column_name : str, optional
Packed column name. If not given and `column_prefix` is given,
then the prefix will be used as the new column name, otherwise name
is generated automatically.
Returns
-------
out : SFrame
An SFrame that contains columns that are not packed, plus the newly
packed column.
See Also
--------
unpack
Notes
-----
- If packing to dictionary, missing key is always dropped. Missing
values are dropped if fill_na is not provided, otherwise, missing
value is replaced by 'fill_na'. If packing to list or array, missing
values will be kept. If 'fill_na' is provided, the missing value is
replaced with 'fill_na' value.
Examples
--------
Suppose 'sf' is an SFrame that maintains business category
information:
>>> sf = graphlab.SFrame({'business': range(1, 5),
... 'category.retail': [1, None, 1, None],
... 'category.food': [1, 1, None, None],
... 'category.service': [None, 1, 1, None],
... 'category.shop': [1, 1, None, 1]})
>>> sf
+----------+-----------------+---------------+------------------+---------------+
| business | category.retail | category.food | category.service | category.shop |
+----------+-----------------+---------------+------------------+---------------+
| 1 | 1 | 1 | None | 1 |
| 2 | None | 1 | 1 | 1 |
| 3 | 1 | None | 1 | None |
| 4 | None | 1 | None | 1 |
+----------+-----------------+---------------+------------------+---------------+
[4 rows x 5 columns]
To pack all category columns into a list:
>>> sf.pack_columns(column_prefix='category')
+----------+--------------------+
| business | X2 |
+----------+--------------------+
| 1 | [1, 1, None, 1] |
| 2 | [None, 1, 1, 1] |
| 3 | [1, None, 1, None] |
| 4 | [None, 1, None, 1] |
+----------+--------------------+
[4 rows x 2 columns]
To pack all category columns into a dictionary, with new column name:
>>> sf.pack_columns(column_prefix='category', dtype=dict,
... new_column_name='category')
+----------+--------------------------------+
| business | category |
+----------+--------------------------------+
| 1 | {'food': 1, 'shop': 1, 're ... |
| 2 | {'food': 1, 'shop': 1, 'se ... |
| 3 | {'retail': 1, 'service': 1} |
| 4 | {'food': 1, 'shop': 1} |
+----------+--------------------------------+
[4 rows x 2 columns]
To keep column prefix in the resulting dict key:
>>> sf.pack_columns(column_prefix='category', dtype=dict,
... remove_prefix=False)
+----------+--------------------------------+
| business | X2 |
+----------+--------------------------------+
| 1 | {'category.retail': 1, 'ca ... |
| 2 | {'category.food': 1, 'cate ... |
| 3 | {'category.retail': 1, 'ca ... |
| 4 | {'category.food': 1, 'cate ... |
+----------+--------------------------------+
[4 rows x 2 columns]
To explicitly pack a set of columns:
>>> sf.pack_columns(columns = ['business', 'category.retail',
... 'category.food', 'category.service',
... 'category.shop'])
+-----------------------+
| X1 |
+-----------------------+
| [1, 1, 1, None, 1] |
| [2, None, 1, 1, 1] |
| [3, 1, None, 1, None] |
| [4, None, 1, None, 1] |
+-----------------------+
[4 rows x 1 columns]
To pack all columns with name starting with 'category' into an array
type, and with missing value replaced with 0:
>>> sf.pack_columns(column_prefix="category", dtype=array.array,
... fill_na=0)
+----------+--------------------------------+
| business | X2 |
+----------+--------------------------------+
| 1 | array('d', [1.0, 1.0, 0.0, ... |
| 2 | array('d', [0.0, 1.0, 1.0, ... |
| 3 | array('d', [1.0, 0.0, 1.0, ... |
| 4 | array('d', [0.0, 1.0, 0.0, ... |
+----------+--------------------------------+
[4 rows x 2 columns]
"""
if columns != None and column_prefix != None:
raise ValueError("'columns' and 'column_prefix' parameter cannot be given at the same time.")
if new_column_name == None and column_prefix != None:
new_column_name = column_prefix
if column_prefix != None:
if type(column_prefix) != str:
raise TypeError("'column_prefix' must be a string")
columns = [name for name in self.column_names() if name.startswith(column_prefix)]
if len(columns) == 0:
raise ValueError("There is no column starting with prefix '" + column_prefix + "'")
elif columns == None:
columns = self.column_names()
else:
if not _is_non_string_iterable(columns):
raise TypeError("columns must be an iterable type")
column_names = set(self.column_names())
for column in columns:
if (column not in column_names):
raise ValueError("Current SFrame has no column called '" + str(column) + "'.")
# check duplicate names
if len(set(columns)) != len(columns):
raise ValueError("There are duplicate column names in the columns parameter")
if (dtype not in (dict, list, array.array)):
raise ValueError("Resulting dtype has to be one of dict/array.array/list type")
# fill_na value for array needs to be numeric
if dtype == array.array:
if (fill_na != None) and (type(fill_na) not in (int, float)):
raise ValueError("fill_na value for array needs to be numeric type")
# all columns have to be numeric type
for column in columns:
if self[column].dtype() not in (int, float):
raise TypeError("Column '" + column + "' type is not numeric, cannot pack into array type")
# generate dict key names if pack to dictionary
# we try to be smart here
# if all column names are like: a.b, a.c, a.d,...
# we then use "b", "c", "d", etc as the dictionary key during packing
if (dtype == dict) and (column_prefix != None) and (remove_prefix == True):
size_prefix = len(column_prefix)
first_char = set([c[size_prefix:size_prefix+1] for c in columns])
if ((len(first_char) == 1) and first_char.pop() in ['.','-','_']):
dict_keys = [name[size_prefix+1:] for name in columns]
else:
dict_keys = [name[size_prefix:] for name in columns]
else:
dict_keys = columns
rest_columns = [name for name in self.column_names() if name not in columns]
if new_column_name != None:
if type(new_column_name) != str:
raise TypeError("'new_column_name' has to be a string")
if new_column_name in rest_columns:
raise KeyError("Current SFrame already contains a column named " + new_column_name)
else:
new_column_name = ""
ret_sa = None
with cython_context():
ret_sa = SArray(_proxy=self.__proxy__.pack_columns(columns, dict_keys,
dtype, fill_na))
new_sf = self.select_columns(rest_columns)
new_sf.add_column(ret_sa, new_column_name)
return new_sf
def split_datetime(self, expand_column, column_name_prefix=None, limit=None, tzone=False):
"""
Splits a datetime column of SFrame to multiple columns, with each value in a
separate column. Returns a new SFrame with the expanded column replaced with
a list of new columns. The expanded column must be of datetime type.
For more details regarding name generation and other behavior, refer
to :py:func:`graphlab.SArray.split_datetime()`
Parameters
----------
expand_column : str
Name of the unpacked column.
column_name_prefix : str, optional
If provided, expanded column names would start with the given prefix.
If not provided, the default value is the name of the expanded column.
limit: list[str], optional
Limits the set of datetime elements to expand.
Possible values are 'year','month','day','hour','minute','second',
'weekday', 'isoweekday', 'tmweekday', and 'us'.
If not provided, only ['year','month','day','hour','minute','second']
are expanded.
tzone : bool, optional
A boolean parameter that determines whether to show the timezone
column or not. Defaults to False.
Returns
-------
out : SFrame
A new SFrame that contains rest of columns from original SFrame with
the given column replaced with a collection of expanded columns.
Examples
---------
>>> sf
Columns:
id int
submission datetime
Rows: 2
Data:
+----+-------------------------------------------------+
| id | submission |
+----+-------------------------------------------------+
| 1 | datetime(2011, 1, 21, 7, 17, 21, tzinfo=GMT(+1))|
| 2 | datetime(2011, 1, 21, 5, 43, 21, tzinfo=GMT(+1))|
+----+-------------------------------------------------+
>>> sf.split_datetime('submission',limit=['hour','minute'])
Columns:
id int
submission.hour int
submission.minute int
Rows: 2
Data:
+----+-----------------+-------------------+
| id | submission.hour | submission.minute |
+----+-----------------+-------------------+
| 1 | 7 | 17 |
| 2 | 5 | 43 |
+----+-----------------+-------------------+
"""
if expand_column not in self.column_names():
raise KeyError("column '" + expand_column + "' does not exist in current SFrame")
if column_name_prefix == None:
column_name_prefix = expand_column
new_sf = self[expand_column].split_datetime(column_name_prefix, limit, tzone)
# construct return SFrame, check if there is conflict
rest_columns = [name for name in self.column_names() if name != expand_column]
new_names = new_sf.column_names()
while set(new_names).intersection(rest_columns):
new_names = [name + ".1" for name in new_names]
new_sf.rename(dict(list(zip(new_sf.column_names(), new_names))))
ret_sf = self.select_columns(rest_columns)
ret_sf.add_columns(new_sf)
return ret_sf
def unpack(self, unpack_column, column_name_prefix=None, column_types=None,
na_value=None, limit=None):
"""
Expand one column of this SFrame to multiple columns with each value in
a separate column. Returns a new SFrame with the unpacked column
replaced with a list of new columns. The column must be of
list/array/dict type.
For more details regarding name generation, missing value handling and
other behavior, refer to the SArray version of
:py:func:`~graphlab.SArray.unpack()`.
Parameters
----------
unpack_column : str
Name of the unpacked column
column_name_prefix : str, optional
If provided, unpacked column names would start with the given
prefix. If not provided, default value is the name of the unpacked
column.
column_types : [type], optional
Column types for the unpacked columns.
If not provided, column types are automatically inferred from first
100 rows. For array type, default column types are float. If
provided, column_types also restricts how many columns to unpack.
na_value : flexible_type, optional
If provided, convert all values that are equal to "na_value" to
missing value (None).
limit : list[str] | list[int], optional
Control unpacking only a subset of list/array/dict value. For
dictionary SArray, `limit` is a list of dictionary keys to restrict.
For list/array SArray, `limit` is a list of integers that are
indexes into the list/array value.
Returns
-------
out : SFrame
A new SFrame that contains rest of columns from original SFrame with
the given column replaced with a collection of unpacked columns.
See Also
--------
pack_columns, SArray.unpack
Examples
---------
>>> sf = graphlab.SFrame({'id': [1,2,3],
... 'wc': [{'a': 1}, {'b': 2}, {'a': 1, 'b': 2}]})
+----+------------------+
| id | wc |
+----+------------------+
| 1 | {'a': 1} |
| 2 | {'b': 2} |
| 3 | {'a': 1, 'b': 2} |
+----+------------------+
[3 rows x 2 columns]
>>> sf.unpack('wc')
+----+------+------+
| id | wc.a | wc.b |
+----+------+------+
| 1 | 1 | None |
| 2 | None | 2 |
| 3 | 1 | 2 |
+----+------+------+
[3 rows x 3 columns]
To not have prefix in the generated column name:
>>> sf.unpack('wc', column_name_prefix="")
+----+------+------+
| id | a | b |
+----+------+------+
| 1 | 1 | None |
| 2 | None | 2 |
| 3 | 1 | 2 |
+----+------+------+
[3 rows x 3 columns]
To limit subset of keys to unpack:
>>> sf.unpack('wc', limit=['b'])
+----+------+
| id | wc.b |
+----+------+
| 1 | None |
| 2 | 2 |
| 3 | 2 |
+----+------+
[3 rows x 2 columns]
To unpack an array column:
>>> sf = graphlab.SFrame({'id': [1,2,3],
... 'friends': [array.array('d', [1.0, 2.0, 3.0]),
... array.array('d', [2.0, 3.0, 4.0]),
... array.array('d', [3.0, 4.0, 5.0])]})
>>> sf
+----+-----------------------------+
| id | friends |
+----+-----------------------------+
| 1 | array('d', [1.0, 2.0, 3.0]) |
| 2 | array('d', [2.0, 3.0, 4.0]) |
| 3 | array('d', [3.0, 4.0, 5.0]) |
+----+-----------------------------+
[3 rows x 2 columns]
>>> sf.unpack('friends')
+----+-----------+-----------+-----------+
| id | friends.0 | friends.1 | friends.2 |
+----+-----------+-----------+-----------+
| 1 | 1.0 | 2.0 | 3.0 |
| 2 | 2.0 | 3.0 | 4.0 |
| 3 | 3.0 | 4.0 | 5.0 |
+----+-----------+-----------+-----------+
[3 rows x 4 columns]
"""
if unpack_column not in self.column_names():
raise KeyError("column '" + unpack_column + "' does not exist in current SFrame")
if column_name_prefix == None:
column_name_prefix = unpack_column
new_sf = self[unpack_column].unpack(column_name_prefix, column_types, na_value, limit)
# construct return SFrame, check if there is conflict
rest_columns = [name for name in self.column_names() if name != unpack_column]
new_names = new_sf.column_names()
while set(new_names).intersection(rest_columns):
new_names = [name + ".1" for name in new_names]
new_sf.rename(dict(list(zip(new_sf.column_names(), new_names))))
ret_sf = self.select_columns(rest_columns)
ret_sf.add_columns(new_sf)
return ret_sf
def stack(self, column_name, new_column_name=None, drop_na=False, new_column_type=None):
"""
Convert a "wide" column of an SFrame to one or two "tall" columns by
stacking all values.
The stack works only for columns of dict, list, or array type. If the
column is dict type, two new columns are created as a result of
stacking: one column holds the key and another column holds the value.
The rest of the columns are repeated for each key/value pair.
If the column is array or list type, one new column is created as a
result of stacking. Each row holds one element of the array or list
value, with the rest of the columns from the same original row repeated.
The new SFrame includes the newly created column and all columns other
than the one that is stacked.
Parameters
--------------
column_name : str
The column to stack. This column must be of dict/list/array type
new_column_name : str | list of str, optional
The new column name(s). If original column is list/array type,
new_column_name must be a string. If original column is dict type,
new_column_name must be a list of two strings. If not given, column
names are generated automatically.
drop_na : boolean, optional
If True, missing values and empty list/array/dict are all dropped
from the resulting column(s). If False, missing values are
maintained in stacked column(s).
new_column_type : type | list of types, optional
The new column types. If original column is a list/array type
new_column_type must be a single type, or a list of one type. If
original column is of dict type, new_column_type must be a list of
two types. If not provided, the types are automatically inferred
from the first 100 values of the SFrame.
Returns
-------
out : SFrame
A new SFrame that contains newly stacked column(s) plus columns in
original SFrame other than the stacked column.
See Also
--------
unstack
Examples
---------
Suppose 'sf' is an SFrame that contains a column of dict type:
>>> sf = graphlab.SFrame({'topic':[1,2,3,4],
... 'words': [{'a':3, 'cat':2},
... {'a':1, 'the':2},
... {'the':1, 'dog':3},
... {}]
... })
+-------+----------------------+
| topic | words |
+-------+----------------------+
| 1 | {'a': 3, 'cat': 2} |
| 2 | {'a': 1, 'the': 2} |
| 3 | {'the': 1, 'dog': 3} |
| 4 | {} |
+-------+----------------------+
[4 rows x 2 columns]
Stack would stack all keys in one column and all values in another
column:
>>> sf.stack('words', new_column_name=['word', 'count'])
+-------+------+-------+
| topic | word | count |
+-------+------+-------+
| 1 | a | 3 |
| 1 | cat | 2 |
| 2 | a | 1 |
| 2 | the | 2 |
| 3 | the | 1 |
| 3 | dog | 3 |
| 4 | None | None |
+-------+------+-------+
[7 rows x 3 columns]
Observe that since topic 4 had no words, an empty row is inserted.
To drop that row, set drop_na=True in the parameters to stack.
Suppose 'sf' is an SFrame with a 'topic' column and a 'friends' column
of array type. Stacking the 'friends' column creates one row for each
(topic, friend) pair:
>>> sf = graphlab.SFrame({'topic':[1,2,3],
... 'friends':[[2,3,4], [5,6],
... [4,5,10,None]]
... })
>>> sf
+-------+------------------+
| topic | friends |
+-------+------------------+
| 1 | [2, 3, 4] |
| 2 | [5, 6] |
| 3 | [4, 5, 10, None] |
+-------+------------------+
[3 rows x 2 columns]
>>> sf.stack('friends', new_column_name='friend')
+------+--------+
| topic | friend |
+------+--------+
| 1 | 2 |
| 1 | 3 |
| 1 | 4 |
| 2 | 5 |
| 2 | 6 |
| 3 | 4 |
| 3 | 5 |
| 3 | 10 |
| 3 | None |
+------+--------+
[9 rows x 2 columns]
"""
# validate column_name
column_name = str(column_name)
if column_name not in self.column_names():
raise ValueError("Cannot find column '" + str(column_name) + "' in the SFrame.")
stack_column_type = self[column_name].dtype()
if (stack_column_type not in [dict, array.array, list]):
raise TypeError("Stack is only supported for column of dict/list/array type.")
# user defined types. do some checking
if new_column_type != None:
# if new_column_type is a single type, just make it a list of one type
if type(new_column_type) is type:
new_column_type = [new_column_type]
if (stack_column_type in [list, array.array]) and len(new_column_type) != 1:
raise ValueError("Expecting a single column type to stack a list or array column")
if (stack_column_type in [dict]) and len(new_column_type) != 2:
raise ValueError("Expecting two column types to stack a dict column")
if (new_column_name != None):
if stack_column_type == dict:
if (type(new_column_name) is not list):
raise TypeError("new_column_name has to be a list to stack dict type")
elif (len(new_column_name) != 2):
raise TypeError("new_column_name must have length of two")
else:
if (type(new_column_name) != str):
raise TypeError("new_column_name has to be a str")
new_column_name = [new_column_name]
# check if the new column name conflicts with existing ones
for name in new_column_name:
if (name in self.column_names()) and (name != column_name):
raise ValueError("Column with name '" + name + "' already exists, pick a new column name")
else:
if stack_column_type == dict:
new_column_name = ["",""]
else:
new_column_name = [""]
# infer column types
head_row = SArray(self[column_name].head(100)).dropna()
if (len(head_row) == 0):
raise ValueError("Cannot infer column type because there are not enough rows to infer values")
if new_column_type == None:
# we have to perform type inference
if stack_column_type == dict:
# infer key/value type
keys = []; values = []
for row in head_row:
for val in row:
keys.append(val)
if val != None: values.append(row[val])
new_column_type = [
infer_type_of_list(keys),
infer_type_of_list(values)
]
else:
values = [v for v in itertools.chain.from_iterable(head_row)]
new_column_type = [infer_type_of_list(values)]
with cython_context():
return SFrame(_proxy=self.__proxy__.stack(column_name,
new_column_name,
new_column_type, drop_na))
def unstack(self, column, new_column_name=None):
"""
Concatenate values from one or two columns into one column, grouping by
all other columns. The resulting column could be of type list, array or
dictionary. If ``column`` is a numeric column, the result will be of
array.array type. If ``column`` is a non-numeric column, the new column
will be of list type. If ``column`` is a list of two columns, the new
column will be of dict type where the keys are taken from the first
column in the list.
Parameters
----------
column : str | [str, str]
The column(s) that is(are) to be concatenated.
If str, then collapsed column type is either array or list.
If [str, str], then collapsed column type is dict
new_column_name : str, optional
New column name. If not given, a name is generated automatically.
Returns
-------
out : SFrame
A new SFrame containing the grouped columns as well as the new
column.
See Also
--------
stack : The inverse of unstack.
groupby : ``unstack`` is a special version of ``groupby`` that uses the
:mod:`~graphlab.aggregate.CONCAT` aggregator
Notes
-----
- There is no guarantee the resulting SFrame maintains the same order as
the original SFrame.
- Missing values are maintained during unstack.
- When unstacking into a dictionary, if there is more than one instance
of a given key for a particular group, an arbitrary value is selected.
Examples
--------
>>> sf = graphlab.SFrame({'count':[4, 2, 1, 1, 2, None],
... 'topic':['cat', 'cat', 'dog', 'elephant', 'elephant', 'fish'],
... 'word':['a', 'c', 'c', 'a', 'b', None]})
>>> sf.unstack(column=['word', 'count'], new_column_name='words')
+----------+------------------+
| topic | words |
+----------+------------------+
| elephant | {'a': 1, 'b': 2} |
| dog | {'c': 1} |
| cat | {'a': 4, 'c': 2} |
| fish | None |
+----------+------------------+
[4 rows x 2 columns]
>>> sf = graphlab.SFrame({'friend': [2, 3, 4, 5, 6, 4, 5, 2, 3],
... 'user': [1, 1, 1, 2, 2, 2, 3, 4, 4]})
>>> sf.unstack('friend', new_column_name='friends')
+------+-----------------------------+
| user | friends |
+------+-----------------------------+
| 3 | array('d', [5.0]) |
| 1 | array('d', [2.0, 4.0, 3.0]) |
| 2 | array('d', [5.0, 6.0, 4.0]) |
| 4 | array('d', [2.0, 3.0]) |
+------+-----------------------------+
[4 rows x 2 columns]
"""
if (type(column) != str and len(column) != 2):
raise TypeError("'column' parameter has to be either a string or a list of two strings.")
with cython_context():
if type(column) == str:
key_columns = [i for i in self.column_names() if i != column]
if new_column_name != None:
return self.groupby(key_columns, {new_column_name : aggregate.CONCAT(column)})
else:
return self.groupby(key_columns, aggregate.CONCAT(column))
elif len(column) == 2:
key_columns = [i for i in self.column_names() if i not in column]
if new_column_name != None:
return self.groupby(key_columns, {new_column_name: aggregate.CONCAT(column[0], column[1])})
else:
return self.groupby(key_columns, aggregate.CONCAT(column[0], column[1]))
def unique(self):
"""
Remove duplicate rows of the SFrame. Will not necessarily preserve the
order of the given SFrame in the new SFrame.
Returns
-------
out : SFrame
A new SFrame that contains the unique rows of the current SFrame.
Raises
------
TypeError
If any column in the SFrame is a dictionary type.
See Also
--------
SArray.unique
Examples
--------
>>> sf = graphlab.SFrame({'id':[1,2,3,3,4], 'value':[1,2,3,3,4]})
>>> sf
+----+-------+
| id | value |
+----+-------+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
| 3 | 3 |
| 4 | 4 |
+----+-------+
[5 rows x 2 columns]
>>> sf.unique()
+----+-------+
| id | value |
+----+-------+
| 2 | 2 |
| 4 | 4 |
| 3 | 3 |
| 1 | 1 |
+----+-------+
[4 rows x 2 columns]
"""
return self.groupby(self.column_names(),{})
def sort(self, sort_columns, ascending=True):
"""
Sort current SFrame by the given columns, using the given sort order.
Only columns of type str, int, float or datetime.datetime can be sorted.
Parameters
----------
sort_columns : str | list of str | list of (str, bool) pairs
Names of columns to be sorted. The result will be sorted first by
first column, followed by second column, and so on. All columns will
be sorted in the same order as governed by the `ascending`
parameter. To control the sort ordering for each column
individually, `sort_columns` must be a list of (str, bool) pairs.
Given this case, the first value is the column name and the second
value is a boolean indicating whether the sort order is ascending.
ascending : bool, optional
Sort all columns in the given order.
Returns
-------
out : SFrame
A new SFrame that is sorted according to given sort criteria
See Also
--------
topk
Examples
--------
Suppose 'sf' is an sframe that has three columns 'a', 'b', 'c'.
To sort by column 'a', ascending
>>> sf = graphlab.SFrame({'a':[1,3,2,1],
... 'b':['a','c','b','b'],
... 'c':['x','y','z','y']})
>>> sf
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 3 | c | y |
| 2 | b | z |
| 1 | b | y |
+---+---+---+
[4 rows x 3 columns]
>>> sf.sort('a')
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 1 | b | y |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a', descending
>>> sf.sort('a', ascending = False)
+---+---+---+
| a | b | c |
+---+---+---+
| 3 | c | y |
| 2 | b | z |
| 1 | a | x |
| 1 | b | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a' and 'b', all ascending
>>> sf.sort(['a', 'b'])
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 1 | b | y |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a' ascending, and then by column 'c' descending
>>> sf.sort([('a', True), ('c', False)])
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | b | y |
| 1 | a | x |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
"""
sort_column_names = []
sort_column_orders = []
# validate sort_columns
if (type(sort_columns) == str):
sort_column_names = [sort_columns]
elif (type(sort_columns) == list):
if (len(sort_columns) == 0):
raise ValueError("Please provide at least one column to sort")
first_param_types = set([type(i) for i in sort_columns])
if (len(first_param_types) != 1):
raise ValueError("sort_columns elements are not of the same type")
first_param_type = first_param_types.pop()
if (first_param_type == tuple):
sort_column_names = [i[0] for i in sort_columns]
sort_column_orders = [i[1] for i in sort_columns]
elif(first_param_type == str):
sort_column_names = sort_columns
else:
raise TypeError("sort_columns type is not supported")
else:
raise TypeError("sort_columns type is not correct. Supported types are str, list of str or list of (str,bool) pair.")
# use the second parameter if the sort order is not given
if (len(sort_column_orders) == 0):
sort_column_orders = [ascending for i in sort_column_names]
# make sure all column exists
my_column_names = set(self.column_names())
for column in sort_column_names:
if (type(column) != str):
raise TypeError("Only string parameter can be passed in as column names")
if (column not in my_column_names):
raise ValueError("SFrame has no column named: '" + str(column) + "'")
if (self[column].dtype() not in (str, int, float,datetime.datetime)):
raise TypeError("Only columns of type (str, int, float, datetime.datetime) can be sorted")
with cython_context():
return SFrame(_proxy=self.__proxy__.sort(sort_column_names, sort_column_orders))
def dropna(self, columns=None, how='any'):
"""
Remove missing values from an SFrame. A missing value is either ``None``
or ``NaN``. If ``how`` is 'any', a row will be removed if any of the
columns in the ``columns`` parameter contains at least one missing
value. If ``how`` is 'all', a row will be removed if all of the columns
in the ``columns`` parameter are missing values.
If the ``columns`` parameter is not specified, the default is to
consider all columns when searching for missing values.
Parameters
----------
columns : list or str, optional
The columns to use when looking for missing values. By default, all
columns are used.
how : {'any', 'all'}, optional
Specifies whether a row should be dropped if at least one column
has missing values, or if all columns have missing values. 'any' is
default.
Returns
-------
out : SFrame
SFrame with missing values removed (according to the given rules).
See Also
--------
dropna_split : Drops missing rows from the SFrame and returns them.
Examples
--------
Drop all missing values.
>>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> sf.dropna()
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
Drop rows where every value is missing.
>>> sf.dropna(how="all")
+------+---+
| a | b |
+------+---+
| 1 | a |
| None | b |
+------+---+
[2 rows x 2 columns]
Drop rows where column 'a' has a missing value.
>>> sf.dropna('a', how="all")
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
"""
# If the user gives me an empty list (the indicator to use all columns)
# NA values being dropped would not be the expected behavior. This
# is a NOOP, so let's not bother the server
if type(columns) is list and len(columns) == 0:
return SFrame(_proxy=self.__proxy__)
(columns, all_behavior) = self.__dropna_errchk(columns, how)
with cython_context():
return SFrame(_proxy=self.__proxy__.drop_missing_values(columns, all_behavior, False))
def dropna_split(self, columns=None, how='any'):
"""
Split rows with missing values from this SFrame. This function has the
same functionality as :py:func:`~graphlab.SFrame.dropna`, but returns a
tuple of two SFrames. The first item is the expected output from
:py:func:`~graphlab.SFrame.dropna`, and the second item contains all the
rows filtered out by the `dropna` algorithm.
Parameters
----------
columns : list or str, optional
The columns to use when looking for missing values. By default, all
columns are used.
how : {'any', 'all'}, optional
Specifies whether a row should be dropped if at least one column
has missing values, or if all columns have missing values. 'any' is
default.
Returns
-------
out : (SFrame, SFrame)
(SFrame with missing values removed,
SFrame with the removed missing values)
See Also
--------
dropna
Examples
--------
>>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> good, bad = sf.dropna_split()
>>> good
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
>>> bad
+------+------+
| a | b |
+------+------+
| None | b |
| None | None |
+------+------+
[2 rows x 2 columns]
"""
# If the user gives me an empty list (the indicator to use all columns)
# NA values being dropped would not be the expected behavior. This
# is a NOOP, so let's not bother the server
if type(columns) is list and len(columns) == 0:
return (SFrame(_proxy=self.__proxy__), SFrame())
(columns, all_behavior) = self.__dropna_errchk(columns, how)
sframe_tuple = self.__proxy__.drop_missing_values(columns, all_behavior, True)
if len(sframe_tuple) != 2:
raise RuntimeError("Did not return two SFrames!")
with cython_context():
return (SFrame(_proxy=sframe_tuple[0]), SFrame(_proxy=sframe_tuple[1]))
def __dropna_errchk(self, columns, how):
if columns is None:
# Default behavior is to consider every column, specified to
# the server by an empty list (to avoid sending all the column
# names in this case, since it is the most common case)
columns = list()
elif type(columns) is str:
columns = [columns]
elif type(columns) is not list:
raise TypeError("Must give columns as a list, str, or 'None'")
else:
# Verify that we are only passing strings in our list
list_types = set([type(i) for i in columns])
if (str not in list_types) or (len(list_types) > 1):
raise TypeError("All columns must be of 'str' type")
if how not in ['any','all']:
raise ValueError("Must specify 'any' or 'all'")
if how == 'all':
all_behavior = True
else:
all_behavior = False
return (columns, all_behavior)
def fillna(self, column, value):
"""
Fill all missing values with a given value in a given column. If the
``value`` is not the same type as the values in ``column``, this method
attempts to convert the value to the original column's type. If this
fails, an error is raised.
Parameters
----------
column : str
The name of the column to modify.
value : type convertible to SArray's type
The value used to replace all missing values.
Returns
-------
out : SFrame
A new SFrame with the specified value in place of missing values.
See Also
--------
dropna
Examples
--------
>>> sf = graphlab.SFrame({'a':[1, None, None],
... 'b':['13.1', '17.2', None]})
>>> sf = sf.fillna('a', 0)
>>> sf
+---+------+
| a | b |
+---+------+
| 1 | 13.1 |
| 0 | 17.2 |
| 0 | None |
+---+------+
[3 rows x 2 columns]
"""
# Normal error checking
if type(column) is not str:
raise TypeError("Must give column name as a str")
ret = self[self.column_names()]
ret[column] = ret[column].fillna(value)
return ret
def add_row_number(self, column_name='id', start=0):
"""
Returns a new SFrame with a new column that numbers each row
sequentially. By default the count starts at 0, but this can be changed
to a positive or negative number. The new column will be named with
the given column name. An error will be raised if the given column
name already exists in the SFrame.
Parameters
----------
column_name : str, optional
The name of the new column that will hold the row numbers.
start : int, optional
The number used to start the row number count.
Returns
-------
out : SFrame
The new SFrame with the new row number column added.
Notes
-----
The range of numbers is constrained by a signed 64-bit integer, so
beware of overflow if you think the results in the row number column
will be greater than 9 quintillion.
Examples
--------
>>> sf = graphlab.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> sf.add_row_number()
+----+------+------+
| id | a | b |
+----+------+------+
| 0 | 1 | a |
| 1 | None | b |
| 2 | None | None |
+----+------+------+
[3 rows x 3 columns]
"""
if type(column_name) is not str:
raise TypeError("Must give column_name as a str")
if type(start) is not int:
raise TypeError("Must give start as int")
if column_name in self.column_names():
raise RuntimeError("Column '" + column_name + "' already exists in the current SFrame")
the_col = _create_sequential_sarray(self.num_rows(), start)
# Make sure the row number column is the first column
new_sf = SFrame()
new_sf.add_column(the_col, column_name)
new_sf.add_columns(self)
return new_sf
def _group(self, key_columns):
"""
Left undocumented intentionally.
"""
gsf = GroupedSFrame(self, key_columns)
return gsf
@property
def shape(self):
"""
The shape of the SFrame, in a tuple. The first entry is the number of
rows, the second is the number of columns.
Examples
--------
>>> sf = graphlab.SFrame({'id':[1,2,3], 'val':['A','B','C']})
>>> sf.shape
(3, 2)
"""
return (self.num_rows(), self.num_cols())
@property
def __proxy__(self):
return self._proxy
@__proxy__.setter
def __proxy__(self, value):
assert type(value) is UnitySFrameProxy
self._cache = None
self._proxy = value
self._cache = None
| bsd-3-clause |
heyigor/aubio | python/demos/demo_mfcc.py | 7 | 1722 | #! /usr/bin/env python
import sys
from aubio import source, pvoc, mfcc
from numpy import array, vstack, zeros
win_s = 512 # fft size
hop_s = win_s / 4 # hop size
n_filters = 40
n_coeffs = 13
samplerate = 44100
if len(sys.argv) < 2:
print "Usage: %s <source_filename>" % sys.argv[0]
sys.exit(1)
source_filename = sys.argv[1]
samplerate = 0
if len( sys.argv ) > 2: samplerate = int(sys.argv[2])
s = source(source_filename, samplerate, hop_s)
samplerate = s.samplerate
p = pvoc(win_s, hop_s)
m = mfcc(win_s, n_filters, n_coeffs, samplerate)
desc = []
tdesc = []
mfccs = zeros([13,])
frames_read = 0
while True:
samples, read = s()
spec = p(samples)
mfcc_out = m(spec)
mfccs = vstack((mfccs, mfcc_out))
frames_read += read
if read < hop_s: break
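# At this point mfccs holds one row of n_coeffs coefficients per hop
# (plus the initial all-zero seed row used for vstack), and frames_read
# is the total number of samples consumed from the source.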
# do plotting
from numpy import arange
from demo_waveform_plot import get_waveform_plot
from demo_waveform_plot import set_xlabels_sample2time
import matplotlib.pyplot as plt
fig = plt.figure()
plt.rc('lines',linewidth='.8')
wave = plt.axes([0.1, 0.75, 0.8, 0.19])
get_waveform_plot( source_filename, samplerate, block_size = hop_s, ax = wave)
wave.xaxis.set_visible(False)
wave.yaxis.set_visible(False)
all_times = arange(mfccs.shape[0]) * hop_s
n_coeffs = mfccs.shape[1]
for i in range(n_coeffs):
ax = plt.axes ( [0.1, 0.75 - ((i+1) * 0.65 / n_coeffs), 0.8, 0.65 / n_coeffs], sharex = wave )
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.plot(all_times, mfccs.T[i])
# add time to the last axis
set_xlabels_sample2time( ax, frames_read, samplerate)
#plt.ylabel('spectral descriptor value')
ax.xaxis.set_visible(True)
wave.set_title('MFCC for %s' % source_filename)
plt.show()
| gpl-3.0 |
CaymanUnterborn/burnman | burnman/tools.py | 3 | 28641 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2017 by the BurnMan team, released under the GNU
# GPL v2 or later.
from __future__ import absolute_import
from __future__ import print_function
import operator
import bisect
import os
import pkgutil
import numpy as np
from scipy.optimize import fsolve, curve_fit
from scipy.ndimage.filters import gaussian_filter
from scipy.interpolate import interp2d
from collections import Counter
import itertools
from . import constants
import itertools
def copy_documentation(copy_from):
"""
Decorator @copy_documentation(another_function) will copy the documentation found in a different
function (for example from a base class). The docstring applied to some function a() will be ::
(copied from BaseClass.some_function):
<documentation from BaseClass.some_function>
<optionally the documentation found in a()>
"""
def mydecorator(func):
def wrapper(*args):
return func(*args)
old = ""
if func.__doc__:
old = "\n" + func.__doc__
copied_from = ""
if hasattr(copy_from, "__name__"):
copied_from = "(copied from " + copy_from.__name__ + "):\n"
wrapper.__doc__ = copied_from + copy_from.__doc__ + old
wrapper.__name__ = func.__name__
return wrapper
return mydecorator
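# A minimal usage sketch of copy_documentation (the Base/Derived classes
# below are hypothetical, not part of BurnMan): the decorated method gets
# Base.evaluate's docstring prefixed with a "(copied from ...)" note.
#
#     class Base(object):
#         def evaluate(self, x):
#             """Evaluate the property at x."""
#             raise NotImplementedError()
#
#     class Derived(Base):
#         @copy_documentation(Base.evaluate)
#         def evaluate(self, x):
#             return 2.0 * x
#
#     help(Derived.evaluate)  # shows Base.evaluate's docstring plus the note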
def flatten(l): return flatten(l[0]) + (flatten(l[1:]) if len(l) > 1 else []) if type(l) is list or type(l) is np.ndarray else [l]
def round_to_n(x, xerr, n):
return round(x, -int(np.floor(np.log10(np.abs(xerr)))) + (n - 1))
def unit_normalize(a, order=2, axis=-1):
"""
Calculates the normalized array of a numpy array a, using a norm of
the given order (L2 by default) along the given axis.
"""
l2 = np.atleast_1d(np.apply_along_axis(np.linalg.norm, axis, a, order))
l2[l2==0] = 1
return a / np.expand_dims(l2, axis)[0][0]
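# Illustrative check (not part of the original module): with the default
# order=2 and axis=-1, unit_normalize(np.array([3.0, 4.0])) returns
# array([0.6, 0.8]), since the L2 norm of [3, 4] is 5.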
def pretty_print_values(popt, pcov, params):
"""
Takes a numpy array of parameters, the corresponding covariance matrix
and a set of parameter names and prints the parameters and
principal 1-s.d. uncertainties (np.sqrt(pcov[i][i]))
in a nice text based format.
"""
for i, p in enumerate(params):
p_rnd = round_to_n(popt[i], np.sqrt(pcov[i][i]), 1)
c_rnd = round_to_n(np.sqrt(pcov[i][i]), np.sqrt(pcov[i][i]), 1)
if p_rnd != 0.:
p_expnt = np.floor(np.log10(np.abs(p_rnd)))
else:
p_expnt = 0.
scale = np.power(10., p_expnt)
nd = p_expnt - np.floor(np.log10(np.abs(c_rnd)))
print ('{0:s}: ({1:{4}{5}f} +/- {2:{4}{5}f}) x {3:.0e}'.format(p, p_rnd/scale, c_rnd/scale, scale, 0, (nd)/10.))
def pretty_print_table(table, use_tabs=False):
"""
Takes a 2d table and prints it in a nice text based format. If
use_tabs=True then only \t is used as a separator. This is useful for
importing the data into other apps (Excel, ...). The default is to pad
the columns with spaces to make them look neat. The first column is
left aligned, while the remainder is right aligned.
"""
if use_tabs:
for r in table:
print("\t".join(r).replace("_", "\_"))
return
def col_width(table, colidx):
return max([len(str(row[colidx])) for row in table])
# create a format string with the first column left aligned, the others right
# example: {:<27}{:>11}{:>6}{:>8}
frmt = "".join(
[('{:<' if i == 0 else '{:>') + str(1 + col_width(table, i)) + '}' for i in range(len(table[0]))])
for r in table:
print(frmt.format(*r))
def pretty_plot():
"""
Makes pretty plots. Overwrites the matplotlib default settings to allow
for better fonts. Slows down plotting
"""
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rcParams['text.latex.preamble'] = '\\usepackage{relsize}'
plt.rc('font', family='sans-serif')
def sort_table(table, col=0):
"""
Sort the table according to the column number
"""
return sorted(table, key=operator.itemgetter(col))
def float_eq(a, b):
"""
Test if two floats are almost equal to each other
"""
return abs(a - b) < 1e-10 * max(1e-5, abs(a), abs(b))
def linear_interpol(x, x1, x2, y1, y2):
"""
Linearly interpolate to point x, between
the points (x1,y1), (x2,y2)
"""
assert(x1 <= x)
assert(x2 >= x)
assert(x1 <= x2)
alpha = (x - x1) / (x2 - x1)
return (1. - alpha) * y1 + alpha * y2
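# Read a whitespace-delimited numeric table shipped with the burnman package
# (looked up under burnman/data/), skipping lines that start with '#'.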
def read_table(filename):
datastream = pkgutil.get_data('burnman', 'data/' + filename)
datalines = [line.strip()
for line in datastream.decode('ascii').split('\n') if line.strip()]
table = []
for line in datalines:
if (line[0] != '#'):
numbers = np.fromstring(line, sep=' ')
table.append(numbers)
return np.array(table)
def array_from_file(filename):
"""
Generic function to read a file containing floats and commented lines
into a 2D numpy array.
Commented lines are prefixed by the characters # or %.
"""
f = open(filename, 'r')
data = []
datastream = f.read()
f.close()
datalines = [line.strip().split()
for line in datastream.split('\n') if line.strip()]
for line in datalines:
if line[0] != "#" and line[0] != "%":
            data.append([float(x) for x in line])
    data = np.array(list(zip(*data)))
return data
def cut_table(table, min_value, max_value):
tablen = []
for i in range(min_value, max_value, 1):
tablen.append(table[i, :])
return tablen
def lookup_and_interpolate(table_x, table_y, x_value):
idx = bisect.bisect_left(table_x, x_value) - 1
if (idx < 0):
return table_y[0]
elif (idx < len(table_x) - 1):
return linear_interpol(x_value, table_x[idx], table_x[idx + 1],
table_y[idx], table_y[idx + 1])
else:
return table_y[idx]
def molar_volume_from_unit_cell_volume(unit_cell_v, z):
"""
Converts a unit cell volume from Angstroms^3 per unitcell,
to m^3/mol.
Parameters
----------
unit_cell_v : float
        Unit cell volume [A^3/unit cell]
z : float
Number of formula units per unit cell
Returns
-------
V : float
Volume [m^3/mol]
"""
V = unit_cell_v * constants.Avogadro / 1.e30 / z
return V
def equilibrium_pressure(minerals, stoichiometry, temperature, pressure_initial_guess=1.e5):
"""
Given a list of minerals, their reaction stoichiometries
and a temperature of interest, compute the
equilibrium pressure of the reaction.
Parameters
----------
minerals : list of minerals
List of minerals involved in the reaction.
stoichiometry : list of floats
Reaction stoichiometry for the minerals provided.
Reactants and products should have the opposite signs [mol]
temperature : float
Temperature of interest [K]
pressure_initial_guess : optional float
Initial pressure guess [Pa]
Returns
-------
pressure : float
The equilibrium pressure of the reaction [Pa]
"""
def eqm(P, T):
gibbs = 0.
for i, mineral in enumerate(minerals):
mineral.set_state(P[0], T)
gibbs = gibbs + mineral.gibbs * stoichiometry[i]
return gibbs
    pressure = fsolve(eqm, [pressure_initial_guess], args=(temperature, ))[0]
return pressure
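# Example usage (sketch; fo and fo_hp are assumed to be burnman mineral objects
# for the low- and high-pressure polymorphs of interest):
#   P_eqm = equilibrium_pressure([fo, fo_hp], [1.0, -1.0], temperature=1600.)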
def equilibrium_temperature(minerals, stoichiometry, pressure, temperature_initial_guess=1000.):
"""
Given a list of minerals, their reaction stoichiometries
and a pressure of interest, compute the
equilibrium temperature of the reaction.
Parameters
----------
minerals : list of minerals
List of minerals involved in the reaction.
stoichiometry : list of floats
Reaction stoichiometry for the minerals provided.
Reactants and products should have the opposite signs [mol]
pressure : float
Pressure of interest [Pa]
temperature_initial_guess : optional float
Initial temperature guess [K]
Returns
-------
temperature : float
The equilibrium temperature of the reaction [K]
"""
def eqm(T, P):
gibbs = 0.
for i, mineral in enumerate(minerals):
mineral.set_state(P, T[0])
gibbs = gibbs + mineral.gibbs * stoichiometry[i]
return gibbs
    temperature = fsolve(eqm, [temperature_initial_guess], args=(pressure, ))[0]
return temperature
def invariant_point(minerals_r1, stoichiometry_r1,
minerals_r2, stoichiometry_r2,
pressure_temperature_initial_guess=[1.e9, 1000.]):
"""
    Given the mineral lists and reaction stoichiometries of two reactions,
    compute the pressure and temperature of the invariant point at which
    both reactions are simultaneously in equilibrium.
    Parameters
    ----------
    minerals_r1, minerals_r2 : lists of minerals
        Lists of minerals involved in each of the two reactions.
    stoichiometry_r1, stoichiometry_r2 : lists of floats
        Reaction stoichiometries for the minerals provided.
        Reactants and products should have the opposite signs [mol]
    pressure_temperature_initial_guess : optional list of two floats
        Initial guesses for the pressure [Pa] and temperature [K]
    Returns
    -------
    pressure, temperature : floats
        The pressure [Pa] and temperature [K] of the invariant point
"""
def eqm(PT):
P, T = PT
gibbs_r1 = 0.
for i, mineral in enumerate(minerals_r1):
mineral.set_state(P, T)
gibbs_r1 = gibbs_r1 + mineral.gibbs * stoichiometry_r1[i]
gibbs_r2 = 0.
for i, mineral in enumerate(minerals_r2):
mineral.set_state(P, T)
gibbs_r2 = gibbs_r2 + mineral.gibbs * stoichiometry_r2[i]
return [gibbs_r1, gibbs_r2]
pressure, temperature = fsolve(eqm, pressure_temperature_initial_guess)
return pressure, temperature
def hugoniot(mineral, P_ref, T_ref, pressures, reference_mineral=None):
"""
Calculates the temperatures (and volumes) along a Hugoniot
as a function of pressure according to the Hugoniot equation
U2-U1 = 0.5*(p2 - p1)(V1 - V2) where U and V are the
internal energies and volumes (mass or molar) and U = F + TS
Parameters
----------
mineral : mineral
Mineral for which the Hugoniot is to be calculated.
P_ref : float
Reference pressure [Pa]
T_ref : float
Reference temperature [K]
pressures : numpy array of floats
Set of pressures [Pa] for which the Hugoniot temperature
and volume should be calculated
reference_mineral : mineral
Mineral which is stable at the reference conditions
Provides an alternative U_0 and V_0 when the reference
mineral transforms to the mineral of interest at some
(unspecified) pressure.
Returns
-------
temperatures : numpy array of floats
The Hugoniot temperatures at pressure
volumes : numpy array of floats
The Hugoniot volumes at pressure
"""
def Ediff(T, mineral, P, P_ref, U_ref, V_ref):
mineral.set_state(P, T[0])
U = mineral.helmholtz + T[0] * mineral.S
V = mineral.V
return (U - U_ref) - 0.5 * (P - P_ref) * (V_ref - V)
if reference_mineral is None:
reference_mineral = mineral
reference_mineral.set_state(P_ref, T_ref)
U_ref = reference_mineral.helmholtz + T_ref * reference_mineral.S
V_ref = reference_mineral.V
temperatures = np.empty_like(pressures)
volumes = np.empty_like(pressures)
for i, P in enumerate(pressures):
temperatures[i] = fsolve(
Ediff, [T_ref], args=(mineral, P, P_ref, U_ref, V_ref))[0]
volumes[i] = mineral.V
return temperatures, volumes
def convert_fractions(composite, phase_fractions, input_type, output_type):
"""
Takes a composite with a set of user defined molar, volume
or mass fractions (which do not have to be the fractions
currently associated with the composite) and
converts the fractions to molar, mass or volume.
Conversions to and from mass require a molar mass to be
defined for all phases. Conversions to and from volume
require set_state to have been called for the composite.
Parameters
----------
composite : Composite
Composite for which fractions are to be defined.
phase_fractions : list of floats
List of input phase fractions (of type input_type)
input_type : string
Input fraction type: 'molar', 'mass' or 'volume'
output_type : string
Output fraction type: 'molar', 'mass' or 'volume'
Returns
-------
output_fractions : list of floats
List of output phase fractions (of type output_type)
"""
if input_type == 'volume' or output_type == 'volume':
        if composite.temperature is None:
raise Exception(
composite.to_string() + ".set_state(P, T) has not been called, so volume fractions are currently undefined. Exiting.")
if input_type == 'molar':
molar_fractions = phase_fractions
if input_type == 'volume':
total_moles = sum(
volume_fraction / phase.molar_volume for volume_fraction,
phase in zip(phase_fractions, composite.phases))
molar_fractions = [volume_fraction / (phase.molar_volume * total_moles)
for volume_fraction, phase in zip(phase_fractions, composite.phases)]
if input_type == 'mass':
total_moles = sum(mass_fraction / phase.molar_mass for mass_fraction,
phase in zip(phase_fractions, composite.phases))
molar_fractions = [mass_fraction / (phase.molar_mass * total_moles)
for mass_fraction, phase in zip(phase_fractions, composite.phases)]
if output_type == 'volume':
total_volume = sum(
molar_fraction * phase.molar_volume for molar_fraction,
phase in zip(molar_fractions, composite.phases))
output_fractions = [molar_fraction * phase.molar_volume /
total_volume for molar_fraction, phase in zip(molar_fractions, composite.phases)]
elif output_type == 'mass':
total_mass = sum(molar_fraction * phase.molar_mass for molar_fraction,
phase in zip(molar_fractions, composite.phases))
output_fractions = [molar_fraction * phase.molar_mass /
total_mass for molar_fraction, phase in zip(molar_fractions, composite.phases)]
elif output_type == 'molar':
output_fractions = molar_fractions
return output_fractions
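# Example usage (sketch; composite is assumed to be a burnman.Composite with
# two phases): convert equal molar fractions to mass fractions:
#   mass_fractions = convert_fractions(composite, [0.5, 0.5], 'molar', 'mass')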
def bracket(fn, x0, dx, args=(), ratio=1.618, maxiter=100):
"""
Given a function and a starting guess, find two
inputs for the function that bracket a root.
Parameters
----------
fn : function
The function to bracket
x0 : float
The starting guess
dx : float
Small step for starting the search
args : parameter list
Additional arguments to give to fn
    ratio : float
The step size increases by this ratio
every step in the search. Defaults to
the golden ratio.
maxiter : int
The maximum number of steps before giving up.
Returns
-------
xa, xb, fa, fb: floats
xa and xb are the inputs which bracket a root of fn.
fa and fb are the values of the function at those points.
If the bracket function takes more than maxiter steps,
it raises a ValueError.
"""
niter = 0
dx = np.abs(dx)
assert(ratio > 1.0)
# Get the starting positions
f0 = fn(x0, *args)
x_left = x0 - dx
x_right = x0 + dx
f_left = fn(x_left, *args)
f_right = fn(x_right, *args)
# Overshot zero, try making dx smaller
if (f0 - f_left) * (f_right - f0) < 0.:
while (f0 - f_left) * (f_right - f0) < 0. and dx > np.finfo('float').eps and niter < maxiter:
dx /= ratio
x_left = x0 - dx
x_right = x0 + dx
f_left = fn(x_left, *args)
f_right = fn(x_right, *args)
niter += 1
if niter == maxiter: # Couldn't find something with same slope in both directions
raise ValueError('Cannot find zero.')
niter = 0
slope = f_right - f0
if slope > 0. and f0 > 0.: # Walk left
dx = -dx
x1 = x_left
f1 = f_left
elif slope > 0. and f0 < 0.: # Walk right
x1 = x_right
f1 = f_right
elif slope < 0. and f0 > 0: # Walk right
x1 = x_right
f1 = f_right
else: # Walk left
dx = -dx
x1 = x_left
f1 = f_left
# Do the walking
while f0 * f1 > 0. and niter < maxiter:
dx *= ratio
xnew = x1 + dx
fnew = fn(xnew, *args)
x0 = x1
f0 = f1
x1 = xnew
f1 = fnew
niter += 1
if f0 * f1 > 0.:
raise ValueError('Cannot find zero.')
else:
return x0, x1, f0, f1
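# Example usage (sketch): bracket a root of a one-dimensional function f and
# then refine it with a standard root finder such as scipy.optimize.brentq
# (not imported in this module):
#   xa, xb, fa, fb = bracket(f, x0=1.0, dx=1.e-2)
#   root = brentq(f, xa, xb)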
def check_eos_consistency(m, P=1.e9, T=300., tol=1.e-4, verbose=False):
"""
Compute numerical derivatives of the gibbs free energy of a mineral
under given conditions, and check these values against those provided
analytically by the equation of state
Parameters
----------
m : mineral
The mineral for which the equation of state
is to be checked for consistency
P : float
The pressure at which to check consistency
T : float
The temperature at which to check consistency
tol : float
The fractional tolerance for each of the checks
verbose : boolean
Decide whether to print information about each
check
Returns
-------
consistency: boolean
If all checks pass, returns True
"""
dT = 1.
dP = 1000.
m.set_state(P, T)
G0 = m.gibbs
S0 = m.S
V0 = m.V
expr = ['G = F + PV', 'G = H - TS', 'G = E - TS + PV']
eq = [[m.gibbs, (m.helmholtz + P*m.V)],
[m.gibbs, (m.H - T*m.S)],
[m.gibbs, (m.internal_energy - T*m.S + P*m.V)]]
m.set_state(P, T + dT)
G1 = m.gibbs
S1 = m.S
V1 = m.V
m.set_state(P + dP, T)
G2 = m.gibbs
V2 = m.V
# T derivatives
m.set_state(P, T + 0.5*dT)
expr.extend(['S = -dG/dT', 'alpha = 1/V dV/dT', 'C_p = T dS/dT'])
eq.extend([[m.S, -(G1 - G0)/dT],
[m.alpha, (V1 - V0)/dT/m.V],
[m.heat_capacity_p, (T + 0.5*dT)*(S1 - S0)/dT]])
# P derivatives
m.set_state(P + 0.5*dP, T)
expr.extend(['V = dG/dP', 'K_T = -V dP/dV'])
eq.extend([[m.V, (G2 - G0)/dP],
[m.K_T, -0.5*(V2 + V0)*dP/(V2 - V0)]])
expr.extend(['C_v = Cp - alpha^2*K_T*V*T', 'K_S = K_T*Cp/Cv', 'gr = alpha*K_T*V/Cv'])
eq.extend([[m.heat_capacity_v, m.heat_capacity_p - m.alpha*m.alpha*m.K_T*m.V*T],
[m.K_S, m.K_T*m.heat_capacity_p/m.heat_capacity_v],
[m.gr, m.alpha*m.K_T*m.V/m.heat_capacity_v]])
expr.extend(['Vphi = np.sqrt(K_S/rho)', 'Vp = np.sqrt((K_S + 4G/3)/rho)', 'Vs = np.sqrt(G_S/rho)'])
eq.extend([[m.bulk_sound_velocity, np.sqrt(m.K_S/m.rho)],
[m.p_wave_velocity, np.sqrt((m.K_S + 4.*m.G/3.)/m.rho)],
[m.shear_wave_velocity, np.sqrt(m.G/m.rho)]])
consistencies = [np.abs(e[0] - e[1]) < np.abs(tol*e[1]) + np.finfo('float').eps for e in eq]
consistency = np.all(consistencies)
    if verbose:
print('Checking EoS consistency for {0:s}'.format(m.to_string()))
print('Expressions within tolerance of {0:2f}'.format(tol))
for i, c in enumerate(consistencies):
print('{0:10s} : {1:5s}'.format(expr[i], str(c)))
        if consistency:
print('All EoS consistency constraints satisfied for {0:s}'.format(m.to_string()))
else:
            print('Not all EoS consistency constraints are satisfied for {0:s}'.format(m.to_string()))
return consistency
def _pad_ndarray_inverse_mirror(array, padding):
"""
Pads an ndarray according to an inverse mirror
scheme. For example, for a 1D array
[2, 4, 6, 7, 8] padded by 3 cells, we have:
padding | original array | padding
-3 -2 0 | 2 4 6 7 8 | 9 10 12
Parameters
----------
array : numpy ndarray
The array to be padded
padding : tuple
The number of elements with which to pad the
array in each dimension.
Returns
-------
padded_array: numpy ndarray
The padded array
"""
padded_shape = [n + 2*padding[i] for i, n in enumerate(array.shape)]
padded_array = np.zeros(padded_shape)
slices = tuple([ slice(padding[i], padding[i] + l) for i, l in enumerate(array.shape)])
padded_array[slices] = array
padded_array_indices = list(itertools.product(*[range(n + 2*padding[i]) for i, n in enumerate(array.shape)]))
inserted_indices = list(itertools.product(*[range(padding[i], padding[i] + l) for i, l in enumerate(array.shape)]))
padded_array_indices.extend(inserted_indices)
counter = Counter(padded_array_indices)
keys = list(counter.keys())
padded_indices = [keys[i] for i, value in enumerate(counter.values()) if value == 1]
edge_indices = tuple([tuple([np.min([np.max([axis_idx, padding[dimension]]), padded_array.shape[dimension] - padding[dimension] - 1])
for dimension, axis_idx in enumerate(idx)]) for idx in padded_indices])
mirror_indices = tuple([tuple([2*edge_indices[i][j] - padded_indices[i][j] for j in range(len(array.shape))]) for i in range(len(padded_indices))])
for i, idx in enumerate(padded_indices):
padded_array[idx] = 2.*padded_array[edge_indices[i]] - padded_array[mirror_indices[i]]
return padded_array
def smooth_array(array, grid_spacing,
gaussian_rms_widths, truncate=4.0,
mode='inverse_mirror'):
"""
Creates a smoothed array by convolving it with a gaussian filter.
Grid resolutions and gaussian RMS widths are required for each of
the axes of the numpy array. The smoothing is truncated at a
user-defined number of standard deviations. The edges of the array
can be padded in a number of different ways given by the
'mode' parameter.
Parameters
----------
array : numpy ndarray
The array to smooth
grid_spacing : numpy array of floats
The spacing of points along each axis
gaussian_rms_widths : numpy array of floats
The Gaussian RMS widths/standard deviations for the
Gaussian convolution.
truncate : float (default=4.)
The number of standard deviations at which to truncate
the smoothing.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap', 'inverse_mirror'}
        The mode parameter determines how the array borders are handled,
        either directly by scipy.ndimage.filters.gaussian_filter or by padding.
Default is 'inverse_mirror', which uses
burnman.tools._pad_ndarray_inverse_mirror().
Returns
-------
smoothed_array: numpy ndarray
The smoothed array
"""
# gaussian_filter works with standard deviations normalised to
# the grid spacing.
sigma = tuple(np.array(gaussian_rms_widths)/np.array(grid_spacing))
if mode == 'inverse_mirror':
padding = tuple([int(np.ceil(truncate*s)) for s in sigma])
padded_array = _pad_ndarray_inverse_mirror(array, padding)
smoothed_padded_array = gaussian_filter(padded_array,
sigma=sigma)
slices = tuple([ slice(padding[i], padding[i] + l) for i, l in enumerate(array.shape)])
smoothed_array = smoothed_padded_array[slices]
else:
smoothed_array = gaussian_filter(array, sigma=sigma, mode=mode)
return smoothed_array
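# Example usage (sketch, with illustrative numbers): smooth a property grid
# spaced at 1 GPa x 10 K using 2 GPa x 50 K Gaussian widths:
#   smoothed = smooth_array(prop_grid, np.array([1.e9, 10.]),
#                           np.array([2.e9, 50.]))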
def interp_smoothed_array_and_derivatives(array,
x_values, y_values,
x_stdev=0., y_stdev=0.,
truncate=4.,
mode='inverse_mirror',
indexing='xy'):
"""
Creates a smoothed array on a regular 2D grid. Smoothing
is achieved using burnman.tools.smooth_array().
Outputs scipy.interpolate.interp2d() interpolators
which can be used to query the array, or its derivatives in the
x- and y- directions.
Parameters
----------
array : 2D numpy array
The array to smooth. Each element array[i][j]
corresponds to the position x_values[i], y_values[j]
x_values : 1D numpy array
The gridded x values over which to create the smoothed grid
y_values : 1D numpy array
The gridded y_values over which to create the smoothed grid
x_stdev : float
The standard deviation for the Gaussian filter along the x axis
y_stdev : float
        The standard deviation for the Gaussian filter along the y axis
truncate : float (optional)
The number of standard deviations at which to truncate
the smoothing (default = 4.).
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap', 'inverse_mirror'}
        The mode parameter determines how the array borders are handled,
        either directly by scipy.ndimage.filters.gaussian_filter or by padding.
Default is 'inverse_mirror', which uses
burnman.tools._pad_ndarray_inverse_mirror().
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See numpy.meshgrid for more details.
Returns
-------
interps: tuple of three interp2d functors
interpolation functions for the smoothed property and
the first derivatives with respect to x and y.
"""
dx = x_values[1] - x_values[0]
dy = y_values[1] - y_values[0]
if indexing == 'xy':
smoothed_array = smooth_array(array = array,
grid_spacing = np.array([dy, dx]),
gaussian_rms_widths = np.array([y_stdev, x_stdev]),
truncate=truncate,
mode=mode)
elif indexing == 'ij':
smoothed_array = smooth_array(array = array,
grid_spacing = np.array([dx, dy]),
gaussian_rms_widths = np.array([x_stdev, y_stdev]),
truncate=truncate,
mode=mode).T
else:
raise Exception('Indexing scheme not recognised. Should be ij or xy.')
dSAdydy, dSAdxdx = np.gradient(smoothed_array)
interps = (interp2d(x_values, y_values, smoothed_array, kind='linear'),
interp2d(x_values, y_values, dSAdxdx/dx, kind='linear'),
interp2d(x_values, y_values, dSAdydy/dy, kind='linear'))
return interps
def attribute_function(m, attributes, powers=[]):
"""
Function which returns a function which can be used to
evaluate material properties at a point. This function
allows the user to define the property returned
as a string. The function can itself be passed to another
function
(such as nonlinear_fitting.confidence_prediction_bands()).
Properties can either be simple attributes (e.g. K_T) or
a product of attributes, each raised to some power.
Parameters
----------
m : Material
The material instance evaluated by the output function.
attributes : list of strings
The list of material attributes / properties to
be evaluated in the product
powers : list of floats
The powers to which each attribute should be raised
during evaluation
Returns
-------
f : function(x)
Function which returns the value of product(a_i**p_i)
as a function of condition (x = [P, T, V])
"""
if type(attributes) is str:
attributes = [attributes]
if powers == []:
powers = [1. for a in attributes]
def f(x):
P, T, V = x
m.set_state(P, T)
value = 1.
for a, p in zip(*[attributes, powers]):
value *= np.power(getattr(m, a), p)
return value
return f
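# Example usage (sketch): a function returning the bulk sound speed
# sqrt(K_S/rho) of a burnman material m, evaluated at a state x = [P, T, V]:
#   v_phi = attribute_function(m, ['K_S', 'rho'], [0.5, -0.5])
#   value = v_phi([P, T, V])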
| gpl-2.0 |
pprett/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
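# For a squared loss, the expected error of an estimator y_hat at a point x
# decomposes as
#   E[(y(x) - y_hat(x))^2] = noise(x) + bias^2(x) + variance(x),
# and the loops below estimate each of these terms empirically over n_repeat
# randomly drawn training sets.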
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
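    # y_error averages (y_test[:, j] - y_predict[:, i]) ** 2 over all pairs of
    # a noise realisation j and a training set i, giving an empirical estimate
    # of the expected squared error at each test point.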
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
mira67/DL4RS | MPFprj/test.py | 1 | 4132 | """
Test Main
Update config.ini for model and data
Author: Qi Liu, 11/2016
"""
import os
import logging
import timeit
import ConfigParser
import numpy as np
from prjutil import read_config
from keras.models import load_model
from dbaccess import pdread,sqlwrite
import pandas as pd
import glob
from sklearn.preprocessing import normalize
#
import multiprocessing
def model_test(p):
#load model
model = load_model(p['model_path']+p['model_name'])
df = pdread(p['test_sql'])
df = df.replace(-9999, 0)
#df['julian'][(df['julian'] < 213) & (df['julian'] > 152)] = 0.1
basen = 2
julian_m = 197.5
alpha = 10
beta = 0.01
#df['julian'] = alpha*np.power(df['julian']-julian_m, basen)/np.power(274,basen)+beta
    print(df.head(n=5))
#df = minmaxscaler(df)
data = df.as_matrix()
X_predict = data[:,0:]
#X_predict = normalize(X_predict,norm='l2',axis=0)
# print X_predict[1:5,:]
#predict with model
Y_predict = model.predict(X_predict)*100
df = pd.DataFrame(Y_predict, columns=['MPF', 'IF', 'WF'])
#record to mysql
df.to_csv(p['result_path']+p['test_result_csv'], sep=',', encoding='utf-8')
#also write to database
sqlwrite(p['result_path'], p['test_result_csv'], p['csvtosql'])
def dual_model(p):
#load model
model = load_model(p['model_path']+p['model_name'])
#model2 = load_model(p['model_path']+p['model_name2'])
df = pdread(p['test_sql'])
df = df.replace(-9999, 0)
#print df.head(n=5)
data = df.as_matrix()
attr_n = 7
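    # keep the first attr_n metadata columns so they can be written out next
    # to the predicted fractions; only the fea_num band values that follow
    # are fed to the network.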
    attr = data[:,0:attr_n]  # year, month, day, nrow, ncol, qc, cloud
X_predict = data[:,attr_n:attr_n+p['fea_num']]
d_month = data[:,p['fea_num']]
#X_predict = normalize(X_predict,norm='l2',axis=0)
    print(X_predict[1:attr_n, :])
Y_predict = model.predict(X_predict)*100
final_data = np.concatenate((attr,Y_predict), axis=1)
    print(final_data.shape)
df = pd.DataFrame(final_data, columns=['year','month','day','nrow','ncol','qc','cloud','MPF', 'IF', 'WF'])
#record to mysql
with open(p['result_path']+p['test_result_csv'], 'a') as f:
df.to_csv(f, sep=',', encoding='utf-8',header=False)
#also write to database
sqlwrite(p['result_path'], p['test_result_csv'], p['csvtosql'])
def fast_test(p):
#create processing
pc = multiprocessing.Pool(processes = 1)
#load test files
tlist = glob.glob(p['test_path']+'*.csv')
for tf in tlist:
        # submit each file to the worker pool; with processes=1 above the
        # files are handled one at a time, but the pool size can be raised
        # to use more CPU cores.
pc.apply_async(arctic_test, [tf,p])
pc.close()
pc.join() # Wait for all child processes to close.
def arctic_test(tf,p):
#load model
model = load_model(p['model_path']+p['model_name'])
df = pd.read_csv(tf, sep=",", skiprows=1, names = ["year","month","day","nrow","ncol","qc","cloud","b1","b2","b3","b4","b5","b6","b7"])
nsplit = os.path.basename(tf).split('.')
    print(nsplit[0])
df = df.replace(-9999, 0)
#run through model
data = df.as_matrix()
attr_n = 7
X_predict = data[:,attr_n:attr_n+p['fea_num']]
Y_predict = model.predict(X_predict)*100
final_data = np.concatenate((data,Y_predict), axis=1)
df = pd.DataFrame(final_data, columns=["year","month","day","nrow","ncol","qc","cloud","b1","b2","b3","b4","b5","b6","b7","mpf","if","wf"])
    df.loc[df['b1'] < -2, 'mpf'] = -9999
    df.loc[df['b1'] < -2, 'if'] = -9999
    df.loc[df['b1'] < -2, 'wf'] = -9999
#record to mysql
with open(p['result_path']+os.path.basename(tf), 'w') as f:
df.to_csv(f, sep=',', encoding='utf-8',index=False)
def main():
logging.basicConfig(filename='testing.log', level=logging.INFO)
logging.info('Started Testing')
#read config
p = read_config();
logging.info('Testing with Model: ' + str(p['model_id']))
#model_test(p)
#dual_model(p)
fast_test(p)
#sqlwrite(p['result_path'], p['test_result_csv'], p['csvtosql'])
os.system('espeak "done"')
if __name__ == '__main__':
logging.info("Testing Time (s): " + str(timeit.timeit("main()", setup="from __main__ import main", number=1)))
| gpl-3.0 |
spallavolu/scikit-learn | sklearn/decomposition/nmf.py | 100 | 19059 | """ Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <[email protected]>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted, check_non_negative
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
n_components : array, [n_components, n_features]
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
random_state = check_random_state(random_state)
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
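    # Projected gradient iterations: each outer step takes a gradient step of
    # size alpha, projects the result onto the non-negative orthant, and
    # adapts alpha with a line search based on the sufficient decrease test.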
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
Number of components, if n_components is not set all components
are kept
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
rng = check_random_state(self.random_state)
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_, random_state=rng)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a',
random_state=rng)
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar',
random_state=rng)
elif init == "random":
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
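        # Tolerances for the W and H subproblems and for the overall stopping
        # criterion are scaled by the norm of the initial projected gradient,
        # so convergence is judged relative to the starting point.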
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
| bsd-3-clause |
arabenjamin/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 261 | 2836 | # Author: Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
assert_true(h not in previous_hashes,
"Found collision on growing empty string")
def test_uniform_distribution():
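    # Hash the integers 0..n_samples-1 into n_bins buckets and check that the
    # observed bucket frequencies match the uniform expectation to within two
    # decimal places of the observed/expected ratio.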
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
| bsd-3-clause |
smrjan/seldon-server | docker/pyseldon/scripts/start_prediction_default_rpc_microservice.py | 2 | 1110 | from concurrent import futures
import time
import sys, getopt, argparse
import seldon.pipeline.util as sutl
import random
import grpc
import google.protobuf
from google.protobuf import any_pb2
import pandas as pd
from seldon.microservice.rpc import CustomDataHandler
from seldon.microservice import Microservices
if __name__ == "__main__":
import logging
logger = logging.getLogger()
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(name)s : %(message)s', level=logging.DEBUG)
logger.setLevel(logging.INFO)
parser = argparse.ArgumentParser(prog='microservice')
parser.add_argument('--model-name', help='name of model', required=True)
parser.add_argument('--pipeline', help='location of prediction pipeline', required=True)
parser.add_argument('--aws-key', help='aws key', required=False)
parser.add_argument('--aws-secret', help='aws secret', required=False)
args = parser.parse_args()
opts = vars(args)
m = Microservices(aws_key=args.aws_key,aws_secret=args.aws_secret)
m.create_prediction_rpc_microservice(args.pipeline,args.model_name)
| apache-2.0 |
cauchycui/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 202 | 3757 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
msg = "Estimator type doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(LogisticRegression)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
Adai0808/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 278 | 3402 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
  N     Y     Y   Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_distributions=True,
random_state=RANDOM_SEED)
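    # Each row of Y is a binary indicator over the three classes; multiplying
    # by [1, 2, 4] and summing encodes the label combination as an integer in
    # 1..7, which indexes into COLORS above.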
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |
Aasmi/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
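# Illustrative (commented-out) usage of the helper above; the array shapes and
# transformer settings here are arbitrary and not part of the benchmark run:
#
#   rng = np.random.RandomState(0)
#   X_small = rng.randn(100, 1000)
#   fit_s, transform_s = bench_scikit_transformer(
#       X_small, GaussianRandomProjection(n_components=50, random_state=0))
#
# fit_s and transform_s are the wall-clock seconds spent in fit and transform.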
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
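# Illustrative (commented-out) call with arbitrary shapes: a 100 x 1000 matrix
# holding 500 uniformly placed Gaussian non-zero entries would be built with
#
#   X_dense, X_sparse = make_sparse_random_data(100, 1000, 500, random_state=0)
#
# where X_dense is an ndarray and X_sparse the equivalent CSR matrix.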
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
mwv/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 232 | 4761 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
| bsd-3-clause |
antoinecarme/pyaf | tests/basic_checks/test_temporal_demo_hourly_D_H_ErrorMessage.py | 1 | 1208 | # %matplotlib inline
import pyaf
import numpy as np
import pandas as pd
DATA_FREQ = 'H'
PERIODS = ["D" , "H"]
H = 365
N = H * 10
lDateColumn = "Date"
lSignalVar = "Signal";
START_TIME = "2001-01-25"
# generate an hourly signal in a pandas dataframe
np.random.seed(seed=1960)
df_train = pd.DataFrame({lDateColumn : pd.date_range(start=START_TIME, periods=N, freq=DATA_FREQ),
lSignalVar : (np.arange(N)//40 + np.arange(N) % 21 + np.random.randn(N))})
# print(df_train.head(N))
lHierarchy = {};
lHierarchy['Levels'] = None;
lHierarchy['Data'] = None;
lHierarchy['Groups']= {};
lHierarchy['Periods']= PERIODS
lHierarchy['Type'] = "Temporal";
import pyaf.HierarchicalForecastEngine as hautof
lEngine = hautof.cHierarchicalForecastEngine()
# lEngine.mOptions.mNbCores = 1
lEngine.mOptions.mHierarchicalCombinationMethod = ["BU" , 'TD' , 'MO' , 'OC'];
lFailed = None
try:
lSignalHierarchy = lEngine.train(df_train , lDateColumn, lSignalVar, H, lHierarchy, None);
lFailed = False
except Exception as lEx:
print("ERROR" , lEx.__class__, lEx)
lFailed = True
if(not lFailed):
raise Exception("NORMAL_BEHAVIOR_NOT_EXPECTED_SHOULD_HAVE_FAILED")
| bsd-3-clause |
aiguofer/bokeh | examples/charts/server/interactive_excel.py | 6 | 3225 | import xlwings as xw
import pandas as pd
from pandas.util.testing import assert_frame_equal
from bokeh.client import push_session
from bokeh.charts import Line, Bar
from bokeh.charts.operations import blend
from bokeh.io import curdoc
from bokeh.layouts import row, column
from bokeh.models import Paragraph
wb = xw.Workbook() # Creates a connection with a new workbook
# write example data to notebook
xw.Range('A1').value = pd.DataFrame(
{
'Italy':[3016.17,3114.73, 3128.31, 3137.38, 3089.51, 3016.32, 2942.62, 2735.05, 2813.51],
'Japan':[4004.67, 3963.47, 4089.39, 4073.75, 4068.52, 4031.99, 3880.45, 3700.22, 3883.046557],
'Brazil':[1084.48, 1075.76, 1092.31, 1096.13, 1140.61, 1158.39, 1186.27, 1240.22, 1297.91],
'USA':[8056.55, 7825.18, 7838.52, 7788.32, 7875.28, 7840.53, 7691.69, 7749.23, 7481.02],
'year':[2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008],
})
# read back to make sure we have same data format..
data = xw.Range('A1').table.value
energy_per_capita = pd.DataFrame(data[1:], columns=data[0])
countries = ['Brazil', 'Italy', 'USA', 'Japan']
def create_line(data):
""" Convenience function to create a new line chart with the right args """
return Line(data, x='year', y=countries,
legend=True, width=1400, height=300, ylabel='Energy use per capita',
palette=['purple', 'green', 'blue', 'pink'])
def create_bar(data):
op = blend(*countries, labels_name='countries', name='energy')
return Bar(data, label='year', values=op, color='countries', group='countries',
width=1400, height=600, ylabel='Energy use per capita',
palette=['purple', 'green', 'blue', 'pink'],
legend=True)
def data_changed(old):
""" Returns a new dataframe if data has changed on the excel workbook """
data = xw.Range('A1').table.value
df = pd.DataFrame(data[1:], columns=data[0])
try:
assert_frame_equal(df, old)
return None
except AssertionError:
return df
# open a session to keep our local document in sync with server
session = push_session(curdoc())
def update():
global layout
global energy_per_capita
new_df = data_changed(energy_per_capita)
if new_df is not None:
energy_per_capita = new_df
plots_box.children[0] = create_line(energy_per_capita)
plots_box.children[1] = create_bar(energy_per_capita)
line = create_line(energy_per_capita)
bar = create_bar(energy_per_capita)
desc1 = Paragraph(text="""
This example shows live integration between bokeh server and Excel using
XLWings.""")
desc2 = Paragraph(text="""
*** YOU MUST HAVE EXCEL and XLWINGS INSTALLED ON YOUR MACHINE FOR IT TO WORK ***
""")
desc3 = Paragraph(text="""
It opens this plots window and an excel spreadsheet instance with the
values being plotted. When user changes the values on the excel spreadsheet
the plots will be updated accordingly. It's not required to save the spreadsheet for the plots to update.
""")
plots_box = row(line, bar)
layout = column(desc1, desc2, desc3, plots_box)
curdoc().add_root(layout)
curdoc().add_periodic_callback(update, 500)
session.show() # open the document in a browser
session.loop_until_closed() # run forever
| bsd-3-clause |
mgaitan/scipy | tools/refguide_check.py | 29 | 23595 | #!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a Scipy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --check_docs optimize
"""
from __future__ import print_function
import sys
import os
import re
import copy
import inspect
import warnings
import doctest
import tempfile
import io
import docutils.core
from docutils.parsers.rst import directives
import shutil
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from argparse import ArgumentParser, REMAINDER
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
# Remove sphinx directives that don't run without Sphinx environment
directives._directives.pop('versionadded', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "scipy"
PUBLIC_SUBMODULES = [
'cluster',
'cluster.hierarchy',
'cluster.vq',
'constants',
'fftpack',
'fftpack.convolve',
'integrate',
'interpolate',
'io',
'io.arff',
'io.wavfile',
'linalg',
'linalg.blas',
'linalg.lapack',
'linalg.interpolative',
'misc',
'ndimage',
'odr',
'optimize',
'signal',
'sparse',
'sparse.csgraph',
'sparse.linalg',
'spatial',
'spatial.distance',
'special',
'stats',
'stats.mstats',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
'scipy.stats.kstwobign', # inaccurate cdf or ppf
'scipy.stats.levy_stable',
'scipy.special.sinc', # comes from numpy
'scipy.misc.who', # comes from numpy
])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.csgraph',
r'scipy\.sparse\.linalg',
r'scipy\.spatial\.distance',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
# Refguide entries:
#
# - 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
#
# - special directives, such as data and function
#
# - (scipy.constants only): quoted list
#
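    # For example, a refguide line such as
    #    gaussian_kde  -- kernel density estimation
    # (three leading spaces, a name, then optional dashes and a description)
    # is captured by the first pattern below.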
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg":None})
except DeprecationWarning:
return True
except:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = set(only_ref).intersection(deprecated)
only_ref = set(only_ref).difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'currentmodule', 'autosummary', 'data',
'obj', 'versionadded', 'module', 'class',
'ref', 'func', 'toctree', 'moduleauthor',
'sectionauthor', 'codeauthor', 'eq',
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...]
"""
skip_types = (dict, str, unicode, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
return results
def check_doctests(module, verbose, dots=True, doctest_warnings=False):
"""Check code in docstrings of the module's public symbols.
Returns: list of [(item_name, success_flag, output), ...]
"""
# the namespace to run examples in
ns = {'np': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf,}
# if MPL is available, use display-less backend
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
class DTRunner(doctest.DocTestRunner):
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', 'ax.axis', 'plt.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim'}
def __init__(self, parse_namedtuples=True, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(ns))
a_got = eval(got, dict(ns))
except:
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
regex = ('[\w\d_]+\(' +
', '.join(['[\w\d_]+=(.+)']*num) +
'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
# heterog tuple, eg (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except TypeError:
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
# numpy-comparable objects, strings, and heterogenous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
# Loop over non-deprecated items
results = []
all_success = True
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = []
success = True
def out(msg):
output.append(msg)
class MyStderr(object):
"""Redirect stderr to the current stdout"""
def write(self, msg):
if doctest_warnings:
sys.stdout.write(msg)
else:
out(msg)
# Run tests, trying to restore global state afterward
old_printoptions = np.get_printoptions()
old_errstate = np.seterr()
old_stderr = sys.stderr
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
sys.stderr = MyStderr()
try:
os.chdir(tmpdir)
# try to ensure random seed is NOT reproducible
np.random.seed(None)
for t in tests:
t.filename = short_path(t.filename, cwd)
fails, successes = runner.run(t, out=out)
if fails > 0:
success = False
all_success = False
if have_matplotlib:
plt.close('all')
finally:
sys.stderr = old_stderr
os.chdir(cwd)
shutil.rmtree(tmpdir)
np.set_printoptions(**old_printoptions)
np.seterr(**old_errstate)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
return results
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=list(PUBLIC_SUBMODULES),
nargs='*', help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true", help="Run also doctests")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
args = parser.parse_args(argv)
modules = []
names_dict = {}
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in list(module_names):
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
dots = True
success = True
results = []
print("Running checks for %d modules:" % (len(modules),))
for module in modules:
if dots:
if module is not modules[0]:
sys.stderr.write(' ')
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
all_success = True
for module, mod_results in results:
success = all(x[1] for x in mod_results)
all_success = all_success and success
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if all_success:
print("\nOK: refguide and doctests checks passed!")
sys.exit(0)
else:
print("\nERROR: refguide or doctests have errors")
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
| bsd-3-clause |
zedyang/vnpy | vn.datayes/storage.py | 29 | 18623 | import os
import json
import pymongo
import pandas as pd
from datetime import datetime, timedelta
from api import Config, PyApi
from api import BaseDataContainer, History, Bar
from errors import (VNPAST_ConfigError, VNPAST_RequestError,
VNPAST_DataConstructorError, VNPAST_DatabaseError)
class DBConfig(Config):
"""
Json-like config object; inherits from Config()
Contains all kinds of settings relating to database settings.
privates
--------
Inherited from api.Config, plus:
* client: pymongo.MongoClient object, the connection
that is to be used for this session.
* body: dictionary; the main content of config.
- client: pymongo.MongoClient(), refers to self.client.
- dbs: dictionary, is a mapping from database alias
            to another dictionary, which includes configurations
            and the databases themselves (i.e. pymongo.database entities)
Concretely, dbs has the structure like:
{
alias1 : {
'self': client[dbName1],
'index': dbIndex1,
'collNames': collectionNameType1
},
alias2 : {
'self': client[dbName2],
'index': dbIndex2,
'collNames': collectionNameType2
}, ...
}
where alias#: string;
dbs.alias#.self: pymongo.database;
dbs.alias#.index: string;
dbs.alias#.collNames: string;
- dbNames: list; a list of database alias.
"""
head = 'DB config'
client = pymongo.MongoClient()
body = {
'client': client,
'dbs': {
'EQU_M1': {
'self': client['DATAYES_EQUITY_M1'],
'index': 'dateTime',
'collNames': 'secID'
},
'EQU_D1': {
'self': client['DATAYES_EQUITY_D1'],
'index': 'date',
'collNames': 'equTicker'
},
'FUT_D1': {
'self': client['DATAYES_FUTURE_D1'],
'index': 'date',
'collNames': 'futTicker'
},
'OPT_D1': {
'self': client['DATAYES_OPTION_D1'],
'index': 'date',
'collNames': 'optTicker'
},
'FUD_D1': {
'self': client['DATAYES_FUND_D1'],
'index': 'date',
'collNames': 'fudTicker'
},
'IDX_D1': {
'self': client['DATAYES_INDEX_D1'],
'index': 'date',
'collNames': 'idxTicker'
}
},
'dbNames': ['EQU_M1', 'EQU_D1', 'FUT_D1',
'OPT_D1', 'FUD_D1', 'IDX_D1']
}
def __init__(self, head=None, token=None, body=None):
"""
Inherited constructor.
parameters
----------
* head: string; the name of config file. Default is None.
* token: string; user's token.
* body: dictionary; the main content of config
"""
super(DBConfig, self).__init__(head, token, body)
def view(self):
""" Reloaded Prettify printing method. """
config_view = {
'dbConfig_head' : self.head,
'dbConfig_body' : str(self.body),
}
print json.dumps(config_view,
indent=4,
sort_keys=True)
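# Illustrative usage (assumes a MongoDB server is reachable on the default
# localhost port, as implied by pymongo.MongoClient() above):
#
#   dc = DBConfig()
#   dc.view()                                    # pretty-print the config
#   equ_d1 = dc.body['dbs']['EQU_D1']['self']    # pymongo.Database handle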
#----------------------------------------------------------------------
# MongoDB Controller class
class MongodController(object):
"""
The MongoDB controller interface.
MongodController is initialized with a DBConfig configuration
object and a PyApi object, which has already been contructed with
its own Config json. The default version of constructor actually
does nothing special about the database. Yet if user executes shell
script prepare.sh to prepare the connection, MongodController will
firstly gather symbols that are going to become collection names
in corresponding databases. This process is done one database by another,
user can skip useless databases by editing the scripts.
Then, it ensures the index of each collection due to the 'index' value
in DBConfig.body.dbs. Concretely, for D1 bars, the index will be 'date',
and for intraday bars, it will be 'dateTime'; both take the form of
datetime.datetime timestamp.
download() and update() methods of controller dynamically construct
and maintain the databases, requesting data via PyApi. Once the database
is constructed, MongodController can access required data via its fetch()
method.
privates
--------
* _config: DBConfig object; a container of all useful settings for the
databases.
* _api: PyApi object; is responsible for making requests.
* _client: pymongo.MongoClient object; the connection to MongoDB.
* _dbs: dictionary; a mapping from database names to another dictionary,
which includes configurations of the database and the pymongo.database
entity. Inherited from _config.body.['dbs']. Note that keys
self._dbs are mere strings, only self._dbs[key]['self'] refers to the
pymongo.Database object.
* _dbNames: list; a list of names of databases.
* _collNames: dictionary; mapping from self._db[key]['collNames'] attribute
to the names of collections(i.e. tickers) within.
- example: _collNames['equTicker'] = ['000001', '000002', ...]
* _connected: boolean; whether the MongoClient was connected to or not.
* _mapTickersToSecIDs: dictionary; mapping from stock tickers to
its security ID.
example
-------
>> myApi = PyApi(Config())
>> mydbs = DBConfig()
>> controller = MongodController(mydbs, myApi)
>> controller._get_coll_names()
>> controller._ensure_index()
>> controller.download_equity_D1(20130101, 20150801)
>> controller.update_equity_D1()
"""
_config = DBConfig()
_api = None
_client = None
_dbs = None
_dbNames = []
_collNames = dict()
_connected = False
_mapTickersToSecIDs = dict()
def __init__(self, config, api):
"""
Constructor.
parameters
----------
* config: DBConfig object; specifies database configs.
* api: PyApi object.
"""
self._api = api # Set Datayes PyApi.
if config.body:
try:
self._config = config.body
self._client = config.body['client']
self._dbs = config.body['dbs']
self._dbNames = config.body['dbNames']
self._connected = True
except KeyError:
msg = '[MONGOD]: Unable to configure database; ' + \
'config file is incomplete.'
raise VNPAST_ConfigError(msg)
except Exception,e:
msg = '[MONGOD]: Unable to configure database; ' + str(e)
raise VNPAST_ConfigError(msg)
if self._connected:
#self._get_coll_names()
#self._ensure_index()
pass
def view(self):
"""
NOT IMPLEMENTED
"""
return
#----------------------------------------------------------------------
# Get collection names methods.
"""
Decorator;
    If a file exists at path dName, read the data from that file;
    otherwise call the wrapped getter, which returns json-like data,
    and store the result at path dName.
parameters
----------
* dName: string; the specific path of file that __md looks at.
"""
def __md(dName):
def _md(get):
def handle(*args, **kwargs):
try:
if os.path.isfile(dName):
# if directory exists, read from it.
jsonFile = open(dName,'r')
data = json.loads(jsonFile.read())
jsonFile.close()
else:
# if not, get data via *get method,
# then write to the file.
data = get(*args, **kwargs)
jsonFile = open(dName, 'w+')
jsonFile.write(json.dumps(data))
jsonFile.close()
#print data
return data
except Exception,e:
raise e
return handle
return _md
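    # Illustrative behaviour of the caching decorator above (the file name
    # 'names/demo.json' is hypothetical): given
    #
    #   @__md('names/demo.json')
    #   def _demo(self):
    #       return ['000001', '000002']
    #
    # the first call executes the getter and writes names/demo.json; later
    # calls read the cached file instead of hitting the Datayes API again.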
@__md('names/equTicker.json')
def _allEquTickers(self):
"""get all equity tickers, decorated by @__md()."""
data = self._api.get_equity_D1()
allEquTickers = list(data.body['ticker'])
return allEquTickers
@__md('names/secID.json')
def _allSecIds(self):
"""get all security IDs, decorated by @__md()."""
data = self._api.get_equity_D1()
allTickers = list(data.body['ticker'])
exchangeCDs = list(data.body['exchangeCD'])
allSecIds = [allTickers[k]+'.'+exchangeCDs[k] for k in range(
len(allTickers))]
return allSecIds
@__md('names/futTicker.json')
def _allFutTickers(self):
"""get all future tickers, decorated by @__md()."""
data = self._api.get_future_D1()
allFutTickers = list(data.body['ticker'])
return allFutTickers
@__md('names/optTicker.json')
def _allOptTickers(self):
"""get all option tickers, decorated by @__md()."""
data = self._api.get_option_D1()
allOptTickers = list(data.body['ticker'])
return allOptTickers
@__md('names/fudTicker.json')
def _allFudTickers(self):
"""get all fund tickers, decorated by @__md()."""
data = self._api.get_fund_D1()
allFudTickers = list(data.body['ticker'])
return allFudTickers
@__md('names/idxTicker.json')
def _allIdxTickers(self):
"""get all index tickers, decorated by @__md()."""
data = self._api.get_index_D1()
allIdxTickers = list(data.body['ticker'])
return allIdxTickers
@__md('names/bndTicker.json')
def _allBndTickers(self):
"""get all bond tickers, decorated by @__md()."""
data = self._api.get_bond_D1()
allBndTickers = list(data.body['ticker'])
return allBndTickers
def _get_coll_names(self):
"""
get all instruments'names and store them in self._collNames.
"""
try:
if not os.path.exists('names'):
os.makedirs('names')
self._collNames['equTicker'] = self._allEquTickers()
self._collNames['fudTicker'] = self._allFudTickers()
self._collNames['secID'] = self._allSecIds()
self._collNames['futTicker'] = self._allFutTickers()
self._collNames['optTicker'] = self._allOptTickers()
self._collNames['idxTicker'] = self._allIdxTickers()
print '[MONGOD]: Collection names gotten.'
return 1
except AssertionError:
warning = '[MONGOD]: Warning, collection names ' + \
'is an empty list.'
print warning
except Exception, e:
msg = '[MONGOD]: Unable to set collection names; ' + \
str(e)
raise VNPAST_DatabaseError(msg)
#----------------------------------------------------------------------
# Ensure collection index method.
def _ensure_index(self):
"""
Ensure indices for all databases and collections.
first access self._dbs config to get index column names;
then get collection names from self._collNames and loop
over all collections.
"""
if self._collNames and self._dbs:
try:
for dbName in self._dbs:
# Iterate over database configurations.
db = self._dbs[dbName]
dbSelf = db['self']
index = db['index']
collNames = self._collNames[db['collNames']]
# db['self'] is the pymongo.Database object.
for name in collNames:
coll = dbSelf[name]
coll.ensure_index([(index,
pymongo.DESCENDING)], unique=True)
print '[MONGOD]: MongoDB index set.'
return 1
except KeyError:
msg = '[MONGOD]: Unable to set collection indices; ' + \
'infomation in Config.body["dbs"] is incomplete.'
raise VNPAST_DatabaseError(msg)
except Exception, e:
msg = '[MONGOD]: Unable to set collection indices; ' + str(e)
raise VNPAST_DatabaseError(msg)
#----------------------------------------------------------------------
# Download method.
def download_equity_D1(self, start, end, sessionNum=30):
"""
"""
try:
db = self._dbs['EQU_D1']['self']
self._api.get_equity_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def download_equity_M1(self, tasks, startYr=2012, endYr=2015):
"""
"""
try:
# map equity tickers to security IDs.
if self._mapTickersToSecIDs:
maps = self._mapTickersToSecIDs
else:
                assert os.path.isfile('./names/secID.json')
                jsonFile = open('./names/secID.json', 'r')
allSecIds = json.loads(jsonFile.read())
jsonFile.close()
allTickers = [s.split('.')[0] for s in allSecIds]
maps = dict(zip(allTickers, allSecIds))
self._mapTickersToSecIDs = maps
tasks_ = [maps[task] for task in tasks]
db = self._dbs['EQU_M1']['self']
self._api.get_equity_M1_interMonth(db, id=1,
startYr = startYr,
endYr = endYr,
tasks = tasks_)
except AssertionError:
msg = '[MONGOD]: Cannot map tickers to secIDs; ' + \
'secID.json does not exist.'
raise VNPAST_DatabaseError(msg)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def download_bond_D1(self, start, end, sessionNum=30):
"""
"""
pass
def download_future_D1(self, start, end, sessionNum=30):
"""
"""
try:
db = self._dbs['FUT_D1']['self']
self._api.get_future_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def download_option_D1(self, start, end, sessionNum=30):
"""
"""
try:
db = self._dbs['OPT_D1']['self']
self._api.get_option_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def download_index_D1(self, start, end, sessionNum=30):
"""
"""
try:
db = self._dbs['IDX_D1']['self']
self._api.get_index_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def download_fund_D1(self, start, end, sessionNum=30):
"""
"""
try:
db = self._dbs['FUD_D1']['self']
self._api.get_fund_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
#----------------------------------------------------------------------
# Update methods.
def __update(self, key, target1, target2, sessionNum):
"""
Basic update method.
Looks into the database specified by 'key', find the latest
record in the collection of it. Then update the collections
till last trading date.
parameters
----------
* key: string; a database alias (refer to the database config)
e.g., 'EQU_D1'.
* target1: method; pointer to the function with which controller
obtain all tickers in the database. Concretely, target1 are
self._all#Tickers methods.
* target2: method; pointer to the api overlord requesting functions
i.e. self._api.get_###_mongod methods.
* sessionNum: integer; the number of threads.
"""
try:
# get databases and tickers
db = self._dbs[key]['self']
index = self._dbs[key]['index']
allTickers = target1()
coll = db[allTickers[0]]
# find the latest timestamp in collection.
latest = coll.find_one(
sort=[(index, pymongo.DESCENDING)])[index]
start = datetime.strftime(
latest + timedelta(days=1),'%Y%m%d')
end = datetime.strftime(datetime.now(), '%Y%m%d')
# then download.
target2(db, start, end, sessionNum)
return db
except Exception, e:
msg = '[MONGOD]: Unable to update data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def update_equity_D1(self, sessionNum=30):
"""
"""
db = self.__update(key = 'EQU_D1',
target1 = self._allEquTickers,
target2 = self._api.get_equity_D1_mongod,
sessionNum = sessionNum)
return db
def update_future_D1(self, sessionNum=30):
"""
"""
db = self.__update(key = 'FUT_D1',
target1 = self._allFutTickers,
target2 = self._api.get_future_D1_mongod,
sessionNum = sessionNum)
return db
def update_option_D1(self, sessionNum=30):
"""
"""
db = self.__update(key = 'OPT_D1',
target1 = self._allOptTickers,
target2 = self._api.get_option_D1_mongod,
sessionNum = sessionNum)
return db
def update_index_D1(self, sessionNum=30):
"""
"""
db = self.__update(key = 'IDX_D1',
target1 = self._allIdxTickers,
target2 = self._api.get_index_D1_mongod,
sessionNum = sessionNum)
return db
def update_fund_D1(self, sessionNum=30):
"""
"""
db = self.__update(key = 'FUD_D1',
target1 = self._allFudTickers,
target2 = self._api.get_fund_D1_mongod,
sessionNum = sessionNum)
return db
#----------------------------------------------------------------------#
# stuff that will be deprecated
def update_equity_D1_(self, sessionNum=30):
"""
"""
try:
# set databases and tickers
db = self._dbs['EQU_D1']['self']
index = self._dbs['EQU_D1']['index']
allEquTickers = self._allEquTickers()
coll = db[allEquTickers[0]]
# find the latest timestamp in collection.
latest = coll.find_one(
sort=[(index, pymongo.DESCENDING)])[index]
start = datetime.strftime(latest + timedelta(days=1),'%Y%m%d')
end = datetime.strftime(datetime.now(), '%Y%m%d')
# then download.
self._api.get_equity_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to update data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def update_equity_M1(self):
"""
"""
pass
#----------------------------------------------------------------------
# Fetch method.
def fetch(self, dbName, ticker, start, end, output='list'):
"""
"""
# check inputs' validity.
if output not in ['df', 'list', 'json']:
raise ValueError('[MONGOD]: Unsupported output type.')
if dbName not in self._dbNames:
raise ValueError('[MONGOD]: Unable to locate database name.')
db = self._dbs[dbName]
dbSelf = db['self']
dbIndex = db['index']
try:
            coll = dbSelf[ticker]
if len(start)==8 and len(end)==8:
# yyyymmdd, len()=8
start = datetime.strptime(start, '%Y%m%d')
end = datetime.strptime(end, '%Y%m%d')
elif len(start)==14 and len(end)==14:
# yyyymmdd HH:MM, len()=14
start = datetime.strptime(start, '%Y%m%d %H:%M')
end = datetime.strptime(end, '%Y%m%d %H:%M')
else:
pass
docs = []
# find in MongoDB.
for doc in coll.find(filter={dbIndex: {'$lte': end,
'$gte': start}}, projection={'_id': False}):
docs.append(doc)
            if output == 'list':
                return docs[::-1]
            elif output == 'df':
                return pd.DataFrame(docs[::-1])
            else:
                return json.dumps(docs[::-1], default=str)
except Exception, e:
msg = '[MONGOD]: Error encountered when fetching data' + \
'from MongoDB; '+ str(e)
return -1
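# Illustrative fetch call (ticker and dates are hypothetical; requires a
# populated DATAYES_EQUITY_D1 database, e.g. via mc.download_equity_D1):
#
#   bars = mc.fetch('EQU_D1', '000001', '20150101', '20150301')
#
# which returns a list of daily bar documents for that collection.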
if __name__ == '__main__':
dc = DBConfig()
api = PyApi(Config())
mc = MongodController(dc, api)
mc.update_index_D1()
| mit |
JsNoNo/scikit-learn | sklearn/decomposition/__init__.py | 147 | 1421 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |
jsilter/scipy | scipy/stats/kde.py | 9 | 18242 | #-------------------------------------------------------------------------------
#
# Define classes for (uni/multi)-variate kernel density estimation.
#
# Currently, only Gaussian kernels are implemented.
#
# Written by: Robert Kern
#
# Date: 2004-08-09
#
# Modified: 2005-02-10 by Robert Kern.
# Contributed to Scipy
# 2005-10-07 by Robert Kern.
# Some fixes to match the new scipy_core
#
# Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
from __future__ import division, print_function, absolute_import
# Standard library imports.
import warnings
# Scipy imports.
from scipy.lib.six import callable, string_types
from scipy import linalg, special
from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \
ravel, power, atleast_1d, squeeze, sum, transpose
import numpy as np
from numpy.random import randint, multivariate_normal
# Local imports.
from . import mvn
__all__ = ['gaussian_kde']
class gaussian_kde(object):
"""Representation of a kernel-density estimate using Gaussian kernels.
Kernel density estimation is a way to estimate the probability density
function (PDF) of a random variable in a non-parametric way.
`gaussian_kde` works for both uni-variate and multi-variate data. It
includes automatic bandwidth determination. The estimation works best for
a unimodal distribution; bimodal or multi-modal distributions tend to be
oversmoothed.
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a scalar,
this will be used directly as `kde.factor`. If a callable, it should
take a `gaussian_kde` instance as only parameter and return a scalar.
If None (default), 'scott' is used. See Notes for more details.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
d : int
Number of dimensions.
n : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
kde.evaluate(points) : ndarray
Evaluate the estimated pdf on a provided set of points.
kde(points) : ndarray
Same as kde.evaluate(points)
kde.integrate_gaussian(mean, cov) : float
Multiply pdf with a specified Gaussian and integrate over the whole
domain.
kde.integrate_box_1d(low, high) : float
Integrate pdf (1D only) between two bounds.
kde.integrate_box(low_bounds, high_bounds) : float
Integrate pdf over a rectangular space between low_bounds and
high_bounds.
kde.integrate_kde(other_kde) : float
Integrate two kernel density estimates multiplied together.
kde.pdf(points) : ndarray
Alias for ``kde.evaluate(points)``.
kde.logpdf(points) : ndarray
Equivalent to ``np.log(kde.evaluate(points))``.
kde.resample(size=None) : ndarray
Randomly sample a dataset from the estimated pdf.
kde.set_bandwidth(bw_method='scott') : None
Computes the bandwidth, i.e. the coefficient that multiplies the data
covariance matrix to obtain the kernel covariance matrix.
.. versionadded:: 0.11.0
kde.covariance_factor : float
Computes the coefficient (`kde.factor`) that multiplies the data
covariance matrix to obtain the kernel covariance matrix.
The default is `scotts_factor`. A subclass can overwrite this method
to provide a different method, or set it through a call to
`kde.set_bandwidth`.
Notes
-----
Bandwidth selection strongly influences the estimate obtained from the KDE
(much more so than the actual shape of the kernel). Bandwidth selection
can be done by a "rule of thumb", by cross-validation, by "plug-in
methods" or by other means; see [3]_, [4]_ for reviews. `gaussian_kde`
uses a rule of thumb, the default is Scott's Rule.
Scott's Rule [1]_, implemented as `scotts_factor`, is::
n**(-1./(d+4)),
with ``n`` the number of data points and ``d`` the number of dimensions.
Silverman's Rule [2]_, implemented as `silverman_factor`, is::
(n * (d + 2) / 4.)**(-1. / (d + 4)).
Good general descriptions of kernel density estimation can be found in [1]_
and [2]_, the mathematics for this multi-dimensional implementation can be
found in [1]_.
References
----------
.. [1] D.W. Scott, "Multivariate Density Estimation: Theory, Practice, and
Visualization", John Wiley & Sons, New York, Chicester, 1992.
.. [2] B.W. Silverman, "Density Estimation for Statistics and Data
Analysis", Vol. 26, Monographs on Statistics and Applied Probability,
Chapman and Hall, London, 1986.
.. [3] B.A. Turlach, "Bandwidth Selection in Kernel Density Estimation: A
Review", CORE and Institut de Statistique, Vol. 19, pp. 1-33, 1993.
.. [4] D.M. Bashtannyk and R.J. Hyndman, "Bandwidth selection for kernel
conditional density estimation", Computational Statistics & Data
Analysis, Vol. 36, pp. 279-298, 2001.
Examples
--------
Generate some random two-dimensional data:
>>> from scipy import stats
    >>> def measure(n):
    ...     "Measurement model, return two coupled measurements."
    ...     m1 = np.random.normal(size=n)
    ...     m2 = np.random.normal(scale=0.5, size=n)
    ...     return m1+m2, m1-m2
>>> m1, m2 = measure(2000)
>>> xmin = m1.min()
>>> xmax = m1.max()
>>> ymin = m2.min()
>>> ymax = m2.max()
Perform a kernel density estimate on the data:
>>> X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
>>> positions = np.vstack([X.ravel(), Y.ravel()])
>>> values = np.vstack([m1, m2])
>>> kernel = stats.gaussian_kde(values)
>>> Z = np.reshape(kernel(positions).T, X.shape)
Plot the results:
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
... extent=[xmin, xmax, ymin, ymax])
>>> ax.plot(m1, m2, 'k.', markersize=2)
>>> ax.set_xlim([xmin, xmax])
>>> ax.set_ylim([ymin, ymax])
>>> plt.show()
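    The default bandwidth factor follows Scott's rule from the Notes,
    ``n**(-1./(d+4))``; for the ``n = 2000`` points in ``d = 2`` dimensions
    above this is ``2000**(-1./6)``, roughly 0.28:
    >>> kernel.factor  # doctest: +SKIP
    0.2817...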
"""
def __init__(self, dataset, bw_method=None):
self.dataset = atleast_2d(dataset)
if not self.dataset.size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.d, self.n = self.dataset.shape
self.set_bandwidth(bw_method=bw_method)
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different than
the dimensionality of the KDE.
"""
points = atleast_2d(points)
d, m = points.shape
if d != self.d:
if d == 1 and m == self.d:
# points was passed in as a row vector
points = reshape(points, (self.d, 1))
m = 1
else:
msg = "points have dimension %s, dataset has dimension %s" % (d,
self.d)
raise ValueError(msg)
result = zeros((m,), dtype=np.float)
if m >= self.n:
# there are more points than data, so loop over data
for i in range(self.n):
diff = self.dataset[:, i, newaxis] - points
tdiff = dot(self.inv_cov, diff)
energy = sum(diff*tdiff,axis=0) / 2.0
result = result + exp(-energy)
else:
# loop over points
for i in range(m):
diff = self.dataset - points[:, i, newaxis]
tdiff = dot(self.inv_cov, diff)
energy = sum(diff * tdiff, axis=0) / 2.0
result[i] = sum(exp(-energy), axis=0)
result = result / self._norm_factor
return result
__call__ = evaluate
def integrate_gaussian(self, mean, cov):
"""
Multiply estimated density by a multivariate Gaussian and integrate
over the whole space.
Parameters
----------
        mean : array_like
A 1-D array, specifying the mean of the Gaussian.
cov : array_like
A 2-D array, specifying the covariance matrix of the Gaussian.
Returns
-------
result : scalar
The value of the integral.
Raises
------
ValueError :
If the mean or covariance of the input Gaussian differs from
the KDE's dimensionality.
"""
mean = atleast_1d(squeeze(mean))
cov = atleast_2d(cov)
if mean.shape != (self.d,):
raise ValueError("mean does not have dimension %s" % self.d)
if cov.shape != (self.d, self.d):
raise ValueError("covariance does not have dimension %s" % self.d)
# make mean a column vector
mean = mean[:, newaxis]
sum_cov = self.covariance + cov
diff = self.dataset - mean
tdiff = dot(linalg.inv(sum_cov), diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result = sum(exp(-energies), axis=0) / sqrt(linalg.det(2 * pi *
sum_cov)) / self.n
return result
def integrate_box_1d(self, low, high):
"""
Computes the integral of a 1D pdf between two bounds.
Parameters
----------
low : scalar
Lower bound of integration.
high : scalar
Upper bound of integration.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDE is over more than one dimension.
"""
if self.d != 1:
raise ValueError("integrate_box_1d() only handles 1D pdfs")
stdev = ravel(sqrt(self.covariance))[0]
normalized_low = ravel((low - self.dataset) / stdev)
normalized_high = ravel((high - self.dataset) / stdev)
value = np.mean(special.ndtr(normalized_high) -
special.ndtr(normalized_low))
return value
def integrate_box(self, low_bounds, high_bounds, maxpts=None):
"""Computes the integral of a pdf over a rectangular interval.
Parameters
----------
low_bounds : array_like
A 1-D array containing the lower bounds of integration.
high_bounds : array_like
A 1-D array containing the upper bounds of integration.
maxpts : int, optional
The maximum number of points to use for integration.
Returns
-------
value : scalar
The result of the integral.
"""
if maxpts is not None:
extra_kwds = {'maxpts': maxpts}
else:
extra_kwds = {}
value, inform = mvn.mvnun(low_bounds, high_bounds, self.dataset,
self.covariance, **extra_kwds)
if inform:
msg = ('An integral in mvn.mvnun requires more points than %s' %
(self.d * 1000))
warnings.warn(msg)
return value
def integrate_kde(self, other):
"""
Computes the integral of the product of this kernel density estimate
with another.
Parameters
----------
other : gaussian_kde instance
The other kde.
Returns
-------
value : scalar
The result of the integral.
Raises
------
ValueError
If the KDEs have different dimensionality.
"""
if other.d != self.d:
raise ValueError("KDEs are not the same dimensionality")
# we want to iterate over the smallest number of points
if other.n < self.n:
small = other
large = self
else:
small = self
large = other
sum_cov = small.covariance + large.covariance
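        # Factor the summed covariance once and reuse the Cholesky factor for every kernel center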
sum_cov_chol = linalg.cho_factor(sum_cov)
result = 0.0
for i in range(small.n):
mean = small.dataset[:, i, newaxis]
diff = large.dataset - mean
tdiff = linalg.cho_solve(sum_cov_chol, diff)
energies = sum(diff * tdiff, axis=0) / 2.0
result += sum(exp(-energies), axis=0)
result /= sqrt(linalg.det(2 * pi * sum_cov)) * large.n * small.n
return result
def resample(self, size=None):
"""
Randomly sample a dataset from the estimated pdf.
Parameters
----------
size : int, optional
The number of samples to draw. If not provided, then the size is
the same as the underlying dataset.
Returns
-------
resample : (self.d, `size`) ndarray
The sampled dataset.
"""
if size is None:
size = self.n
norm = transpose(multivariate_normal(zeros((self.d,), float),
self.covariance, size=size))
indices = randint(0, self.n, size=size)
means = self.dataset[:, indices]
return means + norm
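    # Bandwidth "rules of thumb": each returns the scalar factor that multiplies
    # the data covariance to obtain the kernel covariance (see the class Notes).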
def scotts_factor(self):
return power(self.n, -1./(self.d+4))
def silverman_factor(self):
return power(self.n*(self.d+2.0)/4.0, -1./(self.d+4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
def set_bandwidth(self, bw_method=None):
"""Compute the estimator bandwidth with given method.
The new bandwidth calculated after a call to `set_bandwidth` is used
for subsequent evaluations of the estimated density.
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a callable,
it should take a `gaussian_kde` instance as only parameter and
return a scalar. If None (default), nothing happens; the current
`kde.covariance_factor` method is kept.
Notes
-----
.. versionadded:: 0.11
Examples
--------
>>> x1 = np.array([-7, -5, 1, 4, 5.])
>>> kde = stats.gaussian_kde(x1)
>>> xs = np.linspace(-10, 10, num=50)
>>> y1 = kde(xs)
>>> kde.set_bandwidth(bw_method='silverman')
>>> y2 = kde(xs)
>>> kde.set_bandwidth(bw_method=kde.factor / 3.)
>>> y3 = kde(xs)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x1, np.ones(x1.shape) / (4. * x1.size), 'bo',
... label='Data points (rescaled)')
>>> ax.plot(xs, y1, label='Scott (default)')
>>> ax.plot(xs, y2, label='Silverman')
>>> ax.plot(xs, y3, label='Const (1/3 * Silverman)')
>>> ax.legend()
>>> plt.show()
"""
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif np.isscalar(bw_method) and not isinstance(bw_method, string_types):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
self._compute_covariance()
def _compute_covariance(self):
"""Computes the covariance matrix for each Gaussian kernel using
covariance_factor().
"""
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
bias=False))
self._data_inv_cov = linalg.inv(self._data_covariance)
self.covariance = self._data_covariance * self.factor**2
self.inv_cov = self._data_inv_cov / self.factor**2
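        # Normalization constant sqrt(det(2*pi*covariance)) * n, used by evaluate()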
self._norm_factor = sqrt(linalg.det(2*pi*self.covariance)) * self.n
def pdf(self, x):
"""
Evaluate the estimated pdf on a provided set of points.
Notes
-----
This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``
docstring for more details.
"""
return self.evaluate(x)
def logpdf(self, x):
"""
Evaluate the log of the estimated pdf on a provided set of points.
Notes
-----
See `gaussian_kde.evaluate` for more details; this method simply
returns ``np.log(gaussian_kde.evaluate(x))``.
"""
return np.log(self.evaluate(x))
| bsd-3-clause |
Chasego/beaker-notebook | plugin/ipythonPlugins/src/dist/python3/beaker_runtime3.py | 1 | 18609 | # Copyright 2014 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, json, pandas, numpy
import urllib.request, urllib.parse, urllib.error, urllib.request, urllib.error, urllib.parse, IPython, datetime, calendar, math, traceback, time
from IPython.utils.traitlets import Unicode
class OutputContainer:
def __init__(self):
self.items = []
def clear(self):
self.items = [ ]
def addItem(self, obj):
self.items.append(obj)
def getItems(self):
return self.items
class BeakerCodeCell:
def __init__(self, cellId, evaluatorId):
self.cellId = cellId
self.evaluatorId = evaluatorId
self.code = ''
self.outputtype = ''
self.output = None
self.tags = ''
def getCellId(self):
return self.cellId
def getEvaluatorId(self):
return self.evaluatorId
def getCode(self):
return self.code
def getOutputType(self):
return self.outputtype
def getOutput(self):
return self.output
def getTags(self):
return self.tags
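# Map Python/numpy type names onto the column type names used by Beaker's table display.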
def convertTypeName(typ):
if typ.startswith("float"):
return "double"
if typ.startswith("int") or typ.startswith("uint") or typ.startswith("short") or typ.startswith("ushort") or typ.startswith("long") or typ.startswith("ulong"):
return "integer"
if typ.startswith("bool"):
return "boolean"
if typ.startswith("date") or typ.startswith("Time"):
return "time"
return "string"
def isPrimitiveType(typ):
if typ.startswith("float"):
return True
if typ.startswith("int") or typ.startswith("uint") or typ.startswith("short") or typ.startswith("ushort") or typ.startswith("long") or typ.startswith("ulong"):
return True
if typ.startswith("bool"):
return True
if typ.startswith("date") or typ.startswith("Time"):
return True
if typ.startswith("str"):
return True
return False
def isListOfMaps(data):
if type(data) != list:
return False
for w in data:
if type(w) != dict:
return False
for v in w.values():
if not isPrimitiveType(type(v).__name__):
return False
return True
def isDictionary(data):
if type(data) != dict:
return False
for v in data.values():
if not isPrimitiveType(type(v).__name__):
return False
return True
def transformNaN(obj):
if not isinstance(obj, float):
return obj
if math.isnan(obj):
return "Nan";
if math.isinf(obj):
if obj>0:
return "Infinity"
else:
return "-Infinity"
return obj
def transformNaNs(obj):
for x in range(0,len(obj)):
i = obj[x];
if not isinstance(i, float):
continue
if math.isnan(i):
obj[x] = "NaN";
if math.isinf(i):
if i>0:
obj[x] = "Infinity"
else:
obj[x] = "-Infinity"
def fixNaNBack(obj):
if not isinstance(obj, str):
return obj
if obj == "NaN":
return float('nan')
if obj == "Infinity":
return float('inf')
if obj == "-Infinity":
return float('-inf')
return obj
def fixNaNsBack(obj):
for x in range(0,len(obj)):
i = obj[x];
if not isinstance(i, str):
continue
if i == "NaN":
obj[x] = float('nan')
if i == "Infinity":
obj[x] = float('inf')
if i == "-Infinity":
obj[x] = float('-inf')
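# Recursively convert Python objects (lists of dicts, dicts, OutputContainer,
# BeakerCodeCell, NaN/inf floats) into the JSON-friendly structures Beaker expects.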
def transform(obj):
if type(obj) == bytes:
return str(obj)
if isListOfMaps(obj):
out = {}
out['type'] = "TableDisplay"
out['subtype'] = "ListOfMaps"
cols = []
for l in obj:
cols.extend(l.keys())
cols = list(set(cols))
out['columnNames'] = cols
vals = []
for l in obj:
row = []
for r in cols:
if r in l:
row.append(transform(l[r]))
else:
row.append('')
vals.append(row)
out['values'] = vals
return out
if isDictionary(obj):
out = {}
out['type'] = "TableDisplay"
out['subtype'] = "Dictionary"
out['columnNames'] = [ "Key", "Value" ]
values = []
for k,v in obj.items():
values.append( [k, transform(v)] )
out['values'] = values
return out
if type(obj) == dict:
out = {}
for k,v in obj.items():
out[k] = transform(v)
return out
if type(obj) == list:
out = []
for v in obj:
out.append(transform(v))
return out
if isinstance(obj, OutputContainer):
out = {}
out['type'] = "OutputContainer"
items = []
for v in obj.getItems():
items.append(transform(v))
out['items'] = items
return out
if isinstance(obj, BeakerCodeCell):
out = {}
out['type'] = "BeakerCodeCell"
out['cellId'] = obj.getCellId()
out['evaluatorId'] = obj.getEvaluatorId()
out['code'] = obj.getCode()
out['outputtype'] = obj.getOutputType()
out['output'] = transform(obj.getOutput())
out['tags'] = obj.getTags()
return out
return transformNaN(obj)
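# Inverse of transform(): rebuild Python objects (code cells, containers, dates,
# pandas Series/DataFrames, numpy matrices) from Beaker's JSON structures.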
def transformBack(obj):
if type(obj) == dict:
out = {}
for k,v in obj.items():
out[str(k)] = transformBack(v)
if "type" in out:
if out['type'] == "BeakerCodeCell":
c = BeakerCodeCell(out['cellId'], out['evaluatorId'])
if 'code' in out:
c.code = out['code']
if 'outputtype' in out:
c.outputtype = out['outputtype']
if 'output' in out:
c.output = transformBack(out['output'])
if 'tags' in out:
c.tags = out['tags']
return c
if out['type'] == "OutputContainer":
c = OutputContainer()
if 'items' in out:
for i in out['items']:
c.addItem(i)
return c;
if out['type'] == "Date":
return datetime.datetime.fromtimestamp(out["timestamp"]/1000)
if out['type'] == "TableDisplay":
if 'subtype' in out:
if out['subtype'] == "Dictionary":
out2 = { }
for r in out['values']:
out2[r[0]] = fixNaNBack(r[1])
if out['columnNames'][0] == "Index":
return pandas.Series(out2)
return out2
if out['subtype'] == "Matrix":
vals = out['values']
fixNaNsBack(vals)
return numpy.matrix(vals)
if out['subtype'] == "ListOfMaps":
out2 = []
cnames = out['columnNames']
for r in out['values']:
out3 = { }
for i in range(len(cnames)):
if r[i] != '':
out3[ cnames[i] ] = r[i]
out2.append(out3)
return out2
# transform to dataframe
if ('hasIndex' in out) and (out['hasIndex'] == "true"):
# first column becomes the index
vals = out['values']
cnames = out['columnNames'][1:]
index = []
for x in range(0,len(vals)):
index.append(transformBack(vals[x][0]))
v = vals[x][1:]
fixNaNsBack(v)
vals[x] = v
return pandas.DataFrame(data=vals, columns=cnames, index=index)
else:
vals = out['values']
cnames = out['columnNames']
for x in range(0,len(vals)):
v = vals[x]
fixNaNsBack(v)
vals[x] = v
return pandas.DataFrame(data=vals, columns=cnames)
return out
if type(obj) == list:
out = []
for v in obj:
out.append(transformBack(v))
return out
try:
if type(obj) == bytes:
obj = str(obj)
except Exception as e:
return obj
return obj
# should be inner class to Beaker
class DataFrameEncoder(json.JSONEncoder):
def default(self, obj):
# similarly handle Panels.
# make this extensible by the user to handle their own types.
if isinstance(obj, numpy.generic):
return transformNaN(obj.item())
if isinstance(obj, numpy.ndarray) and obj.ndim == 2:
out = {}
out['type'] = "TableDisplay"
out['subtype'] = "Matrix"
cols = [ ]
for i in range(obj.shape[1]):
cols.append( "c" + str(i) )
out['columnNames'] =cols
vars = obj.tolist()
for x in range(0,len(vars)):
transformNaNs(vars[x])
out['values'] = vars
return out
if isinstance(obj, numpy.ndarray):
ret = obj.tolist()
transformNaNs(ret)
return ret
if type(obj) == datetime.datetime or type(obj) == datetime.date or type(obj).__name__ == 'Timestamp':
out = {}
out['type'] = "Date"
out['timestamp'] = int(obj.strftime("%s"))*1000
return out
if type(obj) == pandas.core.frame.DataFrame:
out = {}
out['type'] = "TableDisplay"
out['subtype'] = "TableDisplay"
out['hasIndex'] = "true"
out['columnNames'] = ['Index'] + obj.columns.tolist()
vals = obj.values.tolist()
idx = obj.index.tolist()
for x in range(0,len(vals)):
vals[x] = [ idx[x] ] + vals[x]
ty = []
num = len(obj.columns.tolist())
x = 0;
for x in range(0,num+1):
ty.append( convertTypeName(type(vals[0][x]).__name__))
out['types'] = ty
for x in range(0,len(vals)):
transformNaNs(vals[x])
out['values'] = vals
return out
if type(obj) == pandas.core.series.Series:
basict = True
for i in range(len(obj)):
if not isPrimitiveType(type(obj[i]).__name__):
basict = False
break
if basict:
out = {}
out['type'] = "TableDisplay"
out['subtype'] = "Dictionary"
out['columnNames'] = [ "Index", "Value" ]
values = []
for k,v in obj.items():
values.append( [k, transform(v)] )
out['values'] = values
return out
return obj.to_dict()
return json.JSONEncoder.default(self, obj)
class MyJSONFormatter(IPython.core.formatters.BaseFormatter):
format_type = Unicode('application/json')
def __call__(self, obj):
try:
obj = transform(obj)
return json.dumps(obj, cls=DataFrameEncoder)
except Exception as e:
#print(e)
#traceback.print_exc()
return None
class Beaker:
"""Runtime support for Python code in Beaker."""
session_id = ''
core_url = '127.0.0.1:' + os.environ['beaker_core_port']
_beaker_password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
_beaker_password_mgr.add_password(None, core_url, 'beaker',
os.environ['beaker_core_password'])
_beaker_url_opener = urllib.request.build_opener(urllib.request.HTTPBasicAuthHandler(_beaker_password_mgr), urllib.request.ProxyHandler({}))
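    # Push a value into the shared notebook namespace through Beaker's REST API;
    # when `unset` is true no value is serialized (presumably clearing the variable).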
def set4(self, var, val, unset, sync):
args = {'name': var, 'session':self.session_id, 'sync':sync}
if not unset:
val = transform(val)
args['value'] = json.dumps(val, cls=DataFrameEncoder)
req = urllib.request.Request('http://' + self.core_url + '/rest/namespace/set',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
reply = conn.read().decode("utf-8")
if reply != 'ok':
raise NameError(reply)
def get(self, var):
req = urllib.request.Request('http://' + self.core_url + '/rest/namespace/get?' +
urllib.parse.urlencode({
'name': var,
'session':self.session_id}))
conn = self._beaker_url_opener.open(req)
result = json.loads(conn.read().decode())
if not result['defined']:
raise NameError('name \'' + var + '\' is not defined in notebook namespace')
return transformBack(result['value'])
def set_session(self, id):
self.session_id = id
def register_output(self):
ip = IPython.InteractiveShell.instance()
ip.display_formatter.formatters['application/json'] = MyJSONFormatter(parent=ip.display_formatter)
def set(self, var, val):
return self.set4(var, val, False, True)
def createOutputContainer(self):
return OutputContainer()
def showProgressUpdate(self):
return "WARNING: python3 language plugin does not support progress updates"
def evaluate(self,filter):
args = {'filter': filter, 'session':self.session_id}
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/evaluate',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
result = json.loads(conn.read().decode())
return transformBack(result)
def evaluateCode(self, evaluator,code):
args = {'evaluator': evaluator, 'code' : code, 'session':self.session_id}
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/evaluateCode',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
result = json.loads(conn.read().decode())
return transformBack(result)
def showStatus(self,msg):
args = {'msg': msg, 'session':self.session_id}
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/showStatus',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
        result = conn.read().decode()
        return result == "true"
def clearStatus(self,msg):
args = {'msg': msg, 'session':self.session_id}
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/clearStatus',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
        result = conn.read().decode()
        return result == "true"
def showTransientStatus(self,msg):
args = {'msg': msg, 'session':self.session_id}
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/showTransientStatus',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
        result = conn.read().decode()
        return result == "true"
def getEvaluators(self):
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/getEvaluators?' +
urllib.parse.urlencode({
'session':self.session_id}))
conn = self._beaker_url_opener.open(req)
result = json.loads(conn.read().decode())
return transformBack(result)
def getCodeCells(self,filter):
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/getCodeCells?' +
urllib.parse.urlencode({
'session':self.session_id, 'filter':filter}))
conn = self._beaker_url_opener.open(req)
result = json.loads(conn.read().decode())
return transformBack(result)
def setCodeCellBody(self,name,body):
args = {'name': name, 'body':body, 'session':self.session_id}
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/setCodeCellBody',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
        result = conn.read().decode()
        return result == "true"
def setCodeCellEvaluator(self,name,evaluator):
args = {'name': name, 'evaluator':evaluator, 'session':self.session_id}
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/setCodeCellEvaluator',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
        result = conn.read().decode()
        return result == "true"
def setCodeCellTags(self,name,tags):
args = {'name': name, 'tags':tags, 'session':self.session_id}
req = urllib.request.Request('http://' + self.core_url + '/rest/notebookctrl/setCodeCellTags',
urllib.parse.urlencode(args).encode('utf8'))
conn = self._beaker_url_opener.open(req)
        result = conn.read().decode()
        return result == "true"
def __setattr__(self, name, value):
if 'session_id' == name:
self.__dict__['session_id'] = value
return
return self.set(name, value)
def __getattr__(self, name):
return self.get(name)
| apache-2.0 |
jason-neal/astro_scripts | SWEETCat.py | 1 | 1644 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# My imports
from __future__ import division, print_function
import os
import requests
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
try:
import seaborn as sns
sns.set_context('talk')
except ImportError:
print('For better looking plots, try install seaborn (pip install seaborn)')
pass
def _read_sweetcat(fname):
"""
Read SWEETCat into a pandas DataFrame
"""
if not isinstance(fname, str):
raise ValueError('Input name must be a str')
names = ['star', 'hd', 'ra', 'dec', 'vmag', 'ervmag', 'par', 'erpar',
'parsource', 'teff', 'erteff', 'logg', 'erlogg', 'logglc',
'erlogglc', 'vt', 'ervt', 'metal', 'ermetal', 'mass', 'ermass',
'author', 'link', 'source', 'update', 'comment1', 'comment2']
df = pd.read_csv(fname, sep='\t', names=names, na_values=['~'])
# Adding luminosity to the DataFrame
df['lum'] = (df.teff/5777)**4 * df.mass
return df
def _download_sweetcat(fout):
"""
Download SWEETCAT and write it to file
"""
url = 'https://www.astro.up.pt/resources/sweet-cat/download.php'
table = requests.get(url)
with open(fout, 'w') as file:
file.write(table.content)
path = os.path.expanduser('~/.SWEETCat/')
_sc = os.path.join(path, 'sweetcat.csv')
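# Download the catalogue on first use and cache it under ~/.SWEETCat/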
if os.path.isdir(path):
if not os.path.isfile(_sc):
print('Downloading SWEET-Cat...')
_download_sweetcat(_sc)
else:
os.mkdir(path)
print('{0!s} Created'.format(path))
print('Downloading SWEET-Cat...')
_download_sweetcat(_sc)
df = _read_sweetcat(_sc)
| mit |
chenyyx/scikit-learn-doc-zh | examples/en/decomposition/plot_sparse_coding.py | 60 | 4016 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15, 'navy'),
('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
plt.subplot(1, 2, subplot + 1)
plt.title('Sparse coding against %s dictionary' % title)
plt.plot(y, lw=lw, linestyle='--', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero, color in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y.reshape(1, -1))
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color=color, lw=lw,
label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y.reshape(1, -1))
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
plt.plot(x, color='darkorange', lw=lw,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
% (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
| gpl-3.0 |
kjung/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 69 | 3894 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import MultiTaskElasticNet
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
name = NoSparseClassifier.__name__
msg = "Estimator " + name + " doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(AdaBoostClassifier)
check_estimator(MultiTaskElasticNet)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
robbymeals/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
f3r/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points, remote from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
mjvakili/supermean | code/fastsampler.py | 1 | 5083 | import numpy as np
import matplotlib.pyplot as plt
import pyfits as pf
import sampler_new
import h5py
import matplotlib
matplotlib.use('Agg')
from matplotlib import rc
rc('font',**{'family':'serif','serif':'Computer Modern Roman','size':12})
rc('text', usetex=True)
from matplotlib import cm
import numpy as np
from matplotlib.colors import LogNorm
from mpl_toolkits.axes_grid1 import make_axes_locatable
#b = pf.open("wfc3_f160w_clean_central100.fits")[1].data[0:120,:]
fl= 1e-5
f = .05
g = .01
H = 3
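# Super-resolution PSF model (stored as log-values, hence the exp), plus the
# per-patch data, bad-pixel masks, sub-pixel centers, fluxes and backgrounds.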
X = np.exp(np.loadtxt("superb_wfc_mean_iter_5_nfl.txt"))
data = pf.open("wfc3_f160w_clean_central100.fits")[0].data[0:120,:]
mask = pf.open("wfc3_f160w_clean_central100.fits")[1].data[0:120,:]
mask[mask == 0] = -1
mask[mask > 0] = False
mask[mask < 0] = True
mask = mask == True
cx = np.loadtxt("wfc3_f160w_clean_central100_matchedfilterpoly_cx.txt")[0:120]
cy = np.loadtxt("wfc3_f160w_clean_central100_matchedfilterpoly_cy.txt")[0:120]
F = np.loadtxt("superb_wfc_flux_iter_5_nfl.txt")#.reshape(75,75)
B = np.loadtxt("superb_wfc_bkg_iter_5_nfl.txt")#.reshape(75,75)
MS = h5py.File('masked_samplerx3.hdf5','r')
MD = h5py.File('masked_data.hdf5','r')
masked_samplers = MS["masked_samplerx3"]#[random]
masked_data = MD["masked_data"]#[random]
import sampler
import time
import scipy.optimize as op
def function(theta, masked_data, mask , sr_psf, flux, bkg, floor, gain):
"""
Inputs:
theta = [old_cx, old_cy],
where:
           old_cx = current sub-pixel shift of the center of the star in x at the native pixel resolution
           old_cy = current sub-pixel shift of the center of the star in y at the native pixel resolution
masked_data = patch with flagged pixels masked out
mask = flagged pixels
sr_psf = super resolution psf model
floor = floor variance of the noise model
gain = gain of the noise model
Outputs:
(non-regularized) NLL of the patch.
Note that the regularization term is
only a function of superresolution psf,
so we do not include it here.
"""
Kp = sampler.imatrix_new(25, H, theta[0], theta[1])[: , mask] # masked sampler
model = flux*np.dot(sr_psf , Kp) + bkg # maksed model
var = floor + gain * np.abs(model)
res = masked_data - model
func = 0.5*np.sum(((res)**2.)/var) + 0.5*np.sum(np.log(var))
return func
def fit(theta , masked_data, mask , sr_psf, flux, bkg, floor, gain):
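    """Minimize the masked-patch NLL over the sub-pixel center via a simplex search (op.fmin)."""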
#size = I.shape[0]
#x = np.linspace(0.5, size - .5, size)
#y = x[:,np.newaxis]
#x0_true = I.shape[0]/2.
#y0_true = I.shape[0]/2.
#flux0_true = 1.
#size = I.shape[0]
#x = np.linspace(0.5, size - .5, size)
#y = x[:,np.newaxis]
nll = lambda *args: function(*args)
results = op.fmin(nll , [theta[0] , theta[1]] , args = (masked_data, mask , sr_psf, flux, bkg, floor, gain))# , disp = False)
#flux0_ml , x0_ml , y0_ml = results["x"]
return results[0] , results[1]
for p in range(0,2):
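    # For each patch: re-fit its sub-pixel center, rebuild the model, and plot data / model / chi.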
a = time.time()
    print(cx[p], cy[p])
x, y = fit((cx[p] , cy[p]) , data[p,mask[p]], mask[p] , X, F[p], B[p], 0.05, 0.01)
Kp = sampler.imatrix_new(25, H, x,y)
    print(time.time() - a)
#Kp = sampler_new.imatrix_new(25, H , cx[p] , cy[p])
model = F[p]*np.dot(fl+X,Kp) + B[p]
resi = (data[p,:] - model).reshape(25,25)
res = (data[p,:] - model)*100/data[p,:]
res = res.reshape(25,25)
chi = (data[p,:] - model)/(f+g*np.abs(model))**.5
chi = chi.reshape(25,25)
maskp = mask[p,:].reshape(25,25)
#res[maskp!=0] = np.nan
chi[maskp!=True] = np.nan
#resi[maskp!=0] = np.nan
mi = min(data[p , mask[p]].min(), model[mask[p]].min())
ma = max(data[p , mask[p]].max(), model[mask[p]].max())
plt.figure(figsize=(10,10))
plt.subplot(1,3,1)
plt.set_cmap('RdBu')
ax = plt.gca()
im = ax.imshow(data[p].reshape(25,25) , interpolation="None" , origin="lower" , norm = LogNorm(), vmin = mi , vmax = ma)
plt.title("Data")
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.2)
plt.colorbar(im , cax = cax)
ax.set_xticks(())
ax.set_yticks(())
plt.subplot(1,3,2)
ax = plt.gca()
im = ax.imshow(model.reshape(25,25) , interpolation = "None" , origin = "lower",norm=LogNorm(), vmin = mi , vmax = ma)
plt.title("Model")
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.2)
plt.colorbar(im , cax = cax)
ax.set_xticks(())
ax.set_yticks(())
plt.subplot(1,3,3)
ax = plt.gca()
im = ax.imshow(chi.reshape(25,25) , interpolation = "None" , origin = "lower")
plt.title("Chi")
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.2)
plt.colorbar(im , cax = cax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
plt.close()
| mit |