repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes) |
---|---|---|---|---|---|
cybernet14/scikit-learn | doc/conf.py | 210 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctest_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
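For orientation, sphinx.ext.linkcode only requires that conf.py define a linkcode_resolve(domain, info) callable returning a URL or None; make_linkcode_resolve builds such a callable with revision and line-number handling. A minimal hand-rolled resolver might look like the following sketch (illustrative only: the module-to-path mapping is an assumption, and the real helper in github_link.py also resolves source line numbers via inspect):

def linkcode_resolve(domain, info):
    # Only Python objects get a source link; everything else returns None.
    if domain != 'py' or not info.get('module'):
        return None
    path = info['module'].replace('.', '/')
    return ('https://github.com/scikit-learn/scikit-learn/'
            'blob/master/%s.py' % path)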
arbazkhan002/datasketch | benchmark/weighted_minhash_benchmark.py | 1 | 2445 | '''
Benchmarking the performance and accuracy of WeightedMinHash.
'''
import time, logging, random
from hashlib import sha1
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from datasketch import WeightedMinHashGenerator
logging.basicConfig(level=logging.INFO)
def run_perf(dim, num_rep, sample_size):
wmg = WeightedMinHashGenerator(dim, sample_size=sample_size)
logging.info("WeightedMinHash using %d samples" % sample_size)
data = np.random.uniform(0, dim, (num_rep, dim))
durs = []
for i in range(num_rep):
start = time.clock()
wmg.minhash(data[i])
duration = (time.clock() - start) * 1000
durs.append(duration)
ave = np.mean(durs)
logging.info("Generated %d minhashes, average time %.4f ms" % (num_rep, ave))
return ave
def jaccard(v1, v2):
min_sum = np.sum(np.minimum(v1, v2))
max_sum = np.sum(np.maximum(v1, v2))
return float(min_sum) / float(max_sum)
def run_acc(dim, num_rep, sample_size):
logging.info("WeightedMinHash using %d samples" % sample_size)
wmg = WeightedMinHashGenerator(dim, sample_size=sample_size)
data1 = np.random.uniform(0, dim, (num_rep, dim))
data2 = np.random.uniform(0, dim, (num_rep, dim))
errs = []
for i in range(num_rep):
wm1 = wmg.minhash(data1[i])
wm2 = wmg.minhash(data2[i])
j_e = wm1.jaccard(wm2)
j = jaccard(data1[i], data2[i])
errs.append(abs(j - j_e))
ave = np.mean(errs)
logging.info("%d runs, mean error %.4f" % (num_rep, ave))
return ave
sample_sizes = range(10, 160, 10)
num_rep = 100
dim = 5000
output = "weighted_minhash_benchmark.png"
logging.info("> Running performance tests")
run_times = [run_perf(dim, num_rep, n) for n in sample_sizes]
logging.info("> Running accuracy tests")
errs = [run_acc(dim, num_rep, n) for n in sample_sizes]
logging.info("> Plotting result")
fig, axe = plt.subplots(1, 2, sharex=True, figsize=(10, 4))
ax = axe[1]
ax.plot(sample_sizes, run_times, marker='+')
ax.set_xlabel("Number of samples")
ax.set_ylabel("Running time (ms)")
ax.set_title("WeightedMinHash performance")
ax.grid()
ax = axe[0]
ax.plot(sample_sizes, errs, marker='+')
ax.set_xlabel("Number of samples")
ax.set_ylabel("Absolute error in Jaccard estimation")
ax.set_title("WeightedMinHash accuracy")
ax.grid()
fig.savefig(output, bbox_inches="tight")
logging.info("Plot saved to %s" % output)
| mit |
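A minimal usage sketch of the datasketch calls exercised by this benchmark (WeightedMinHashGenerator, minhash and jaccard); the dimension, sample_size and input vectors below are arbitrary illustrative values:

import numpy as np
from datasketch import WeightedMinHashGenerator

dim = 100
wmg = WeightedMinHashGenerator(dim, sample_size=128)
v1 = np.random.uniform(0, 10, dim)
v2 = np.random.uniform(0, 10, dim)
wm1 = wmg.minhash(v1)
wm2 = wmg.minhash(v2)
print(wm1.jaccard(wm2))  # estimated weighted Jaccard similarity of v1 and v2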
untom/scikit-learn | sklearn/linear_model/ridge.py | 89 | 39360 | """
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
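# Illustrative aside (not part of scikit-learn): the "kernel ridge" and
# "linear ridge" forms written in the comments above give the same weight
# vector, since X.T * inv(X X^t + alpha*Id) == inv(X^t X + alpha*Id) * X.T.
# A minimal dense numpy check of that identity, assuming random data:
def _check_kernel_vs_linear_ridge():
    rng = np.random.RandomState(0)
    X = rng.randn(5, 20)  # n_features > n_samples
    y = rng.randn(5)
    alpha = 1.0
    w_kernel = X.T.dot(np.linalg.solve(X.dot(X.T) + alpha * np.eye(5), y))
    w_linear = np.linalg.solve(X.T.dot(X) + alpha * np.eye(20), X.T.dot(y))
    assert np.allclose(w_kernel, w_linear)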
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
coefs[i] = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)[0]
return coefs
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def _rescale_data(X, y, sample_weight):
"""Rescale data so as to support sample_weight"""
n_samples = X.shape[0]
sample_weight = sample_weight * np.ones(n_samples)
sample_weight = np.sqrt(sample_weight)
sw_matrix = sparse.dia_matrix((sample_weight, 0),
shape=(n_samples, n_samples))
X = safe_sparse_dot(sw_matrix, X)
y = safe_sparse_dot(sw_matrix, y)
return X, y
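# Illustrative aside (not part of scikit-learn): the sqrt rescaling above
# works because minimizing sum_i w_i * (y_i - x_i . coef)**2 is the same
# problem as ordinary least squares on (sqrt(w_i) * x_i, sqrt(w_i) * y_i).
# A small dense numpy check of that equivalence, assuming random data:
def _check_sqrt_weight_rescaling():
    rng = np.random.RandomState(0)
    X, y = rng.randn(30, 3), rng.randn(30)
    sw = rng.uniform(0.5, 2.0, 30)
    Xr = X * np.sqrt(sw)[:, np.newaxis]
    yr = y * np.sqrt(sw)
    W = np.diag(sw)
    coef_weighted = np.linalg.solve(X.T.dot(W).dot(X), X.T.dot(W).dot(y))
    coef_rescaled = np.linalg.solve(Xr.T.dot(Xr), Xr.T.dot(yr))
    assert np.allclose(coef_weighted, coef_rescaled)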
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is set, then
the solver will automatically be set to 'cholesky'
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional information
depending on the solver used.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
Notes
-----
This function won't compute the intercept.
"""
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
# cholesky if it's a dense array and cg in
# any other case
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
# Sample weight can be implemented via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr'):
raise ValueError('Solver %s not understood' % solver)
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == "lsqr":
coef = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
return coef
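# A minimal usage sketch of ridge_regression on dense data (illustrative
# values; any of the documented solvers could be passed instead of
# 'cholesky'):
#
#     >>> X = np.random.randn(20, 5)
#     >>> y = np.random.randn(20)
#     >>> coef = ridge_regression(X, y, alpha=1.0, solver='cholesky')
#     >>> coef.shape
#     (5,)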
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_ = ridge_regression(X, y,
alpha=self.alpha,
sample_weight=sample_weight,
max_iter=self.max_iter,
tol=self.tol,
solver=self.solver)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}
shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto"):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg'}
Solver to use in the computational
routines. 'svd' will use a Singular value decomposition to obtain
the solution, 'cholesky' will use the standard
scipy.linalg.solve function, 'sparse_cg' will use the
conjugate gradient solver as found in
scipy.sparse.linalg.cg while 'auto' will choose the most
appropriate depending on the matrix X. 'lsqr' uses
the dedicated regularized least-squares routine provided by scipy.
tol : float
Precision of the solution.
Attributes
----------
coef_ : array, shape = [n_features] or [n_classes, n_features]
Weight vector(s).
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto"):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight' : sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
If an integer is passed, it is the number of folds for KFold cross
validation. Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
gcv_mode : {None, 'auto', 'svd', eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use svd if n_samples > n_features or when X is a sparse
matrix, otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter.
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``(2*C)^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator, optional
If None, Generalized Cross-Validation (efficient Leave-One-Out)
will be used.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
| bsd-3-clause |
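The leave-one-out identity stated in the _RidgeGCV notes above (looe = c / diag(G), with K = X X^T, G = (K + alpha*Id)^-1 and c = G y) can be verified against brute-force refitting using nothing but numpy. A small self-contained sketch, assuming a fixed alpha and no intercept:

import numpy as np

rng = np.random.RandomState(0)
n_samples, n_features, alpha = 20, 5, 1.0
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)

G = np.linalg.inv(X.dot(X.T) + alpha * np.eye(n_samples))
c = G.dot(y)
looe_closed_form = c / np.diag(G)

looe_brute_force = np.empty(n_samples)
for i in range(n_samples):
    mask = np.arange(n_samples) != i
    w = np.linalg.solve(X[mask].T.dot(X[mask]) + alpha * np.eye(n_features),
                        X[mask].T.dot(y[mask]))
    looe_brute_force[i] = y[i] - X[i].dot(w)

print(np.allclose(looe_closed_form, looe_brute_force))  # True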
giorgiop/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 42 | 11137 | from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from scipy.linalg import eigh
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold.spectral_embedding_ import _graph_connected_component
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
from sklearn.utils.graph import graph_laplacian
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.testing import assert_true, assert_equal, assert_raises
from sklearn.utils.testing import SkipTest
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_sparse_graph_connected_component():
rng = np.random.RandomState(42)
n_samples = 300
boundaries = [0, 42, 121, 200, n_samples]
p = rng.permutation(n_samples)
connections = []
for start, stop in zip(boundaries[:-1], boundaries[1:]):
group = p[start:stop]
# Connect all elements within the group at least once via an
# arbitrary path that spans the group.
for i in range(len(group) - 1):
connections.append((group[i], group[i + 1]))
# Add some more random connections within the group
min_idx, max_idx = 0, len(group) - 1
n_random_connections = 1000
source = rng.randint(min_idx, max_idx, size=n_random_connections)
target = rng.randint(min_idx, max_idx, size=n_random_connections)
connections.extend(zip(group[source], group[target]))
# Build a symmetric affinity matrix
row_idx, column_idx = tuple(np.array(connections).T)
data = rng.uniform(.1, 42, size=len(connections))
affinity = coo_matrix((data, (row_idx, column_idx)))
affinity = 0.5 * (affinity + affinity.T)
for start, stop in zip(boundaries[:-1], boundaries[1:]):
component_1 = _graph_connected_component(affinity, p[start])
component_size = stop - start
assert_equal(component_1.sum(), component_size)
# We should retrieve the same component mask by starting by both ends
# of the group
component_2 = _graph_connected_component(affinity, p[stop - 1])
assert_equal(component_2.sum(), component_size)
assert_array_equal(component_1, component_2)
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2, n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# Test of internal _graph_connected_component before connection
component = _graph_connected_component(affinity, 0)
assert_true(component[:n_sample].all())
assert_true(not component[n_sample:].any())
component = _graph_connected_component(affinity, -1)
assert_true(not component[:n_sample].any())
assert_true(component[n_sample:].all())
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
# Test that SpectralClustering fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
def test_spectral_embedding_unnormalized():
# Test that spectral_embedding is also processing unnormalized laplacian
# correctly
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
n_components = 8
embedding_1 = spectral_embedding(sims,
norm_laplacian=False,
n_components=n_components,
drop_first=False)
# Verify using manual computation with dense eigh
laplacian, dd = graph_laplacian(sims, normed=False, return_diag=True)
_, diffusion_map = eigh(laplacian)
embedding_2 = diffusion_map.T[:n_components] * dd
embedding_2 = _deterministic_vector_sign_flip(embedding_2).T
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
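Beyond the fixtures above, the simplest end-to-end pattern these tests exercise is fit_transform on raw coordinates with an rbf affinity. A minimal sketch using only calls that appear in the tests (data and parameter values are arbitrary):

import numpy as np
from sklearn.manifold import SpectralEmbedding

X = np.random.RandomState(0).randn(100, 5)
embedding = SpectralEmbedding(n_components=2, affinity="rbf", gamma=1.0,
                              random_state=0).fit_transform(X)
print(embedding.shape)  # (100, 2)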
JohnStarich/github-code-recommendations | data-scripts/word-diff.py | 1 | 4631 | #!/usr/bin/env python3
import re
import numpy as np
import pandas as pd
from pymongo import MongoClient
from collections import Counter
import math
from sklearn.metrics import accuracy_score
client = MongoClient()
db = client.github
collection = db.events
data = list(collection.find(
{"type": "PullRequestEvent", "payload.pull_request.merged": True, "payload.pull_request.head.repo.language": "Python", "word_diff": {"$exists": True, "$ne": ""}},
{"word_diff": 1},
))
print("Data size = {}".format(len(data)))
plus = re.compile("{\+ (.*?) \+}", flags=re.VERBOSE | re.DOTALL)
minus = re.compile("\[- (.*?) -\]", flags=re.VERBOSE | re.DOTALL)
adds = Counter()
removes = Counter()
def get_matches(data):
add = plus.findall(data)
remove = minus.findall(data)
for match in add:
match = cleanup(match)
if match:
for line in match.split():
if line:
adds[line] += 1
for match in remove:
match = cleanup(match)
if match:
for line in match.split():
if line:
removes[line] += 1
return add, remove
def cleanup(match):
return match.replace(" ", "").replace("\t", "")
for datum in data:
get_matches(datum["word_diff"])
ngram_add = {}
ngram_remove = {}
def getNGrams(text, n):
text = (" " * (n - 1)) + text + " "
return [text[i:i+n] for i in range(len(text) - n + 1)]
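# For example, with the padding applied above:
#   getNGrams("ab", 2) -> [" a", "ab", "b "]
#   getNGrams("ab", 3) -> ["  a", " ab", "ab "]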
def getConditionalCounts(sentences, n):
condCounts = {}
for sentence in sentences:
ngrams = getNGrams(sentence, n)
for gram in ngrams:
context, lastChar = gram[:n - 1], gram[-1]
condCounts.setdefault(context, {}).setdefault(lastChar, 0)
condCounts[context][lastChar] += 1
return condCounts
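# For example:
#   getConditionalCounts(["ab"], 2)
#       -> {" ": {"a": 1}, "a": {"b": 1}, "b": {" ": 1}}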
class CharNGram:
def __init__(self, language, conditionalCounts, n):
self.language = language
self.condCounts = conditionalCounts
self.n = n
self._getNormalizedCounts()
def _contextcounttotals(self, ctx):
if ctx in self.condCounts:
return sum(self.condCounts[ctx].values())
else:
return 0
def _getNormalizedCounts(self):
for ctx, counts in self.condCounts.items():
for lastChar, count in counts.items():
self.condCounts[ctx][lastChar] = \
(count + 1)/float(26 + self._contextcounttotals(ctx))
"""
Using conditional frequency distribution,
calculate and return p(c | ctx)
"""
def ngramProb(self, ctx, c):
return self.condCounts.get(ctx, {}).get(c, 1.0/float(26 + self._contextcounttotals(ctx)))
""" Multiply ngram probabilites for each ngram in word """
def wordProb(self, word):
prob = 1.0
for ctx, counts in getConditionalCounts([word], self.n).items():
for lastChar, count in counts.items():
prob *= self.ngramProb(ctx, lastChar) * count
return math.log(prob)
class CodeModel:
def __init__(self, models):
self.models = models
def guess(self, word):
highestProb = max(model.wordProb(word.lower())
for model in self.models)
guess = [model for model in self.models
if model.wordProb(word.lower()) == highestProb]
return guess[0].language
def prob(self, language, word):
return [model for model in self.models
if model.language == language][0].wordProb(word.lower())
ngram_adds = getConditionalCounts(adds.keys(), 5)
ngram_removes = getConditionalCounts(removes.keys(), 5)
good = CharNGram("Good", ngram_adds, 5)
bad = CharNGram("Bad", ngram_removes, 5)
cm = CodeModel([good, bad])
correct = 0
incorrect = 0
pred_good = []
pred_bad = []
for match in adds:
try:
guess = cm.guess(match)
except:
continue
if guess == "Good":
correct += 1
pred_good.append("Good")
else:
incorrect += 1
pred_good.append("Bad")
def good():
for _ in pred_good:
yield "Good"
print("Accuracy for Good Code: {}".format(accuracy_score(list(good()), pred_good)))
for match in removes:
try:
guess = cm.guess(match)
except:
continue
if guess == "Bad":
correct += 1
pred_bad.append("Bad")
else:
incorrect += 1
pred_bad.append("Good")
def bad():
for _ in pred_bad:
yield "Bad"
print("Accuracy for Bad Code: {}".format(accuracy_score(list(bad()), pred_bad)))
print("Ratio: Correct/Total = {}".format(correct/(correct + incorrect)))
print(incorrect + correct)
| apache-2.0 |
maggiemaes/Methods-for-BAX-recruitment-analysis | plotters.py | 1 | 8978 | import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import analysis_tools as tools
import scipy.spatial as spatial
from scipy.stats import norm, rayleigh
from itertools import cycle
from scipy import stats
import numpy as np
from pylab import *
import matplotlib
import math
def plot_bax_oligo(data, fit=True, verbose=False):
color = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
colorcycle = cycle(color)
for id, timeseries in data.items():
acolor = colorcycle.next()
x, y = tools.get_xy(timeseries)
plt.plot(x, y, color=acolor, label="mito " + str(id), linewidth=2)
if not fit:
continue
if fit:
xfit, yfit, _, _ = tools.bax_fit(timeseries, id)
plt.plot(xfit, yfit, color = acolor, linewidth=2, label= "mito " + str(id) + " fit")
plt.xlabel('Time (minutes)')
plt.ylabel('Percent Baseline')
plt.axhline(y = 0, linewidth=3, color='black')
plt.axvline(x=0, linewidth=3, color="black")
plt.xticks(size = 14)
plt.yticks(size = 14)
plt.legend(loc=2, shadow=False, prop={'size':8}, bbox_transform=plt.gcf().transFigure)
#plt.legend(bbox_to_anchor=(0, 0, 1, 1), bbox_transform=plt.gcf().transFigure, prop={'size':2})
    # plt.title(title)  # 'title' is not defined in this scope; set a title string here if needed.
plt.show()
def plot_scatter(x, y):
plt.scatter(x, y)
plt.axhline(y = 0, linewidth=3, color='black')
plt.axvline(x=0, linewidth=3, color="black")
plt.ylim(0, ymax = 3)
plt.xlim(0, xmax = 1000)
plt.xlabel("Cell")
plt.ylabel("Time (min)")
plt.show()
def plot_oligo_double_axis(x, x2, y, y2):
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(x, x2, 'o', color = 'black')
ax1.set_ylabel('Rate of Oligomerization')
ax2 = ax1.twinx()
ax2.plot(y, y2, 'o', color = 'r')
ax2.set_ylabel('Time of initiation', color='r')
for tl in ax2.get_yticklabels():
tl.set_color('r')
plt.xlim(0, max(x) +1)
plt.ylim(0, max(y2) +20)
plt.show()
def plot_cytc(data, fit=True, verbose=False):
color = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
colorcycle = cycle(color)
for id, timeseries in data.items():
acolor = colorcycle.next()
x, y = tools.get_xy(timeseries)
#print x, y, id
if not fit:
plt.plot(x, y, color = acolor, label=str(id), linewidth=2)
continue
if fit:
xfit, yfit, popt = tools.cytc_fit(timeseries, id)
if xfit != None:
#plt.plot(x, y, xfit, yfit, color = acolor, linewidth=2, label="mito " + str(id) + " fit")
i = tools.sigmoid_inflection(xfit, popt[0], popt[1], popt[2], popt[3], popt[4])
slope_d1 = tools.sigmoid_d1(xfit[i], *popt)
                if slope_d1 == 0 or math.isnan(slope_d1):
print slope_d1, "slope_d1"
max_line = sum(yfit[:20]) / len(yfit[:20])
min_line = sum(yfit[480:]) / len(yfit[480:])
y_inflection = ((max_line - min_line) / 2) + min_line
fl = tools.crosses(yfit, y_inflection)
x_inflection = xfit[fl]
print max_line, min_line, x_inflection
b = y_inflection - (x_inflection * 0)
x_line = range(len(xfit))
for point in range(len(x_line)):
y_line = (0* point) + b
plt.plot(xfit, yfit, color= 'green')
plt.axvline(x= x_inflection, color = 'black')
plt.plot(x_inflection, y_inflection, 'o', color= 'g')
plt.show()
elif i != len(xfit) - 1 and i != 0:
slope = tools.sigmoid_d1(xfit[i], *popt)
yy = []
yymin = []
bb = yfit[i] - (xfit[i] * slope)
xx = range(len(xfit))
for vals in range(len(xx)):
yys = (slope * vals) + bb
yy.append(yys)
xxmin = np.arange(-500, 500, 5)
for vals in range(len(xxmin)):
yymins = tools.sigmoid(vals, popt[0], popt[1], popt[2], popt[3], popt[4])
yymin.append(yymins)
cmin_line = (sum(yymin[180:]) / len(yymin[180:]))
cmax_line= cmin_line + (2 * (yfit[i] - cmin_line))
cytc_decay = tools.crosses(yy, cmax_line)
plt.plot(xfit, yfit, color = 'blue', linewidth=2)
plt.axvline(x= cytc_decay, linewidth=2, color= 'red')
plt.plot(xfit[i], yfit[i], 'o', color='r')
#cyt_decay = xfit[not_zero[0]]
#cyt_sat = xfit[not_zero[-1]]
print slope_d1, i, "=parangs"
plt.xlabel('Time (minutes)')
plt.ylabel('Percent Baseline')
plt.axhline(y = 0, linewidth=3, color='black')
#plt.axvline(x= cyt_decay, linewidth=3, color="black")
# # plt.axvline(x= cytc_decay, linewidth=2, color= 'red')
# plt.xticks(size = 14)
# plt.yticks(size = 14)
# plt.legend(loc=2, shadow=False, prop={'size':8}, bbox_transform=plt.gcf().transFigure) # continue
plt.show()
def plot_dist_rate(data, xyzs, center):
ks = {}
for id, timeseries in data.items():
x, y = tools.get_xy(timeseries)
try:
_, _, k = tools.get_sigmoid_fit(x, y, verbose=True)
ks[id] = k
except RuntimeError:
continue
dists = tools.distance_from_center(xyzs, center)
print dists
x = []
y = []
for id, _ in ks.items():
x.append(dists[id])
y.append(ks[id])
plt.scatter(x,y)
plt.show()
def plot_compiled_dist_rate(xs, ys):
slope, intercept, r_value, p_value, std_err = stats.linregress(xs, ys)
line = [slope * x + intercept for x in xs]
print "rval", r_value, "pval", p_value
plt.xlabel('Distance from nucleus (um)')
plt.ylabel('Rate of Oligomerization (RFU / min)')
    plt.plot(xs, line, 'r-', linewidth=2,
             label="r = {:.2f}, pval = {:.2f}".format(r_value, p_value))
plt.scatter(xs, ys, facecolor="black", linewidth=2)
legend = plt.legend(loc=1)
frame = legend.get_frame()
frame.set_linewidth(2)
plt.rcParams['axes.linewidth'] = 2
plt.axhline(y = -0.5, linewidth=3, color='black')
plt.axhline(y = 3, linewidth=3, color='black')
plt.axvline(x=0, linewidth=3, color="black")
plt.axvline(x=30, linewidth=3, color="black")
plt.show()
def plot_agg_initiation(x):
bins_bound = np.linspace(0, 1000, 100)
#events, edges, patches = hist(slopes, bins)
#plt.plot(bins)#num_bins = 100
n, bins, patches = plt.hist(x, bins_bound, facecolor = 'white', linewidth = 0)
norm_data = []
# print bins, bins_bound
for i in range(len(n)):
norm = (n[i] / sum(n))
print norm, "=norm"
norm_data.append(norm)
norm_data.append(0)
print norm_data, sum(norm_data), len(norm_data)
width = bins_bound[1]- bins_bound[0]
print width
plt.bar(bins_bound, norm_data, width, color= "gray")
#plt.hist(x, bins=num_bins, color = 'black')
plt.xlim(xmin = 0, xmax = 1000)
plt.ylim(ymax = 0.5)
plt.xlabel('Time (min)')
plt.ylabel(' Percent of Frequency * 100 (of mitochondria)')
plt.title('Time of Initiation After STS/Hdac3 Addition')
plt.show()
print len(x)
def plot_oligo_rate_histo(norm_data):
# norm_data = []
bins_bound = np.linspace(0, 5, 36)
# n, bins, patches = plt.hist(slopes, bins_bound, normed=True, stacked = True)
# total = sum(n)
# for i in range(len(n)):
# norm = (n[i] / total)
# print norm, "=norm"
# norm_data.append(norm)
# print norm_data, sum(norm_data)
plt.hist(norm_data, bins=bins_bound, color = 'black')
plt.xlabel('Rate of BAX Oligomerization\n(RFU min^-1)' , size = 16)
plt.ylabel('Frequency\n(number of mitochondria)', size = 16)
plt.legend()
#plt.title('Rate of Aggregation per Mitochondria of One Cell ')
#plt.ylim(ymax = 2)
plt.xlim(xmax = 1)
#plt.axes(frameon = False)
#ax1.get_xaxis().tick_bottom()
#ax1.get_yaxis().tick_left()
#plt.rcParams['axes.linewidth'] = 2
plt.axhline(y = 0, linewidth=3, color='black')
plt.axvline(x=0, linewidth=3, color="black")
plt.xticks(size = 14)
plt.yticks(size = 14)
plt.minorticks_off()
plt.tick_params('both', length=8, width=2, which='major')
plt.show()
def plot_cell_start(xs, ys, xfit, yfit, k, xinfl, yinfl, y0):
plt.plot(xs, ys, linewidth=2)
plt.plot(xfit, yfit, linewidth=2)
line = [k*x + yinfl - k*xinfl for x in xfit]
# plt.plot(xfit, line, label="Maximum Rate", linewidth=2)
y0s = [y0 for x in xfit]
# plt.plot(xfit, y0s, label="Minimum Boundary", linewidth=2, color= "black")
plt.ylim([0, 5])
plt.xlim([0, 100])
plt.xlabel("Relative Time (min)")
plt.ylabel("Relative Fluorescence")
plt.axhline(y=0, linewidth = 2.5, color= 'black')
plt.axvline(x=0, linewidth = 2.5, color= 'black')
# legend = plt.legend(loc=2)
# frame = legend.get_frame()
# frame.set_linewidth(2)
# plt.rcParams['axes.linewidth'] = 2
# plt.show()
| mit |
Aasmi/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
                'n_informative': n_features // 10,
                'effective_rank': min(n_samples, n_features) // 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
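# Note on the Gram variants benchmarked above: precomputing G = X.T.dot(X) (and
# Xy) costs O(n_samples * n_features**2) up front but lets the solvers work with
# n_features-by-n_features quantities afterwards, which typically pays off when
# n_samples >> n_features and can hurt in the opposite regime.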
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
antoinecarme/pyaf | tests/demos/demo1.py | 1 | 1343 | import numpy as np
import pandas as pd
# generate a daily signal covering one year 2016 in a pandas dataframe
N = 365
np.random.seed(seed=1960)
df_train = pd.DataFrame({"Date" : pd.date_range(start="2016-01-25", periods=N, freq='D'),
"Signal" : (np.arange(N)//40 + np.arange(N) % 21 + np.random.randn(N))})
# print(df_train.head(N))
import pyaf.ForecastEngine as autof
# create a forecast engine. This is the main object handling all the operations
lEngine = autof.cForecastEngine()
# get the best time series model for predicting one week
lEngine.train(iInputDS = df_train, iTime = 'Date', iSignal = 'Signal', iHorizon = 7);
lEngine.getModelInfo() # => relative error 7% (MAPE)
# predict one week
df_forecast = lEngine.forecast(iInputDS = df_train, iHorizon = 7)
# list the columns of the forecast dataset
print(df_forecast.columns) #
# print the real forecasts
# Future dates : ['2017-01-19T00:00:00.000000000' '2017-01-20T00:00:00.000000000' '2017-01-21T00:00:00.000000000' '2017-01-22T00:00:00.000000000' '2017-01-23T00:00:00.000000000' '2017-01-24T00:00:00.000000000' '2017-01-25T00:00:00.000000000']
print(df_forecast['Date'].tail(7).values)
# signal forecast : [ 9.74934646 10.04419761 12.15136455 12.20369717 14.09607727 15.68086323 16.22296559]
print(df_forecast['Signal_Forecast'].tail(7).values)
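# Possible follow-up (not part of the original demo): persist the point forecast
# together with its confidence bounds. The bound column names below assume pyaf's
# usual '<signal>_Forecast_Lower_Bound' / '<signal>_Forecast_Upper_Bound' naming.
# cols = ['Date', 'Signal_Forecast',
#         'Signal_Forecast_Lower_Bound', 'Signal_Forecast_Upper_Bound']
# df_forecast[cols].tail(7).to_csv("demo1_forecast.csv", index=False)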
| bsd-3-clause |
ZENGXH/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
data points are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of Neighbours Classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
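# xx.ravel() and yy.ravel() flatten the mesh to 1-D; np.c_ stacks them into an
# (n_points, 2) array so every mesh node is scored with the same two features the
# classifier was trained on.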
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
liyu1990/sklearn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
tell us much about `y` when compared to just feature 1.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
EmmaIshta/QUANTAXIS | QUANTAXIS/QAFetch/QATushare.py | 1 | 3783 | # coding: utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2017 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import pandas as pd
import tushare as QATs
from QUANTAXIS.QAUtil import (QA_util_date_int2str, QA_util_date_stamp,
QA_util_log_info, QA_util_to_json_from_pandas)
def QA_fetch_get_stock_day(name, startDate='', endDate='', if_fq='01', type_='json'):
if (len(name) != 6):
name = str(name)[0:6]
if str(if_fq) in ['qfq', '01']:
if_fq = 'qfq'
elif str(if_fq) in ['hfq', '02']:
if_fq = 'hfq'
elif str(if_fq) in ['bfq', '00']:
if_fq = 'bfq'
else:
QA_util_log_info('wrong with fq_factor! using qfq')
if_fq = 'qfq'
data = QATs.get_k_data(str(name), startDate, endDate,
ktype='D', autype=if_fq, retry_count=200, pause=0.005).sort_index()
data['date_stamp'] = data['date'].apply(lambda x: QA_util_date_stamp(x))
data['fqtype'] = if_fq
if type_ in ['json']:
data_json = QA_util_to_json_from_pandas(data)
return data_json
elif type_ in ['pd', 'pandas', 'p']:
data['date'] = pd.to_datetime(data['date'])
data = data.set_index('date', drop=False)
data['date'] = data['date'].apply(lambda x: str(x)[0:10])
return data
def QA_fetch_get_stock_realtime():
data = QATs.get_today_all()
data_json = QA_util_to_json_from_pandas(data)
return data_json
def QA_fetch_get_stock_info(name):
data = QATs.get_stock_basics()
data_json = QA_util_to_json_from_pandas(data)
for i in range(0, len(data_json) - 1, 1):
data_json[i]['code'] = data.index[i]
return data_json
def QA_fetch_get_stock_tick(name, date):
if (len(name) != 6):
name = str(name)[0:6]
return QATs.get_tick_data(name, date)
def QA_fetch_get_stock_list():
df = QATs.get_stock_basics()
return list(df.index)
def QA_fetch_get_stock_time_to_market():
data = QATs.get_stock_basics()
return data[data['timeToMarket'] != 0]['timeToMarket'].apply(lambda x: QA_util_date_int2str(x))
def QA_fetch_get_trade_date(endDate, exchange):
data = QATs.trade_cal()
da = data[data.isOpen > 0]
data_json = QA_util_to_json_from_pandas(data)
message = []
for i in range(0, len(data_json) - 1, 1):
date = data_json[i]['calendarDate']
num = i + 1
exchangeName = 'SSE'
data_stamp = QA_util_date_stamp(date)
mes = {'date': date, 'num': num,
'exchangeName': exchangeName, 'date_stamp': data_stamp}
message.append(mes)
return message
# test
# print(get_stock_day("000001",'2001-01-01','2010-01-01'))
# print(get_stock_tick("000001.SZ","2017-02-21"))
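# Hypothetical usage sketch (requires a working tushare installation and network
# access; the column names come from tushare's get_k_data output):
# df = QA_fetch_get_stock_day('000001', '2017-01-01', '2017-06-30',
#                             if_fq='qfq', type_='pd')
# print(df[['open', 'close', 'volume']].head())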
| mit |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/io/test_date_converters.py | 2 | 1204 | from datetime import datetime
import numpy as np
import pandas.util.testing as tm
import pandas.io.date_converters as conv
def test_parse_date_time():
dates = np.array(["2007/1/3", "2008/2/4"], dtype=object)
times = np.array(["05:07:09", "06:08:00"], dtype=object)
expected = np.array([datetime(2007, 1, 3, 5, 7, 9), datetime(2008, 2, 4, 6, 8, 0)])
result = conv.parse_date_time(dates, times)
tm.assert_numpy_array_equal(result, expected)
def test_parse_date_fields():
days = np.array([3, 4])
months = np.array([1, 2])
years = np.array([2007, 2008])
result = conv.parse_date_fields(years, months, days)
expected = np.array([datetime(2007, 1, 3), datetime(2008, 2, 4)])
tm.assert_numpy_array_equal(result, expected)
def test_parse_all_fields():
hours = np.array([5, 6])
minutes = np.array([7, 8])
seconds = np.array([9, 0])
days = np.array([3, 4])
years = np.array([2007, 2008])
months = np.array([1, 2])
result = conv.parse_all_fields(years, months, days, hours, minutes, seconds)
expected = np.array([datetime(2007, 1, 3, 5, 7, 9), datetime(2008, 2, 4, 6, 8, 0)])
tm.assert_numpy_array_equal(result, expected)
| apache-2.0 |
guziy/basemap | examples/ccsm_popgrid.py | 2 | 2884 | from __future__ import (absolute_import, division, print_function)
"""
This example shows how to plot data on rectangular 2D grids
(grids that are not rectlinear in geographic or native map projection
coordinates).
An example of such a grid is the 'POP' grid which is used in
the ocean component NCAR Community Climate System Model (CCSM).
"POP" stands for "Parallel Ocean Program", which was developed
at Los Alamos.
These grids may be thought of as rectangular arrays wrapped around the
globe in the usual way, with one subscript, call it I, associated with
longitude and the other subscript, call it J, associated with latitude,
and then deformed in such a way as to move the top edge of the array to
a circle centered somewhere other than over the North Pole (typically,
over Greenland or Canada) and the bottom edge of the array to a circle
that is centered on the South Pole, but lies entirely within Antarctica.
The lines defined by the rows and columns of the rectangular arrays are
locally orthogonal to each other.
POP grids are used extensively in oceanographic and ice models.
"""
from matplotlib import rcParams
import numpy.ma as ma
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from netCDF4 import Dataset as NetCDFFile
# read in data from netCDF file.
infile = 'ccsm_popgrid.nc'
fpin = NetCDFFile(infile)
tlat = fpin.variables['TLAT'][:]
tlon = fpin.variables['TLONG'][:]
# masked array returned, masked where data == _FillValue
temp = fpin.variables['TEMP'][:]
fpin.close()
# make longitudes monotonically increasing.
tlon = np.where(np.greater_equal(tlon,min(tlon[:,0])),tlon-360,tlon)
# stack grids side-by-side (in longitiudinal direction), so
# any range of longitudes may be plotted on a world map.
tlon = np.concatenate((tlon,tlon+360),1)
tlat = np.concatenate((tlat,tlat),1)
temp = ma.concatenate((temp,temp),1)
tlon = tlon-360.
plt.figure(figsize=(6,8))
plt.subplot(2,1,1)
# subplot 1 just shows POP grid cells.
m = Basemap(projection='merc', lat_ts=20, llcrnrlon=-180, \
urcrnrlon=180, llcrnrlat=-84, urcrnrlat=84, resolution='c')
m.drawcoastlines()
m.fillcontinents(color='white')
x, y = m(tlon,tlat)
im = m.pcolormesh(x,y,ma.masked_array(np.zeros(temp.shape,'f'), temp.mask),
shading='faceted', antialiased=True, cmap=plt.cm.cool,
vmin=0, vmax=0)
# disclaimer: these are not really the grid cells because of the
# way pcolor interprets the x and y args.
plt.title('(A) CCSM POP Grid Cells')
# subplot 2 is a contour plot of surface temperature from the
# CCSM ocean model.
plt.subplot(2,1,2)
m.drawcoastlines()
m.fillcontinents(color='white')
CS1 = m.contourf(x,y,temp,15)
CS2 = m.contour(x,y,temp,15,colors='black',linewidths=0.5)
plt.title('(B) Surface Temp contours on POP Grid')
plt.show()
#plt.savefig('ccsm_popgrid.ps')
| gpl-2.0 |
jnsebgosselin/WHAT | gwhat/meteo/gapfill_weather_gui.py | 1 | 56739 | # -*- coding: utf-8 -*-
# Copyright © 2014-2018 GWHAT Project Contributors
# https://github.com/jnsebgosselin/gwhat
#
# This file is part of GWHAT (Ground-Water Hydrograph Analysis Toolbox).
# Licensed under the terms of the GNU General Public License.
from __future__ import division, unicode_literals
# ---- Standard library imports
# import csv
from time import sleep # ctime, strftime, sleep
import os
# ---- Third party imports
from PyQt5.QtCore import pyqtSlot as QSlot
from PyQt5.QtCore import pyqtSignal as QSignal
from PyQt5.QtCore import Qt, QThread, QDate, QRect
from PyQt5.QtGui import QBrush, QColor, QFont, QPainter, QCursor, QTextDocument
from PyQt5.QtWidgets import (QWidget, QPushButton, QGridLayout, QFrame,
QTabWidget, QLabel, QComboBox, QTextEdit,
QDateEdit, QSpinBox, QRadioButton, QCheckBox,
QProgressBar, QApplication, QMessageBox,
QFileDialog, QTableWidget, QHeaderView,
QStyleOptionHeader, QStyle, QDesktopWidget,
QTableWidgetItem)
import numpy as np
from xlrd.xldate import xldate_from_date_tuple
from xlrd import xldate_as_tuple
import matplotlib as mpl
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT
# ---- Local imports
from gwhat.meteo.gapfill_weather_algorithm2 import GapFillWeather
from gwhat.meteo.gapfill_weather_postprocess import PostProcessErr
from gwhat.meteo.merge_weather_data import WXDataMergerWidget
from gwhat.meteo.weather_reader import add_PET_to_weather_datafile
from gwhat.common import StyleDB
from gwhat.utils import icons
from gwhat.utils.icons import QToolButtonSmall
import gwhat.common.widgets as myqt
from gwhat.common.utils import delete_file
from gwhat.widgets.layout import HSep
class GapFillWeatherGUI(QWidget):
ConsoleSignal = QSignal(str)
def __init__(self, parent=None):
super(GapFillWeatherGUI, self).__init__(parent)
self.isFillAll_inProgress = False
self.FILLPARAM = GapFill_Parameters()
# Correlation calculation won't be triggered by events when
# CORRFLAG is 'off'
self.CORRFLAG = 'on'
self.wxdata_merger = WXDataMergerWidget()
# Setup gap fill worker and thread :
self.gapfill_worker = GapFillWeather()
self.gapfill_thread = QThread()
self.gapfill_worker.moveToThread(self.gapfill_thread)
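        # A minimal sketch of the Qt worker-object pattern used here (standard
        # Qt threading semantics assumed): once gapfill_thread is started, slots
        # of gapfill_worker run in that thread, so gap_fill_start() only needs
        #     self.gapfill_thread.started.connect(self.gapfill_worker.fill_data)
        #     self.gapfill_thread.start()
        # to launch the fill off the GUI thread.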
self.set_workdir(os.getcwd())
self.__initUI__()
def __initUI__(self):
self.setWindowIcon(icons.get_icon('master'))
# ---- Toolbar at the bottom
self.btn_fill = QPushButton('Fill Station')
self.btn_fill.setIcon(icons.get_icon('fill_data'))
self.btn_fill.setIconSize(icons.get_iconsize('small'))
self.btn_fill.setToolTip('<p>Fill the gaps in the daily weather data '
' for the selected weather station.</p>')
self.btn_fill_all = QPushButton('Fill All Stations')
self.btn_fill_all.setIconSize(icons.get_iconsize('small'))
self.btn_fill_all.setIcon(icons.get_icon('fill_all_data'))
self.btn_fill_all.setToolTip('<p>Fill the gaps in the daily weather '
' data for all the weather stations' +
' displayed in the list.</p>')
grid_toolbar = QGridLayout()
widget_toolbar = QFrame()
row = 0
col = 1
grid_toolbar.addWidget(self.btn_fill, row, col)
col += 1
grid_toolbar.addWidget(self.btn_fill_all, row, col)
grid_toolbar.setSpacing(5)
grid_toolbar.setContentsMargins(0, 0, 0, 0)
grid_toolbar.setColumnStretch(0, 100)
grid_toolbar.setColumnStretch(col+1, 100)
widget_toolbar.setLayout(grid_toolbar)
# ---- Target Station groupbox
self.target_station = QComboBox()
self.target_station.currentIndexChanged.connect(
self.target_station_changed)
self.target_station_info = QTextEdit()
self.target_station_info.setReadOnly(True)
self.target_station_info.setMaximumHeight(110)
self.btn_refresh_staList = QToolButtonSmall(icons.get_icon('refresh'))
self.btn_refresh_staList.setToolTip(
'Force the reloading of the weather data files')
self.btn_refresh_staList.clicked.connect(self.btn_refresh_isclicked)
btn_merge_data = QToolButtonSmall(icons.get_icon('merge_data'))
btn_merge_data.setToolTip(
'Tool for merging two ore more datasets together.')
btn_merge_data.clicked.connect(self.wxdata_merger.show)
self.btn_delete_data = QToolButtonSmall(icons.get_icon('clear'))
self.btn_delete_data.setEnabled(False)
self.btn_delete_data.setToolTip(
'Remove the currently selected dataset and delete the input '
'datafile. However, raw datafiles will be kept.')
self.btn_delete_data.clicked.connect(self.delete_current_dataset)
widgets = [self.target_station, self.btn_refresh_staList,
btn_merge_data, self.btn_delete_data]
# Generate the layout for the target station group widget.
self.tarSta_widg = QWidget()
tarSta_grid = QGridLayout(self.tarSta_widg)
row = 0
tarSta_grid.addWidget(QLabel('<b>Fill data for weather station :</b>'),
row, 0, 1, len(widgets))
row += 1
tarSta_grid.addWidget(self.target_station, 1, 0)
for col, widget in enumerate(widgets):
tarSta_grid.addWidget(widget, row, col)
row += 1
tarSta_grid.addWidget(self.target_station_info,
row, 0, 1, len(widgets))
tarSta_grid.setSpacing(5)
tarSta_grid.setColumnStretch(0, 500)
tarSta_grid.setContentsMargins(0, 0, 0, 10) # (L,T,R,B)
# Gapfill Dates :
label_From = QLabel('From : ')
self.date_start_widget = QDateEdit()
self.date_start_widget.setDisplayFormat('dd / MM / yyyy')
self.date_start_widget.setEnabled(False)
label_To = QLabel('To : ')
self.date_end_widget = QDateEdit()
self.date_end_widget.setEnabled(False)
self.date_end_widget.setDisplayFormat('dd / MM / yyyy')
self.fillDates_widg = QWidget()
fillDates_grid = QGridLayout()
row = 0
col = 0
fillDates_grid.addWidget(label_From, row, col)
col += 1
fillDates_grid.addWidget(self.date_start_widget, row, col)
row += 1
col = 0
fillDates_grid.addWidget(label_To, row, col)
col += 1
fillDates_grid.addWidget(self.date_end_widget, row, col)
fillDates_grid.setColumnStretch(row+1, 500)
fillDates_grid.setContentsMargins(0, 0, 0, 0) # (L, T, R, B)
fillDates_grid.setSpacing(10)
self.fillDates_widg.setLayout(fillDates_grid)
def station_sel_criteria(self):
# Widgets :
Nmax_label = QLabel('Nbr. of stations :')
self.Nmax = QSpinBox()
self.Nmax.setRange(0, 99)
self.Nmax.setSingleStep(1)
self.Nmax.setValue(4)
self.Nmax.setAlignment(Qt.AlignCenter)
ttip = ('<p>Distance limit beyond which neighboring stations'
' are excluded from the gapfilling procedure.</p>'
'<p>This condition is ignored if set to -1.</p>')
distlimit_label = QLabel('Max. Distance :')
distlimit_label.setToolTip(ttip)
self.distlimit = QSpinBox()
self.distlimit.setRange(-1, 9999)
self.distlimit.setSingleStep(1)
self.distlimit.setValue(100)
self.distlimit.setToolTip(ttip)
self.distlimit.setSuffix(' km')
self.distlimit.setAlignment(Qt.AlignCenter)
ttip = ('<p>Altitude difference limit over which neighboring '
' stations are excluded from the gapfilling procedure.</p>'
'<p>This condition is ignored if set to -1.</p>')
altlimit_label = QLabel('Max. Elevation Diff. :')
altlimit_label.setToolTip(ttip)
self.altlimit = QSpinBox()
self.altlimit.setRange(-1, 9999)
self.altlimit.setSingleStep(1)
self.altlimit.setValue(350)
self.altlimit.setToolTip(ttip)
self.altlimit.setSuffix(' m')
self.altlimit.setAlignment(Qt.AlignCenter)
# Layout :
container = QFrame()
grid = QGridLayout()
row = 0
grid.addWidget(self.Nmax, row, 1)
grid.addWidget(Nmax_label, row, 0)
row += 1
grid.addWidget(distlimit_label, row, 0)
grid.addWidget(self.distlimit, row, 1)
row += 1
grid.addWidget(altlimit_label, row, 0)
grid.addWidget(self.altlimit, row, 1)
grid.setContentsMargins(10, 0, 10, 0) # [L, T, R, B]
grid.setColumnStretch(2, 500)
grid.setSpacing(10)
container.setLayout(grid)
return container
def regression_model(self):
# ---- Widgets ----
self.RMSE_regression = QRadioButton('Ordinary Least Squares')
self.RMSE_regression.setChecked(True)
self.ABS_regression = QRadioButton('Least Absolute Deviations')
# ---- Layout ----
container = QFrame()
grid = QGridLayout()
row = 0
grid.addWidget(self.RMSE_regression, row, 0)
row += 1
grid.addWidget(self.ABS_regression, row, 0)
grid.setSpacing(5)
grid.setContentsMargins(10, 0, 10, 0) # (L, T, R, B)
container.setLayout(grid)
return container
def advanced_settings(self):
# ---- Row Full Error ----
self.full_error_analysis = QCheckBox('Full Error Analysis.')
self.full_error_analysis.setCheckState(Qt.Unchecked)
# ---- Row ETP ----
self.add_PET_ckckbox = QCheckBox('Add PET to data file.')
self.add_PET_ckckbox.setCheckState(Qt.Unchecked)
self.add_PET_ckckbox.setToolTip(
'<p>Add daily potential evapotranspiration, calculated '
'with the Thornthwaite (1948) method, to '
'the output weather data file.</p>')
self.btn_add_PET = QToolButtonSmall(icons.get_icon('openFile'))
self.btn_add_PET.setToolTip(
'<p>Add daily potential evapotranspiration, calculated '
'with the Thornthwaite (1948) method, to '
'an existing weather data file.</p>')
self.btn_add_PET.clicked.connect(self.btn_add_PET_isClicked)
pet_rowlayout = QGridLayout()
pet_rowlayout.addWidget(self.add_PET_ckckbox, 0, 0)
pet_rowlayout.addWidget(self.btn_add_PET, 0, 2)
pet_rowlayout.setContentsMargins(0, 0, 0, 0)
pet_rowlayout.setColumnStretch(1, 100)
# Figure format and language.
self.fig_format = QComboBox()
self.fig_format.addItems(PostProcessErr.SUPPORTED_FIG_FORMATS)
self.fig_language = QComboBox()
self.fig_language.addItems(PostProcessErr.SUPPORTED_LANGUAGES)
fig_opt_layout = QGridLayout()
fig_opt_layout.addWidget(QLabel("Figure output format : "), 0, 0)
fig_opt_layout.addWidget(self.fig_format, 0, 2)
fig_opt_layout.addWidget(QLabel("Figure labels language : "), 1, 0)
fig_opt_layout.addWidget(self.fig_language, 1, 2)
fig_opt_layout.setContentsMargins(0, 0, 0, 0)
fig_opt_layout.setColumnStretch(1, 100)
# ---- Row Layout Assembly ----
container = QFrame()
grid = QGridLayout(container)
grid.addWidget(self.full_error_analysis, 0, 0)
grid.addLayout(pet_rowlayout, 1, 0)
grid.addLayout(fig_opt_layout, 2, 0)
grid.setSpacing(5)
grid.setContentsMargins(10, 0, 10, 0) # [L, T, R, B]
grid.setRowStretch(grid.rowCount(), 100)
return container
# STACKED WIDGET :
cutoff_widg = station_sel_criteria(self)
MLRM_widg = regression_model(self)
advanced_widg = advanced_settings(self)
self.stack_widget = myqt.QToolPanel()
self.stack_widget.setIcons(icons.get_icon('triright'),
icons.get_icon('tridown'))
self.stack_widget.addItem(cutoff_widg, 'Stations Selection Criteria :')
self.stack_widget.addItem(MLRM_widg, 'Regression Model :')
self.stack_widget.addItem(advanced_widg, 'Advanced Settings :')
# ---- LEFT PANEL
grid_leftPanel = QGridLayout()
self.LEFT_widget = QFrame()
self.LEFT_widget.setFrameStyle(0) # styleDB.frame
row = 0
grid_leftPanel.addWidget(self.tarSta_widg, row, 0)
row += 1
grid_leftPanel.addWidget(self.fillDates_widg, row, 0)
row += 1
grid_leftPanel.addWidget(HSep(), row, 0)
row += 1
grid_leftPanel.addWidget(self.stack_widget, row, 0)
row += 2
grid_leftPanel.addWidget(HSep(), row, 0)
row += 1
grid_leftPanel.addWidget(widget_toolbar, row, 0)
grid_leftPanel.setVerticalSpacing(15)
grid_leftPanel.setRowStretch(row-2, 500)
grid_leftPanel.setContentsMargins(0, 0, 0, 0) # (L, T, R, B)
# grid_leftPanel.setColumnMinimumWidth(0, styleDB.sideBarWidth)
self.LEFT_widget.setLayout(grid_leftPanel)
# ---- Right Panel
self.FillTextBox = QTextEdit()
self.FillTextBox.setReadOnly(True)
# self.FillTextBox.setFrameStyle(styleDB.frame)
self.FillTextBox.setMinimumWidth(700)
# self.FillTextBox.setStyleSheet(
# "QTextEdit {background-color:transparent;}")
self.FillTextBox.setFrameStyle(0)
self.FillTextBox.document().setDocumentMargin(10)
self.sta_display_summary = QTextEdit()
self.sta_display_summary.setReadOnly(True)
# self.sta_display_summary.setStyleSheet(
# "QTextEdit {background-color:transparent;}")
self.sta_display_summary.setFrameStyle(0)
self.sta_display_summary.document().setDocumentMargin(10)
self.gafill_display_table = GapFillDisplayTable()
# grid_rightPanel = QGridLayout()
# new_table = QFrame()
# new_table.setFrameStyle(0)
# row = 0
# grid_rightPanel.addWidget(self.gafill_display_table2 , row, 0)
# row += 1
# grid_rightPanel.addWidget(self.gafill_display_table , row, 0)
#
# grid_rightPanel.setRowStretch(row, 500)
# grid_rightPanel.setColumnStretch(0, 500)
# grid_rightPanel.setSpacing(0)
# grid_rightPanel.setContentsMargins(0, 0, 0, 0) #(L, T, R, B)
#
# new_table.setLayout(grid_rightPanel)
RIGHT_widget = QTabWidget()
RIGHT_widget.addTab(self.FillTextBox, 'Correlation Coefficients')
RIGHT_widget.addTab(self.sta_display_summary, 'Missing Data Overview')
# RIGHT_widget.addTab(self.gafill_display_table,
# 'New Table (Work-in-Progress)')
# ---- Main grid
grid_MAIN = QGridLayout()
row = 0
grid_MAIN.addWidget(self.LEFT_widget, row, 0)
grid_MAIN.addWidget(RIGHT_widget, row, 1)
grid_MAIN.setColumnStretch(1, 500)
grid_MAIN.setRowStretch(0, 500)
grid_MAIN.setSpacing(15)
grid_MAIN.setContentsMargins(15, 15, 15, 15) # L, T, R, B
self.setLayout(grid_MAIN)
# ---- Progress Bar
self.pbar = QProgressBar()
self.pbar.setValue(0)
self.pbar.hide()
# ---- Events
# CORRELATION :
self.distlimit.valueChanged.connect(self.correlation_table_display)
self.altlimit.valueChanged.connect(self.correlation_table_display)
self.date_start_widget.dateChanged.connect(
self.correlation_table_display)
self.date_end_widget.dateChanged.connect(
self.correlation_table_display)
# GAPFILL :
self.gapfill_worker.ProgBarSignal.connect(self.pbar.setValue)
self.gapfill_worker.GapFillFinished.connect(
self.gapfill_worker_return)
self.gapfill_worker.ConsoleSignal.connect(self.ConsoleSignal.emit)
self.btn_fill.clicked.connect(self.gap_fill_btn_clicked)
self.btn_fill_all.clicked.connect(self.gap_fill_btn_clicked)
# =========================================================================
@property
def workdir(self):
return self.__workdir
def set_workdir(self, dirname):
self.__workdir = dirname
self.gapfill_worker.inputDir = os.path.join(dirname, 'Meteo', 'Input')
self.gapfill_worker.outputDir = os.path.join(
dirname, 'Meteo', 'Output')
self.wxdata_merger.set_workdir(os.path.join(dirname, 'Meteo', 'Input'))
def delete_current_dataset(self):
"""
Delete the current dataset source file and force a reload of the input
daily weather datafiles.
"""
current_index = self.target_station.currentIndex()
if current_index != -1:
basename = self.gapfill_worker.WEATHER.fnames[current_index]
dirname = self.gapfill_worker.inputDir
filename = os.path.join(dirname, basename)
delete_file(filename)
self.load_data_dir_content(reload=True)
def btn_refresh_isclicked(self):
"""
Handles when the button to refresh the list of input daily weather
datafiles is clicked
"""
self.load_data_dir_content(reload=True)
def load_data_dir_content(self, reload=False):
"""
Initiate the loading of weater data files contained in the
*/Meteo/Input folder and display the resulting station list in the
target station combobox.
"""
# Reset UI :
self.FillTextBox.setText('')
self.target_station_info.setText('')
self.target_station.clear()
QApplication.processEvents()
# Load data and fill UI with info :
self.CORRFLAG = 'off'
# Correlation calculation won't be triggered when this is off
if reload:
stanames = self.gapfill_worker.reload_data()
else:
stanames = self.gapfill_worker.load_data()
stanames = [] if stanames is None else stanames
self.target_station.addItems(stanames)
self.target_station.setCurrentIndex(-1)
self.sta_display_summary.setHtml(self.gapfill_worker.read_summary())
if len(stanames) > 0:
self.set_fill_and_save_dates()
self.CORRFLAG = 'on'
def set_fill_and_save_dates(self): # =====================================
"""
Set first and last dates of the data serie in the boxes of the
*Fill and Save* area.
"""
if len(self.gapfill_worker.WEATHER.DATE) > 0:
self.date_start_widget.setEnabled(True)
self.date_end_widget.setEnabled(True)
DATE = self.gapfill_worker.WEATHER.DATE
DateMin = QDate(DATE[0, 0], DATE[0, 1], DATE[0, 2])
DateMax = QDate(DATE[-1, 0], DATE[-1, 1], DATE[-1, 2])
self.date_start_widget.setDate(DateMin)
self.date_start_widget.setMinimumDate(DateMin)
self.date_start_widget.setMaximumDate(DateMax)
self.date_end_widget.setDate(DateMax)
self.date_end_widget.setMinimumDate(DateMin)
self.date_end_widget.setMaximumDate(DateMax)
def correlation_table_display(self): # ===================================
"""
        This method plots the table in the display area. It is separated from
the method <Correlation_UI> because red numbers and statistics
regarding missing data for the selected time period can be updated in
the table when the user changes the values without having to
recalculate the correlation coefficient each time.
"""
if self.CORRFLAG == 'on' and self.target_station.currentIndex() != -1:
self.FILLPARAM.limitDist = self.distlimit.value()
self.FILLPARAM.limitAlt = self.altlimit.value()
y = self.date_start_widget.date().year()
m = self.date_start_widget.date().month()
d = self.date_start_widget.date().day()
self.FILLPARAM.time_start = xldate_from_date_tuple((y, m, d), 0)
y = self.date_end_widget.date().year()
m = self.date_end_widget.date().month()
d = self.date_end_widget.date().day()
self.FILLPARAM.time_end = xldate_from_date_tuple((y, m, d), 0)
self.gafill_display_table.populate_table(
self.gapfill_worker.TARGET,
self.gapfill_worker.WEATHER,
self.FILLPARAM)
table, target_info = correlation_table_generation(
self.gapfill_worker.TARGET,
self.gapfill_worker.WEATHER,
self.FILLPARAM)
self.FillTextBox.setText(table)
self.target_station_info.setText(target_info)
@QSlot(int)
def target_station_changed(self, index):
"""Handles when the target station is changed on the GUI side."""
self.btn_delete_data.setEnabled(index != -1)
if index != -1:
self.correlation_UI()
def correlation_UI(self):
"""
Calculate automatically the correlation coefficients when a target
station is selected by the user in the drop-down menu or if a new
station is selected programmatically.
"""
if self.CORRFLAG == 'on' and self.target_station.currentIndex() != -1:
index = self.target_station.currentIndex()
self.gapfill_worker.set_target_station(index)
msg = ('Correlation coefficients calculation for ' +
'station %s completed') % self.gapfill_worker.TARGET.name
self.ConsoleSignal.emit('<font color=black>%s</font>' % msg)
print(msg)
self.correlation_table_display()
def restoreUI(self): # ===================================================
self.btn_fill.setIcon(icons.get_icon('fill_data'))
self.btn_fill.setEnabled(True)
self.btn_fill_all.setIcon(icons.get_icon('fill_all_data'))
self.btn_fill_all.setEnabled(True)
self.tarSta_widg.setEnabled(True)
self.fillDates_widg.setEnabled(True)
self.stack_widget.setEnabled(True)
self.pbar.setValue(0)
QApplication.processEvents()
self.pbar.hide()
def get_dataset_names(self):
"""
Return a list of the names of the dataset that are loaded in
memory and listed in the target station dropdown menu.
"""
return [self.target_station.itemText(i) for i in
range(self.target_station.count())]
def get_time_from_qdatedit(self, obj):
y = obj.date().year()
m = obj.date().month()
d = obj.date().day()
return xldate_from_date_tuple((y, m, d), 0)
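    # For reference, xldate_from_date_tuple((2017, 1, 1), 0) returns 42736.0, the
    # Excel 1900-date-system serial number used for time_start/time_end
    # throughout this module.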
def gap_fill_btn_clicked(self):
# ----------------------------------------- Stop Thread if Running ----
if self.gapfill_thread.isRunning():
print('!Stopping the gap-filling routine!')
# ---- Pass a flag to the worker to tell him to stop ----
self.gapfill_worker.STOP = True
self.isFillAll_inProgress = False
# UI will be restored in *gapfill_worker_return* method
return
# ---------------------------------------------------- Data is Empty --
# Check if Station List is Empty :
nSTA = len(self.gapfill_worker.WEATHER.STANAME)
if nSTA == 0:
msg = ('There is no data to fill.')
btn = QMessageBox.Ok
QMessageBox.warning(self, 'Warning', msg, btn)
return
# ------------------------------------------- CHECK FOR DATES ERRORS --
time_start = self.get_time_from_qdatedit(self.date_start_widget)
time_end = self.get_time_from_qdatedit(self.date_end_widget)
if time_start > time_end:
print('The time period is invalid.')
msg = ('<i>From</i> date is set to a later time than '
'the <i>To</i> date.')
btn = QMessageBox.Ok
QMessageBox.warning(self, 'Warning', msg, btn)
return
# --------------------------------------------- Check Which Button ----
button = self.sender()
if button == self.btn_fill:
if self.target_station.currentIndex() == -1:
# Check if Station is Selected.
msg = 'No weather station is currently selected'
btn = QMessageBox.Ok
QMessageBox.warning(self, 'Warning', msg, btn)
return
self.btn_fill_all.setEnabled(False)
self.isFillAll_inProgress = False
sta_indx2fill = self.target_station.currentIndex()
elif button == self.btn_fill_all:
# Fill All Stations
self.btn_fill.setEnabled(False)
self.isFillAll_inProgress = True
sta_indx2fill = 0
# -- Disable UI and continue the process normally --
button.setIcon(icons.get_icon('stop'))
self.fillDates_widg.setEnabled(False)
self.tarSta_widg.setEnabled(False)
self.stack_widget.setEnabled(False)
self.pbar.show()
QApplication.processEvents()
self.gap_fill_start(sta_indx2fill)
def gapfill_worker_return(self, event):
"""
Method initiated from an automatic return from the gapfilling
process in batch mode. Iterate over the station list and continue
process normally.
"""
self.gapfill_thread.quit()
nSTA = len(self.gapfill_worker.WEATHER.STANAME)
if event:
sta_indx2fill = self.target_station.currentIndex() + 1
if self.isFillAll_inProgress is False or sta_indx2fill == nSTA:
# Single fill process completed sucessfully for the current
# selected weather station OR Fill All process completed
# sucessfully for all the weather stations in the list.
self.gapfill_worker.STOP = False
self.isFillAll_inProgress = False
self.restoreUI()
else:
self.gap_fill_start(sta_indx2fill)
else:
print('Gap-filling routine stopped.')
# The gapfilling routine was stopped from the UI.
self.gapfill_worker.STOP = False
self.isFillAll_inProgress = False
self.restoreUI()
def gap_fill_start(self, sta_indx2fill):
# ----- Wait for the QThread to finish -----
        # Protection in case the QThread did not have time to close completely
        # before starting the gap-filling process for the next station.
waittime = 0
while self.gapfill_thread.isRunning():
print('Waiting for the fill weather data thread to close ' +
                  'before proceeding with the next station.')
sleep(0.1)
waittime += 0.1
if waittime > 15:
msg = ('This function is not working as intended.' +
' Please report a bug.')
print(msg)
self.ConsoleSignal.emit('<font color=red>%s</font>' % msg)
return
# -------------------------------------------------------- UPDATE UI --
self.CORRFLAG = 'off'
self.target_station.setCurrentIndex(sta_indx2fill)
self.CORRFLAG = 'on'
# Calculate correlation coefficient for the next station.
self.correlation_UI()
# ----------------------------------------------------- START THREAD --
# -- Pass information to the worker --
self.gapfill_worker.outputDir = os.path.join(
self.workdir, "Meteo", "Output")
self.gapfill_worker.fig_format = self.fig_format.currentText()
self.gapfill_worker.fig_language = self.fig_language.currentText()
time_start = self.get_time_from_qdatedit(self.date_start_widget)
time_end = self.get_time_from_qdatedit(self.date_end_widget)
self.gapfill_worker.time_start = time_start
self.gapfill_worker.time_end = time_end
self.gapfill_worker.NSTAmax = self.Nmax.value()
self.gapfill_worker.limitDist = self.distlimit.value()
self.gapfill_worker.limitAlt = self.altlimit.value()
# self.gapfill_worker.TARGET = self.TARGET
self.gapfill_worker.regression_mode = self.RMSE_regression.isChecked()
self.gapfill_worker.full_error_analysis = \
self.full_error_analysis.isChecked()
self.gapfill_worker.add_ETP = self.add_PET_ckckbox.isChecked()
# ---- Start the Thread ----
try:
self.gapfill_thread.started.disconnect(
self.gapfill_worker.fill_data)
except TypeError:
# self.gapfill_worker.fill_data is not connected
pass
finally:
self.gapfill_thread.started.connect(self.gapfill_worker.fill_data)
self.gapfill_thread.start()
def btn_add_PET_isClicked(self):
"""
Add PET to the selected weather datafile.
"""
dirname = os.path.join(self.workdir, "Meteo", "Output")
filename, _ = QFileDialog.getOpenFileName(
self, 'Select a valid weather data file.', dirname, '*.out')
if filename:
add_PET_to_weather_datafile(filename)
class StaLocManager(QWidget):
def __init__(self, *args, **kwargs):
super(StaLocManager, self).__init__(*args, **kwargs)
self.figure = mpl.figure.Figure()
self.canvas = FigureCanvasQTAgg(self.figure)
toolbar = NavigationToolbar2QT(self.canvas, self)
layout = QGridLayout()
self.setLayout(layout)
layout.addWidget(toolbar, 0, 0)
layout.addWidget(self.canvas, 1, 0)
self.__init_plot__()
def __init_plot__(self):
self.figure.set_facecolor('white')
self.figure.add_subplot(111)
def plot_stations(self, lat, lon, name):
ax = self.figure.axes[0]
ax.plot(lon, lat, 'o')
for i in range(len(name)):
ax.annotate(name[i], xy=(lon[i], lat[i]), textcoords='data')
def plot_obswells(self, lat, lon, name):
ax = self.figure.axes[0]
ax.plot(lon, lat, 'o', color='red')
ax.annotate(name, xy=(lon, lat), textcoords='data')
# =============================================================================
def correlation_table_generation(TARGET, WEATHER, FILLPARAM):
"""
    This function generates an HTML output to be displayed in the
<Fill Data> tab display area after a target station has been
selected by the user.
"""
STANAME = WEATHER.STANAME
nSTA = len(STANAME)
nVAR = len(WEATHER.VARNAME)
Ndata_limit = int(365 / 2.)
limitDist = FILLPARAM.limitDist
limitAlt = FILLPARAM.limitAlt
# -------------------------------------------- TARGET STATION INFO TABLE --
date_start = xldate_as_tuple(FILLPARAM.time_start, 0)
date_start = '%02d/%02d/%04d' % (WEATHER.DATE_START[TARGET.index, 2],
WEATHER.DATE_START[TARGET.index, 1],
WEATHER.DATE_START[TARGET.index, 0])
date_end = xldate_as_tuple(FILLPARAM.time_end, 0)
date_end = '%02d/%02d/%04d' % (WEATHER.DATE_END[TARGET.index, 2],
WEATHER.DATE_END[TARGET.index, 1],
WEATHER.DATE_END[TARGET.index, 0])
FIELDS = ['Latitude', 'Longitude', 'Altitude', 'Data date start',
'Data date end']
HEADER = [WEATHER.LAT[TARGET.index],
WEATHER.LON[TARGET.index],
WEATHER.ALT[TARGET.index],
date_start,
date_end]
target_info = '''<table border="0" cellpadding="1" cellspacing="0"
align="left">'''
for i in range(len(HEADER)):
target_info += '<tr>'
target_info += '<td align="left">%s</td>' % FIELDS[i]
target_info += '<td align="left"> : </td>'
target_info += '<td align="left">%s</td>' % HEADER[i]
target_info += '</tr>'
target_info += '</table>'
# -------------------------------------------------------- SORT STATIONS --
# Stations best correlated with the target station are displayed toward
# the top of the table while neighboring stations poorly correlated are
# displayed toward the bottom.
# Define a criteria for sorting the correlation quality of the stations.
CORCOEF = TARGET.CORCOEF
DATA = WEATHER.DATA
TIME = WEATHER.TIME
SUM_CORCOEF = np.sum(CORCOEF, axis=0) * -1 # Sort in descending order.
index_sort = np.argsort(SUM_CORCOEF)
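    # e.g. a SUM_CORCOEF of [2.1, 3.5, 1.0] becomes [-2.1, -3.5, -1.0], so argsort
    # yields [1, 0, 2]: station indices ordered from best to worst correlated.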
# Reorganize the data.
CORCOEF = CORCOEF[:, index_sort]
DATA = DATA[:, index_sort, :]
STANAME = STANAME[index_sort]
HORDIST = TARGET.HORDIST[index_sort]
ALTDIFF = TARGET.ALTDIFF[index_sort]
target_station_index = np.where(TARGET.name == STANAME)[0]
index_start = np.where(TIME == FILLPARAM.time_start)[0][0]
index_end = np.where(TIME == FILLPARAM.time_end)[0][0]
# ---------------------------------------------- Determine filling dates --
fill_date_start = xldate_as_tuple(FILLPARAM.time_start, 0)
fill_date_start = '%02d/%02d/%04d' % (fill_date_start[2],
fill_date_start[1],
fill_date_start[0])
fill_date_end = xldate_as_tuple(FILLPARAM.time_end, 0)
fill_date_end = '%02d/%02d/%04d' % (fill_date_end[2],
fill_date_end[1],
fill_date_end[0])
# --------------------------------------------------- missing data table --
table1 = '''
<p align=justify>
Table 1 : Number of days with missing data from
<b>%s</b> to <b>%s</b> for station <b>%s</b>:
</p>
''' % (fill_date_start, fill_date_end, TARGET.name)
table1 += '''
<table border="0" cellpadding="3" cellspacing="0"
align="center">
<tr>
<td colspan="5"><hr></td>
</tr>
<tr>
'''
table1 += '''
<td width=135 align="left">Weather Variable</td>
<td align="center">T<sub>max</sub></td>
<td align="center">T<sub>min</sub></sub></td>
<td align="center">T<sub>mean</sub></td>
<td align="center">P<sub>tot</sub></td>
'''
table1 += '''
</tr>
<tr>
<td colspan="5"><hr></td>
</tr>
<tr>
<td width=135 align="left">Days with<br>missing data</td>
'''
total_nbr_data = index_end - index_start + 1
for var in range(nVAR):
nbr_nan = np.isnan(DATA[index_start:index_end+1,
target_station_index, var])
nbr_nan = float(np.sum(nbr_nan))
nan_percent = round(nbr_nan / total_nbr_data * 100, 1)
table1 += '''<td align="center">
%d<br>(%0.1f %%)
</td>''' % (nbr_nan, nan_percent)
table1 += '''
</tr>
<tr>
<td colspan="5"><hr></td>
</tr>
</table>
<br><br>
'''
# --------------------------------------------------- corr. coeff. table --
table2 = table1
table2 += '''
<p align="justify">
<font size="3">
Table 2 : Altitude difference, horizontal distance and
                  correlation coefficients for each meteorological variable,
                  calculated between station <b>%s</b> and its neighboring
                  stations :
                </font>
</p>
''' % TARGET.name
# ---- HEADER ----
table2 += '''
<table border="0" cellpadding="3" cellspacing="0"
align="center" width="100%%">
<tr>
<td colspan="9"><hr></td>
</tr>
<tr>
<td align="center" valign="bottom" width=30 rowspan="3">
#
</td>
<td align="left" valign="bottom" width=200 rowspan="3">
Neighboring Stations
</td>
<td width=60 align="center" valign="bottom" rowspan="3">
ΔAlt.<br>(m)
</td>
<td width=60 align="center" valign="bottom" rowspan="3">
Dist.<br>(km)
</td>
<td align="center" valign="middle" colspan="4">
Correlation Coefficients
</td>
</tr>
<tr>
<td colspan="4"><hr></td>
</tr>
<tr>
<td width=60 align="center" valign="middle">
T<sub>max</sub>
</td>
<td width=60 align="center" valign="middle">
T<sub>min</sub>
</td>
<td width=60 align="center" valign="middle">
T<sub>mean</sub>
</td>
<td width=60 align="center" valign="middle">
P<sub>tot</sub>
</td>
</tr>
<tr>
<td colspan="9"><hr></td>
</tr>
'''
color = ['transparent', StyleDB().lightgray]
index = list(range(nSTA))
index.remove(target_station_index)
counter = 0
for i in index:
# ---- Counter and Neighboring station names ----
table2 += '''
<tr bgcolor="%s">
<td align="center" valign="top">%02d</td>
<td valign="top">
%s
</td>
''' % (color[counter % 2], counter+1, STANAME[i])
# ---- Altitude diff. ----
if abs(ALTDIFF[i]) >= limitAlt and limitAlt >= 0:
fontcolor = StyleDB().red
else:
fontcolor = ''
table2 += '''
<td align="center" valign="top">
<font color="%s">%0.1f</font>
</td>
''' % (fontcolor, ALTDIFF[i])
# ---- Horiz. distance ----
if HORDIST[i] >= limitDist and limitDist >= 0:
fontcolor = StyleDB().red
else:
fontcolor = ''
table2 += '''
<td align="center" valign="top">
<font color="%s">%0.1f</font>
</td>
''' % (fontcolor, HORDIST[i])
# ---- correlation coefficients ----
for value in CORCOEF[:, i]:
if value > 0.7:
fontcolor = ''
else:
fontcolor = StyleDB().red
table2 += '''
<td align="center" valign="top">
<font color="%s">%0.3f</font>
</td>
''' % (fontcolor, value)
table2 += '</tr>'
counter += 1
table2 += ''' <tr>
<td colspan="8"><hr></td>
</tr>
<tr>
<td align="justify" colspan="8">
<font size="2">
* Correlation coefficients are set to
<font color="#C83737">NaN</font> for a given
variable if there is less than
<font color="#C83737">%d</font> pairs of data
between the target and the neighboring station.
</font>
</td>
</tr>
</table>
''' % (Ndata_limit)
return table2, target_info
# =============================================================================
class GapFill_Parameters():
# Class that contains all the relevant parameters for the gapfilling
# procedure. Main instance of this class in the code is <FILLPARAM>.
def __init__(self):
self.time_start = 0 # Fill and Save start date.
self.time_end = 0 # Fill and Save end date.
self.index_start = 0 # Time index for start date
self.index_end = 0 # Time index for end date
self.regression_mode = True
# Max number of neighboring station to use in the regression model.
self.NSTAmax = 0
# Cutoff limit for the horizontal distance between the target and the
# neighboring stations.
self.limitDist = 0
# Cutoff limit for the altitude difference between the target and the
# neighboring stations.
self.limitAlt = 0
# =============================================================================
class GapFillDisplayTable(QTableWidget):
"""
    Widget for displaying useful information for the gapfilling of daily
datasets.
"""
def __init__(self, parent=None):
super(GapFillDisplayTable, self).__init__(parent)
self.initUI()
def initUI(self):
# ------------------------------------------------------------ Style --
self.setFrameStyle(StyleDB().frame)
self.setShowGrid(False)
self.setAlternatingRowColors(True)
self.setMinimumWidth(650)
# ----------------------------------------------------------- Header --
HEADER = ['Neighboring Stations', 'ΔAlt.<br>(m)', 'Dist.<br>(km)',
'T<sub>max</sub>', 'T<sub>min</sub>', 'T<sub>mean</sub>',
'P<sub>tot</sub>']
myHeader = MyHorizHeader(self)
self.setHorizontalHeader(myHeader)
self.setColumnCount(len(HEADER))
self.setHorizontalHeaderLabels(HEADER)
self.verticalHeader().hide()
# ----------------------------------------------- Column Size Policy --
w1 = 65
w2 = 50
self.setColumnWidth(1, w1)
self.setColumnWidth(2, w1)
# self.setColumnHidden(2, True)
self.setColumnWidth(3, w2)
self.setColumnWidth(4, w2)
self.setColumnWidth(5, w2)
self.setColumnWidth(6, w2)
self.horizontalHeader().setSectionResizeMode(QHeaderView.Fixed)
self.horizontalHeader().setSectionResizeMode(0, QHeaderView.Stretch)
class NumTableWidgetItem(QTableWidgetItem):
# To be able to sort numerical float item within a given column.
# http://stackoverflow.com/questions/12673598/
# python-numerical-sorting-in-qtablewidget
def __init__(self, text, sortKey):
QTableWidgetItem.__init__(self, text, QTableWidgetItem.UserType)
self.sortKey = sortKey
# Qt uses a simple < check for sorting items, override this to use
# the sortKey
def __lt__(self, other):
if np.isnan(self.sortKey):
return True
else:
return abs(self.sortKey) < abs(other.sortKey)
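        # e.g. items built as NumTableWidgetItem('12.5', 12.5) and
        # NumTableWidgetItem('-30.0', -30.0) compare through |sortKey|, so 12.5
        # sorts ahead of -30.0, and items with a NaN key always compare as the
        # smallest.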
# =========================================================================
def populate_table(self, TARGET, WEATHER, FILLPARAM):
red = StyleDB().red
# ---------------------------------------------------- Organize Info --
STANAME = WEATHER.STANAME
CLIMATEID = WEATHER.ClimateID
HORDIST = TARGET.HORDIST
ALTDIFF = TARGET.ALTDIFF
CORCOEF = TARGET.CORCOEF
limitDist = FILLPARAM.limitDist
limitAlt = FILLPARAM.limitAlt
nVAR = len(WEATHER.VARNAME)
# ------------------------------------------------------- Fill Table --
nSTA = len(STANAME)
self.setRowCount(nSTA - 1)
indxs = list(range(nSTA))
indxs.remove(TARGET.index)
row = 0
self.setSortingEnabled(False)
for indx in indxs:
# -- Weather Station --
col = 0
item = QTableWidgetItem(STANAME[indx])
item.setFlags(item.flags() & ~Qt.ItemIsEditable)
item.setTextAlignment(Qt.AlignLeft | Qt.AlignVCenter)
item.setToolTip('%s (%s)' % (STANAME[indx], CLIMATEID[indx]))
self.setItem(row, col, item)
# -- Alt. Diff. --
col += 1
item = self.NumTableWidgetItem('%0.1f' % ALTDIFF[indx],
ALTDIFF[indx])
item.setFlags(item.flags() & ~Qt.ItemIsEditable)
item.setTextAlignment(Qt.AlignCenter)
if abs(ALTDIFF[indx]) >= limitAlt and limitAlt >= 0:
item.setForeground(QBrush(QColor(red)))
self.setItem(row, col, item)
# -- Horiz. Dist. --
col += 1
item = self.NumTableWidgetItem('%0.1f' % HORDIST[indx],
HORDIST[indx])
item.setFlags(item.flags() & ~Qt.ItemIsEditable)
item.setTextAlignment(Qt.AlignCenter)
if HORDIST[indx] >= limitDist and limitDist >= 0:
item.setForeground(QBrush(QColor(red)))
self.setItem(row, col, item)
# ---- Correlation Coefficients ----
for var in range(nVAR):
col += 1
item = self.NumTableWidgetItem('%0.3f' % CORCOEF[var, indx],
CORCOEF[var, indx])
item.setFlags(item.flags() & ~Qt.ItemIsEditable)
item.setTextAlignment(Qt.AlignCenter)
if CORCOEF[var, indx] < 0.7 or np.isnan(CORCOEF[var, indx]):
item.setForeground(QBrush(QColor(red)))
self.setItem(row, col, item)
row += 1
self.setSortingEnabled(True)
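# Editorial note: populate_table colors a cell red when the station fails one
# of the cutoffs used above, i.e. |ALTDIFF| >= limitAlt, HORDIST >= limitDist,
# or a correlation coefficient below 0.7 (or NaN).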
class MyHorizHeader(QHeaderView):
# https://forum.qt.io/topic/30598/
# solved-how-to-display-subscript-text-in-header-of-qtableview/5
# http://stackoverflow.com/questions/1956542/
# how-to-make-item-view-render-rich-html-text-in-qt
# http://stackoverflow.com/questions/2336079/
# can-i-have-more-than-one-line-in-a-table-header-in-qt
def __init__(self, parent=None):
super(MyHorizHeader, self).__init__(Qt.Horizontal, parent)
self.parent = parent
# http://stackoverflow.com/questions/18777554/
# why-wont-my-custom-qheaderview-allow-sorting/18777555#18777555
self.setSectionsClickable(True)
self.setHighlightSections(True)
self.showMouseOverLabel = True
self.showSectionSep = False
self.showMouseOverSection = False
self.multirow = True
self.setSortIndicatorShown(False)
self.heightHint = 20
def paintEvent(self, event):
qp = QPainter()
qp.begin(self.viewport())
# print self.sender()
# print(event.region)  # leftover debug output, disabled
if self.showSectionSep:
QHeaderView.paintEvent(self, event)
else:
qp.save()
self.paintHeader(qp)
qp.restore()
qp.save()
self.paintLabels(qp)
qp.restore()
qp.end()
def paintHeader(self, qp):
# Paint the header box for the entire width of the table.
# This eliminates the separators between each individual section.
opt = QStyleOptionHeader()
opt.rect = QRect(0, 0, self.size().width(), self.size().height())
self.style().drawControl(QStyle.CE_Header, opt, qp, self)
def paintSection(self, painter, rect, logicalIndex):
# http://qt4-x11.sourcearchive.com/documentation/4.4.3/
# classQHeaderView_fd6972445e0a4a0085538a3f620b03d1.
# html#fd6972445e0a4a0085538a3f620b03d1
if not rect.isValid():
return
# ------------------------------------------ draw header sections ----
opt = QStyleOptionHeader()
self.initStyleOption(opt)
#
# print(self.model().headerData(logicalIndex, self.orientation()))
#
opt.rect = rect
opt.section = logicalIndex
opt.text = ""
# print dir(opt)
# print opt.SO_TabWidgetFrame
# print int(QStyle.State_MouseOver)
# ----------------------------------------------- section position ----
# visual = self.visualIndex(logicalIndex)
# if self.count() == 1:
# opt.position = QStyleOptionHeader.OnlyOneSection
# elif visual == 0:
# opt.position = QStyleOptionHeader.Beginning
# elif visual == self.count() - 1:
# opt.position = QStyleOptionHeader.End
# else:
# opt.position = QStyleOptionHeader.Middle
#
# sortIndicatorSection = self.sortIndicatorSection()
# if sortIndicatorSection==logicalIndex:
# opt.state = int(opt.state) + int(QStyle.State_Sunken)
# ------------------------------------------- mouse over highlight ----
if self.showMouseOverSection:
mouse_pos = self.mapFromGlobal(QCursor.pos())
if rect.contains(mouse_pos):
opt.state = int(opt.state) + 8192
else:
pass
else:
pass
# -------------------------------------------------- paint section ----
self.style().drawControl(QStyle.CE_Header, opt, painter, self)
def paintLabels(self, qp):
fontfamily = StyleDB().fontfamily
if self.multirow:
headerTable = '''
<table border="0" cellpadding="0" cellspacing="0"
align="center" width="100%%">
<tr>
<td colspan="3"></td>
<td colspan="4" align=center
style="padding-top:8px;
font-size:14px;
font-family: "%s";">
Correlation Coefficients
</td>
</tr>
<tr>
<td colspan="3"></td>
<td colspan="4"><hr width=100%%></td>
</tr>
<tr>
''' % fontfamily
else:
headerTable = '''
<table border="0" cellpadding="0" cellspacing="0"
align="center" width="100%%">
<tr>
'''
# shownSectionCount = self.count() - self.hiddenSectionCount ()
# ---------------------------------- prepare a list of logical index --
LogicalIndex_shown_and_ordered = []
for visualIndex in range(self.count()):
logicalIndex = self.logicalIndex(visualIndex)
if self.isSectionHidden(logicalIndex):
pass
else:
LogicalIndex_shown_and_ordered.append(logicalIndex)
x0 = 0
for logicalIndex in LogicalIndex_shown_and_ordered:
# ---------------------------------------------- Grab label text --
label = str(self.model().headerData(logicalIndex,
self.orientation()))
# ------------------------------------------ Put Labels in Table --
# Highlight labels when an item is selected in the column:
if self.highlightSections():
selectedIndx = self.selectionModel().selectedIndexes()
for index in selectedIndx:
if logicalIndex == index.column():
label = '<b>%s</b>' % label
break
# OR
# Highlight labels when the mouse is over a section:
sectionHeight = self.size().height()
sectionWidth = self.sectionSize(logicalIndex)
rect = QRect(x0, 0, sectionWidth, sectionHeight)
x0 += sectionWidth
if self.showMouseOverLabel:
mouse_pos = self.mapFromGlobal(QCursor.pos())
if rect.contains(mouse_pos):
label = '<b>%s</b>' % label
headerTable += '''
<td valign=middle align=center width=%d
style="padding-top:0px; padding-bottom:0px;
font-size:14px;
font-family:"%s";">
%s
</td>
''' % (sectionWidth, fontfamily, label)
# ---------------------------------------------------- Add Sort Icon --
headerTable += '</tr><tr>'
sortIndicatorSection = self.sortIndicatorSection()
if self.sortIndicatorOrder() == Qt.SortOrder.DescendingOrder:
filename = 'Icons/triangle_up_center.png'
else:
filename = 'Icons/triangle_down_center.png'
for logicalIndex in LogicalIndex_shown_and_ordered:
sectionWidth = self.sectionSize(logicalIndex)
if logicalIndex == sortIndicatorSection:
txt = '<img src="%s">' % filename
else:
txt = ''
headerTable += '''
<td valign=middle align=center width=%d
style="padding-top:0px; padding-bottom:0px;">
%s
</td>
''' % (sectionWidth, txt)
# ---- Prepare html ----
headerTable += '''
</tr>
</table>
'''
TextDoc = QTextDocument()
TextDoc.setTextWidth(self.size().width())
TextDoc.setDocumentMargin(0)
TextDoc.setHtml(headerTable)
self.setFixedHeight(TextDoc.size().height())
self.heightHint = TextDoc.size().height()
# ---- Draw html ----
rec = QRect(0, 0, self.size().width(), self.size().height())
TextDoc.drawContents(qp, rec)
def sizeHint(self):
baseSize = QHeaderView.sizeHint(self)
baseSize.setHeight(self.heightHint)
self.parent.repaint()
return baseSize
# ---- if __name__ == '__main__'
if __name__ == '__main__':
import platform
import sys
app = QApplication(sys.argv)
if platform.system() == 'Windows':
app.setFont(QFont('Segoe UI', 11))
elif platform.system() == 'Linux':
app.setFont(QFont('Ubuntu', 11))
w = GapFillWeatherGUI()
w.set_workdir("C:\\Users\\jsgosselin\\GWHAT\\Projects\\Example")
w.load_data_dir_content()
lat = w.gapfill_worker.WEATHER.LAT
lon = w.gapfill_worker.WEATHER.LON
name = w.gapfill_worker.WEATHER.STANAME
alt = w.gapfill_worker.WEATHER.ALT
# ---- Show Map ----
GWSU16 = [50.525687, -110.64174514]
GWSU24 = [50.368081, -111.14447737]
GWSU34 = [50.446457, -111.0195349]
# stamap = StaLocManager()
# stamap.plot_stations(lat, lon, name)
# stamap.plot_obswells(GWSU16[0], GWSU16[1], 'GW-SU-16')
# stamap.plot_obswells(GWSU24[0], GWSU24[1], 'GW-SU-24')
# stamap.plot_obswells(GWSU34[0], GWSU34[1], 'GW-SU-34')
# stamap.show()
# ---- Show and Move Center ----
w.show()
qr = w.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
w.move(qr.topLeft())
sys.exit(app.exec_())
| gpl-3.0 |
amolkahat/pandas | setup.py | 3 | 26817 | #!/usr/bin/env python
"""
Parts of this file were taken from the pyzmq project
(https://github.com/zeromq/pyzmq) which have been permitted for use under the
BSD license. Parts are from lxml (https://github.com/lxml/lxml)
"""
import os
from os.path import join as pjoin
import pkg_resources
import sys
import shutil
from distutils.version import LooseVersion
from setuptools import setup, Command, find_packages
# versioning
import versioneer
cmdclass = versioneer.get_cmdclass()
def is_platform_windows():
return sys.platform == 'win32' or sys.platform == 'cygwin'
min_numpy_ver = '1.12.0'
setuptools_kwargs = {
'install_requires': [
'python-dateutil >= 2.5.0',
'pytz >= 2011k',
'numpy >= {numpy_ver}'.format(numpy_ver=min_numpy_ver),
],
'setup_requires': ['numpy >= {numpy_ver}'.format(numpy_ver=min_numpy_ver)],
'zip_safe': False,
}
min_cython_ver = '0.28.2'
try:
import Cython
ver = Cython.__version__
from Cython.Build import cythonize
_CYTHON_INSTALLED = ver >= LooseVersion(min_cython_ver)
except ImportError:
_CYTHON_INSTALLED = False
cythonize = lambda x, *args, **kwargs: x # dummy func
# The import of Extension must be after the import of Cython, otherwise
# we do not get the appropriately patched class.
# See https://cython.readthedocs.io/en/latest/src/reference/compilation.html
from distutils.extension import Extension # noqa:E402
from distutils.command.build import build # noqa:E402
try:
if not _CYTHON_INSTALLED:
raise ImportError('No supported version of Cython installed.')
from Cython.Distutils.old_build_ext import old_build_ext as _build_ext
cython = True
except ImportError:
from distutils.command.build_ext import build_ext as _build_ext
cython = False
else:
try:
try:
from Cython import Tempita as tempita
except ImportError:
import tempita
except ImportError:
raise ImportError('Building pandas requires Tempita: '
'pip install Tempita')
_pxi_dep_template = {
'algos': ['_libs/algos_common_helper.pxi.in',
'_libs/algos_take_helper.pxi.in',
'_libs/algos_rank_helper.pxi.in'],
'groupby': ['_libs/groupby_helper.pxi.in'],
'hashtable': ['_libs/hashtable_class_helper.pxi.in',
'_libs/hashtable_func_helper.pxi.in'],
'index': ['_libs/index_class_helper.pxi.in'],
'sparse': ['_libs/sparse_op_helper.pxi.in'],
'interval': ['_libs/intervaltree.pxi.in']}
_pxifiles = []
_pxi_dep = {}
for module, files in _pxi_dep_template.items():
pxi_files = [pjoin('pandas', x) for x in files]
_pxifiles.extend(pxi_files)
_pxi_dep[module] = pxi_files
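# Editorial note: each '.pxi.in' template listed above is rendered by tempita
# into the matching '.pxi' include file before the Cython sources are built
# (see build_ext.render_templates below, which simply strips the '.in' suffix).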
class build_ext(_build_ext):
@classmethod
def render_templates(cls, pxifiles):
for pxifile in pxifiles:
# build pxifiles first, template extension must be .pxi.in
assert pxifile.endswith('.pxi.in')
outfile = pxifile[:-3]
if (os.path.exists(outfile) and
os.stat(pxifile).st_mtime < os.stat(outfile).st_mtime):
# if .pxi.in is not updated, no need to output .pxi
continue
with open(pxifile, "r") as f:
tmpl = f.read()
pyxcontent = tempita.sub(tmpl)
with open(outfile, "w") as f:
f.write(pyxcontent)
def build_extensions(self):
# if building from c files, don't need to
# generate template output
if cython:
self.render_templates(_pxifiles)
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
for ext in self.extensions:
if (hasattr(ext, 'include_dirs') and
numpy_incl not in ext.include_dirs):
ext.include_dirs.append(numpy_incl)
_build_ext.build_extensions(self)
DESCRIPTION = ("Powerful data structures for data analysis, time series, "
"and statistics")
LONG_DESCRIPTION = """
**pandas** is a Python package providing fast, flexible, and expressive data
structures designed to make working with structured (tabular, multidimensional,
potentially heterogeneous) and time series data both easy and intuitive. It
aims to be the fundamental high-level building block for doing practical,
**real world** data analysis in Python. Additionally, it has the broader goal
of becoming **the most powerful and flexible open source data analysis /
manipulation tool available in any language**. It is already well on its way
toward this goal.
pandas is well suited for many different kinds of data:
- Tabular data with heterogeneously-typed columns, as in an SQL table or
Excel spreadsheet
- Ordered and unordered (not necessarily fixed-frequency) time series data.
- Arbitrary matrix data (homogeneously typed or heterogeneous) with row and
column labels
- Any other form of observational / statistical data sets. The data actually
need not be labeled at all to be placed into a pandas data structure
The two primary data structures of pandas, Series (1-dimensional) and DataFrame
(2-dimensional), handle the vast majority of typical use cases in finance,
statistics, social science, and many areas of engineering. For R users,
DataFrame provides everything that R's ``data.frame`` provides and much
more. pandas is built on top of `NumPy <http://www.numpy.org>`__ and is
intended to integrate well within a scientific computing environment with many
other 3rd party libraries.
Here are just a few of the things that pandas does well:
- Easy handling of **missing data** (represented as NaN) in floating point as
well as non-floating point data
- Size mutability: columns can be **inserted and deleted** from DataFrame and
higher dimensional objects
- Automatic and explicit **data alignment**: objects can be explicitly
aligned to a set of labels, or the user can simply ignore the labels and
let `Series`, `DataFrame`, etc. automatically align the data for you in
computations
- Powerful, flexible **group by** functionality to perform
split-apply-combine operations on data sets, for both aggregating and
transforming data
- Make it **easy to convert** ragged, differently-indexed data in other
Python and NumPy data structures into DataFrame objects
- Intelligent label-based **slicing**, **fancy indexing**, and **subsetting**
of large data sets
- Intuitive **merging** and **joining** data sets
- Flexible **reshaping** and pivoting of data sets
- **Hierarchical** labeling of axes (possible to have multiple labels per
tick)
- Robust IO tools for loading data from **flat files** (CSV and delimited),
Excel files, databases, and saving / loading data from the ultrafast **HDF5
format**
- **Time series**-specific functionality: date range generation and frequency
conversion, moving window statistics, moving window linear regressions,
date shifting and lagging, etc.
Many of these principles are here to address the shortcomings frequently
experienced using other languages / scientific research environments. For data
scientists, working with data is typically divided into multiple stages:
munging and cleaning data, analyzing / modeling it, then organizing the results
of the analysis into a form suitable for plotting or tabular display. pandas is
the ideal tool for all of these tasks.
"""
DISTNAME = 'pandas'
LICENSE = 'BSD'
AUTHOR = "The PyData Development Team"
EMAIL = "[email protected]"
URL = "http://pandas.pydata.org"
DOWNLOAD_URL = ''
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Cython',
'Topic :: Scientific/Engineering']
class CleanCommand(Command):
"""Custom distutils command to clean the .so and .pyc files."""
user_options = [("all", "a", "")]
def initialize_options(self):
self.all = True
self._clean_me = []
self._clean_trees = []
base = pjoin('pandas', '_libs', 'src')
tsbase = pjoin('pandas', '_libs', 'tslibs', 'src')
dt = pjoin(tsbase, 'datetime')
util = pjoin('pandas', 'util')
parser = pjoin(base, 'parser')
ujson_python = pjoin(base, 'ujson', 'python')
ujson_lib = pjoin(base, 'ujson', 'lib')
self._clean_exclude = [pjoin(dt, 'np_datetime.c'),
pjoin(dt, 'np_datetime_strings.c'),
pjoin(parser, 'tokenizer.c'),
pjoin(parser, 'io.c'),
pjoin(ujson_python, 'ujson.c'),
pjoin(ujson_python, 'objToJSON.c'),
pjoin(ujson_python, 'JSONtoObj.c'),
pjoin(ujson_lib, 'ultrajsonenc.c'),
pjoin(ujson_lib, 'ultrajsondec.c'),
pjoin(util, 'move.c'),
]
for root, dirs, files in os.walk('pandas'):
for f in files:
filepath = pjoin(root, f)
if filepath in self._clean_exclude:
continue
if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o',
'.pyo',
'.pyd', '.c', '.orig'):
self._clean_me.append(filepath)
for d in dirs:
if d == '__pycache__':
self._clean_trees.append(pjoin(root, d))
# clean the generated pxi files
for pxifile in _pxifiles:
pxifile = pxifile.replace(".pxi.in", ".pxi")
self._clean_me.append(pxifile)
for d in ('build', 'dist'):
if os.path.exists(d):
self._clean_trees.append(d)
def finalize_options(self):
pass
def run(self):
for clean_me in self._clean_me:
try:
os.unlink(clean_me)
except Exception:
pass
for clean_tree in self._clean_trees:
try:
shutil.rmtree(clean_tree)
except Exception:
pass
# we need to inherit from the versioneer
# class as it encodes the version info
sdist_class = cmdclass['sdist']
class CheckSDist(sdist_class):
"""Custom sdist that ensures Cython has compiled all pyx files to c."""
_pyxfiles = ['pandas/_libs/lib.pyx',
'pandas/_libs/hashtable.pyx',
'pandas/_libs/tslib.pyx',
'pandas/_libs/index.pyx',
'pandas/_libs/internals.pyx',
'pandas/_libs/algos.pyx',
'pandas/_libs/join.pyx',
'pandas/_libs/indexing.pyx',
'pandas/_libs/interval.pyx',
'pandas/_libs/hashing.pyx',
'pandas/_libs/missing.pyx',
'pandas/_libs/reduction.pyx',
'pandas/_libs/testing.pyx',
'pandas/_libs/skiplist.pyx',
'pandas/_libs/sparse.pyx',
'pandas/_libs/ops.pyx',
'pandas/_libs/parsers.pyx',
'pandas/_libs/tslibs/ccalendar.pyx',
'pandas/_libs/tslibs/period.pyx',
'pandas/_libs/tslibs/strptime.pyx',
'pandas/_libs/tslibs/np_datetime.pyx',
'pandas/_libs/tslibs/timedeltas.pyx',
'pandas/_libs/tslibs/timestamps.pyx',
'pandas/_libs/tslibs/timezones.pyx',
'pandas/_libs/tslibs/conversion.pyx',
'pandas/_libs/tslibs/fields.pyx',
'pandas/_libs/tslibs/offsets.pyx',
'pandas/_libs/tslibs/frequencies.pyx',
'pandas/_libs/tslibs/resolution.pyx',
'pandas/_libs/tslibs/parsing.pyx',
'pandas/_libs/writers.pyx',
'pandas/io/sas/sas.pyx']
_cpp_pyxfiles = ['pandas/_libs/window.pyx',
'pandas/io/msgpack/_packer.pyx',
'pandas/io/msgpack/_unpacker.pyx']
def initialize_options(self):
sdist_class.initialize_options(self)
def run(self):
if 'cython' in cmdclass:
self.run_command('cython')
else:
# If we are not running cython then
# compile the extensions correctly
pyx_files = [(self._pyxfiles, 'c'), (self._cpp_pyxfiles, 'cpp')]
for pyxfiles, extension in pyx_files:
for pyxfile in pyxfiles:
sourcefile = pyxfile[:-3] + extension
msg = ("{extension}-source file '{source}' not found.\n"
"Run 'setup.py cython' before sdist.".format(
source=sourcefile, extension=extension))
assert os.path.isfile(sourcefile), msg
sdist_class.run(self)
class CheckingBuildExt(build_ext):
"""
Subclass build_ext to get clearer report if Cython is necessary.
"""
def check_cython_extensions(self, extensions):
for ext in extensions:
for src in ext.sources:
if not os.path.exists(src):
print("{}: -> [{}]".format(ext.name, ext.sources))
raise Exception("""Cython-generated file '{src}' not found.
Cython is required to compile pandas from a development branch.
Please install Cython or download a release package of pandas.
""".format(src=src))
def build_extensions(self):
self.check_cython_extensions(self.extensions)
build_ext.build_extensions(self)
class CythonCommand(build_ext):
"""
Custom distutils command subclassed from Cython.Distutils.build_ext
to compile pyx->c, and stop there. All this does is override the
C-compile method build_extension() with a no-op.
"""
def build_extension(self, ext):
pass
class DummyBuildSrc(Command):
""" numpy's build_src command interferes with Cython's build_ext.
"""
user_options = []
def initialize_options(self):
self.py_modules_dict = {}
def finalize_options(self):
pass
def run(self):
pass
cmdclass.update({'clean': CleanCommand,
'build': build})
try:
from wheel.bdist_wheel import bdist_wheel
class BdistWheel(bdist_wheel):
def get_tag(self):
tag = bdist_wheel.get_tag(self)
repl = 'macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64'
if tag[2] == 'macosx_10_6_intel':
tag = (tag[0], tag[1], repl)
return tag
cmdclass['bdist_wheel'] = BdistWheel
except ImportError:
pass
if cython:
suffix = '.pyx'
cmdclass['build_ext'] = CheckingBuildExt
cmdclass['cython'] = CythonCommand
else:
suffix = '.c'
cmdclass['build_src'] = DummyBuildSrc
cmdclass['build_ext'] = CheckingBuildExt
# ----------------------------------------------------------------------
# Preparation of compiler arguments
if sys.byteorder == 'big':
endian_macro = [('__BIG_ENDIAN__', '1')]
else:
endian_macro = [('__LITTLE_ENDIAN__', '1')]
if is_platform_windows():
extra_compile_args = []
else:
# args to ignore warnings
extra_compile_args = ['-Wno-unused-function']
# enable coverage by building cython files by setting the environment variable
# "PANDAS_CYTHON_COVERAGE" (with a Truthy value) or by running build_ext
# with `--with-cython-coverage` enabled
linetrace = os.environ.get('PANDAS_CYTHON_COVERAGE', False)
if '--with-cython-coverage' in sys.argv:
linetrace = True
sys.argv.remove('--with-cython-coverage')
# Note: if not using `cythonize`, coverage can be enabled by
# pinning `ext.cython_directives = directives` to each ext in extensions.
# github.com/cython/cython/wiki/enhancements-compilerdirectives#in-setuppy
directives = {'linetrace': False}
macros = []
if linetrace:
# https://pypkg.com/pypi/pytest-cython/f/tests/example-project/setup.py
directives['linetrace'] = True
macros = [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')]
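# Editorial note: with line tracing enabled, coverage-instrumented extensions
# can be built with, e.g. (illustrative command, not from this file):
#   PANDAS_CYTHON_COVERAGE=1 python setup.py build_ext --inplace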
# ----------------------------------------------------------------------
# Specification of Dependencies
# TODO: Need to check to see if e.g. `linetrace` has changed and possibly
# re-compile.
def maybe_cythonize(extensions, *args, **kwargs):
"""
Render tempita templates before calling cythonize
"""
if len(sys.argv) > 1 and 'clean' in sys.argv:
# Avoid running cythonize on `python setup.py clean`
# See https://github.com/cython/cython/issues/1495
return extensions
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
# TODO: Is this really necessary here?
for ext in extensions:
if (hasattr(ext, 'include_dirs') and
numpy_incl not in ext.include_dirs):
ext.include_dirs.append(numpy_incl)
if cython:
build_ext.render_templates(_pxifiles)
return cythonize(extensions, *args, **kwargs)
else:
return extensions
def srcpath(name=None, suffix='.pyx', subdir='src'):
return pjoin('pandas', subdir, name + suffix)
common_include = ['pandas/_libs/src/klib', 'pandas/_libs/src']
ts_include = ['pandas/_libs/tslibs/src']
lib_depends = ['pandas/_libs/src/parse_helper.h',
'pandas/_libs/src/compat_helper.h']
np_datetime_headers = [
'pandas/_libs/tslibs/src/datetime/np_datetime.h',
'pandas/_libs/tslibs/src/datetime/np_datetime_strings.h']
np_datetime_sources = [
'pandas/_libs/tslibs/src/datetime/np_datetime.c',
'pandas/_libs/tslibs/src/datetime/np_datetime_strings.c']
tseries_depends = np_datetime_headers
ext_data = {
'_libs.algos': {
'pyxfile': '_libs/algos',
'depends': _pxi_dep['algos']},
'_libs.groupby': {
'pyxfile': '_libs/groupby',
'depends': _pxi_dep['groupby']},
'_libs.hashing': {
'pyxfile': '_libs/hashing',
'include': [],
'depends': []},
'_libs.hashtable': {
'pyxfile': '_libs/hashtable',
'depends': (['pandas/_libs/src/klib/khash_python.h'] +
_pxi_dep['hashtable'])},
'_libs.index': {
'pyxfile': '_libs/index',
'include': common_include + ts_include,
'depends': _pxi_dep['index'],
'sources': np_datetime_sources},
'_libs.indexing': {
'pyxfile': '_libs/indexing'},
'_libs.internals': {
'pyxfile': '_libs/internals'},
'_libs.interval': {
'pyxfile': '_libs/interval',
'depends': _pxi_dep['interval']},
'_libs.join': {
'pyxfile': '_libs/join'},
'_libs.lib': {
'pyxfile': '_libs/lib',
'include': common_include + ts_include,
'depends': lib_depends + tseries_depends},
'_libs.missing': {
'pyxfile': '_libs/missing',
'include': common_include + ts_include,
'depends': tseries_depends},
'_libs.parsers': {
'pyxfile': '_libs/parsers',
'depends': ['pandas/_libs/src/parser/tokenizer.h',
'pandas/_libs/src/parser/io.h'],
'sources': ['pandas/_libs/src/parser/tokenizer.c',
'pandas/_libs/src/parser/io.c']},
'_libs.reduction': {
'pyxfile': '_libs/reduction'},
'_libs.ops': {
'pyxfile': '_libs/ops'},
'_libs.properties': {
'pyxfile': '_libs/properties',
'include': []},
'_libs.reshape': {
'pyxfile': '_libs/reshape',
'depends': []},
'_libs.skiplist': {
'pyxfile': '_libs/skiplist',
'depends': ['pandas/_libs/src/skiplist.h']},
'_libs.sparse': {
'pyxfile': '_libs/sparse',
'depends': _pxi_dep['sparse']},
'_libs.tslib': {
'pyxfile': '_libs/tslib',
'include': ts_include,
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.ccalendar': {
'pyxfile': '_libs/tslibs/ccalendar',
'include': []},
'_libs.tslibs.conversion': {
'pyxfile': '_libs/tslibs/conversion',
'include': ts_include,
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.fields': {
'pyxfile': '_libs/tslibs/fields',
'include': ts_include,
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.frequencies': {
'pyxfile': '_libs/tslibs/frequencies',
'include': []},
'_libs.tslibs.nattype': {
'pyxfile': '_libs/tslibs/nattype',
'include': []},
'_libs.tslibs.np_datetime': {
'pyxfile': '_libs/tslibs/np_datetime',
'include': ts_include,
'depends': np_datetime_headers,
'sources': np_datetime_sources},
'_libs.tslibs.offsets': {
'pyxfile': '_libs/tslibs/offsets',
'include': ts_include,
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.parsing': {
'pyxfile': '_libs/tslibs/parsing',
'include': []},
'_libs.tslibs.period': {
'pyxfile': '_libs/tslibs/period',
'include': ts_include,
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.resolution': {
'pyxfile': '_libs/tslibs/resolution',
'include': ts_include,
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.strptime': {
'pyxfile': '_libs/tslibs/strptime',
'include': ts_include,
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.timedeltas': {
'pyxfile': '_libs/tslibs/timedeltas',
'include': ts_include,
'depends': np_datetime_headers,
'sources': np_datetime_sources},
'_libs.tslibs.timestamps': {
'pyxfile': '_libs/tslibs/timestamps',
'include': ts_include,
'depends': tseries_depends,
'sources': np_datetime_sources},
'_libs.tslibs.timezones': {
'pyxfile': '_libs/tslibs/timezones',
'include': []},
'_libs.testing': {
'pyxfile': '_libs/testing'},
'_libs.window': {
'pyxfile': '_libs/window',
'language': 'c++',
'suffix': '.cpp'},
'_libs.writers': {
'pyxfile': '_libs/writers'},
'io.sas._sas': {
'pyxfile': 'io/sas/sas'},
'io.msgpack._packer': {
'macros': endian_macro + macros,
'depends': ['pandas/_libs/src/msgpack/pack.h',
'pandas/_libs/src/msgpack/pack_template.h'],
'include': ['pandas/_libs/src/msgpack'] + common_include,
'language': 'c++',
'suffix': '.cpp',
'pyxfile': 'io/msgpack/_packer',
'subdir': 'io/msgpack'},
'io.msgpack._unpacker': {
'depends': ['pandas/_libs/src/msgpack/unpack.h',
'pandas/_libs/src/msgpack/unpack_define.h',
'pandas/_libs/src/msgpack/unpack_template.h'],
'macros': endian_macro + macros,
'include': ['pandas/_libs/src/msgpack'] + common_include,
'language': 'c++',
'suffix': '.cpp',
'pyxfile': 'io/msgpack/_unpacker',
'subdir': 'io/msgpack'
}
}
extensions = []
for name, data in ext_data.items():
source_suffix = suffix if suffix == '.pyx' else data.get('suffix', '.c')
sources = [srcpath(data['pyxfile'], suffix=source_suffix, subdir='')]
sources.extend(data.get('sources', []))
include = data.get('include', common_include)
obj = Extension('pandas.{name}'.format(name=name),
sources=sources,
depends=data.get('depends', []),
include_dirs=include,
language=data.get('language', 'c'),
define_macros=data.get('macros', macros),
extra_compile_args=extra_compile_args)
extensions.append(obj)
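# Editorial illustration: the '_libs.algos' entry above, for example, becomes
# Extension('pandas._libs.algos', sources=['pandas/_libs/algos.pyx'] (or the
# generated '.c' when Cython is unavailable), depends=_pxi_dep['algos'], ...).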
# ----------------------------------------------------------------------
# ujson
if suffix == '.pyx':
# undo dumb setuptools bug clobbering .pyx sources back to .c
for ext in extensions:
if ext.sources[0].endswith(('.c', '.cpp')):
root, _ = os.path.splitext(ext.sources[0])
ext.sources[0] = root + suffix
ujson_ext = Extension('pandas._libs.json',
depends=['pandas/_libs/src/ujson/lib/ultrajson.h'],
sources=(['pandas/_libs/src/ujson/python/ujson.c',
'pandas/_libs/src/ujson/python/objToJSON.c',
'pandas/_libs/src/ujson/python/JSONtoObj.c',
'pandas/_libs/src/ujson/lib/ultrajsonenc.c',
'pandas/_libs/src/ujson/lib/ultrajsondec.c'] +
np_datetime_sources),
include_dirs=['pandas/_libs/src/ujson/python',
'pandas/_libs/src/ujson/lib',
'pandas/_libs/src/datetime'],
extra_compile_args=(['-D_GNU_SOURCE'] +
extra_compile_args),
define_macros=macros)
extensions.append(ujson_ext)
# ----------------------------------------------------------------------
# util
# extension for pseudo-safely moving bytes into mutable buffers
_move_ext = Extension('pandas.util._move',
depends=[],
sources=['pandas/util/move.c'],
define_macros=macros)
extensions.append(_move_ext)
# The build cache system does string matching below this point.
# if you change something, be careful.
setup(name=DISTNAME,
maintainer=AUTHOR,
version=versioneer.get_version(),
packages=find_packages(include=['pandas', 'pandas.*']),
package_data={'': ['templates/*', '_libs/*.dll']},
ext_modules=maybe_cythonize(extensions, compiler_directives=directives),
maintainer_email=EMAIL,
description=DESCRIPTION,
license=LICENSE,
cmdclass=cmdclass,
url=URL,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
platforms='any',
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
**setuptools_kwargs)
| bsd-3-clause |
Featuretools/featuretools | featuretools/demo/mock_customer.py | 1 | 4323 | import pandas as pd
from numpy import random
from numpy.random import choice
import featuretools as ft
from featuretools.variable_types import Categorical, ZIPCode
def load_mock_customer(n_customers=5, n_products=5, n_sessions=35, n_transactions=500,
random_seed=0, return_single_table=False, return_entityset=False):
"""Return dataframes of mock customer data"""
random.seed(random_seed)
last_date = pd.to_datetime('12/31/2013')
first_date = pd.to_datetime('1/1/2008')
first_bday = pd.to_datetime('1/1/1970')
join_dates = [random.uniform(0, 1) * (last_date - first_date) + first_date
for _ in range(n_customers)]
birth_dates = [random.uniform(0, 1) * (first_date - first_bday) + first_bday
for _ in range(n_customers)]
customers_df = pd.DataFrame({"customer_id": range(1, n_customers + 1)})
customers_df["zip_code"] = choice(["60091", "13244"], n_customers,)
customers_df["join_date"] = pd.Series(join_dates).dt.round('1s')
customers_df["date_of_birth"] = pd.Series(birth_dates).dt.round('1d')
products_df = pd.DataFrame({"product_id": pd.Categorical(range(1, n_products + 1))})
products_df["brand"] = choice(["A", "B", "C"], n_products)
sessions_df = pd.DataFrame({"session_id": range(1, n_sessions + 1)})
sessions_df["customer_id"] = choice(customers_df["customer_id"], n_sessions)
sessions_df["device"] = choice(["desktop", "mobile", "tablet"], n_sessions)
transactions_df = pd.DataFrame({"transaction_id": range(1, n_transactions + 1)})
transactions_df["session_id"] = choice(sessions_df["session_id"], n_transactions)
transactions_df = transactions_df.sort_values("session_id").reset_index(drop=True)
transactions_df["transaction_time"] = pd.date_range('1/1/2014', periods=n_transactions, freq='65s') # todo make these less regular
transactions_df["product_id"] = pd.Categorical(choice(products_df["product_id"], n_transactions))
transactions_df["amount"] = random.randint(500, 15000, n_transactions) / 100
# calculate and merge in session start
# based on the times we came up with for transactions
session_starts = transactions_df.drop_duplicates("session_id")[["session_id", "transaction_time"]].rename(columns={"transaction_time": "session_start"})
sessions_df = sessions_df.merge(session_starts)
if return_single_table:
return transactions_df.merge(sessions_df).merge(customers_df).merge(products_df).reset_index(drop=True)
elif return_entityset:
es = ft.EntitySet(id="transactions")
es = es.entity_from_dataframe(entity_id="transactions",
dataframe=transactions_df,
index="transaction_id",
time_index="transaction_time",
variable_types={"product_id": Categorical})
es = es.entity_from_dataframe(entity_id="products",
dataframe=products_df,
index="product_id")
es = es.entity_from_dataframe(entity_id="sessions",
dataframe=sessions_df,
index="session_id",
time_index="session_start")
es = es.entity_from_dataframe(entity_id="customers",
dataframe=customers_df,
index="customer_id",
time_index="join_date",
variable_types={"zip_code": ZIPCode})
rels = [ft.Relationship(es["products"]["product_id"],
es["transactions"]["product_id"]),
ft.Relationship(es["sessions"]["session_id"],
es["transactions"]["session_id"]),
ft.Relationship(es["customers"]["customer_id"],
es["sessions"]["customer_id"])]
es = es.add_relationships(rels)
es.add_last_time_indexes()
return es
return {"customers": customers_df,
"sessions": sessions_df,
"transactions": transactions_df,
"products": products_df}
| bsd-3-clause |
elkingtonmcb/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
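# Editorial note: when include_self is True the query is the training data
# itself, so each sample is reported as its own (zero-distance) nearest
# neighbor; with query=None the mixin methods exclude each sample instead.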
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted to the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
mmechelke/bayesian_xfel | bxfel/inference/hmc.py | 1 | 8486 |
import ctypes
from threading import Thread, Lock
from Queue import Queue
from multiprocessing import Pool
import multiprocessing as mp
from multiprocessing import sharedctypes
shared_slices = None
shared_data = None
shared_grad = None
#TODO pass likelihood params
def do_energy(params):
slice_index, data_index = params
global shared_slices
global shared_data
ll = GaussianLikelihood(1.)
s = shared_slices[slice_index]
d = shared_data[data_index]
return ll.energy(s,d)
def do_gradient(params):
slice_index, data_index = params
global shared_slices
global shared_data
global shared_grad
ll = GaussianLikelihood(1.)
s = shared_slices[slice_index]
d = shared_data[data_index]
energy, gradient = ll.gradient(s,d)
return gradient
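# Editorial note: do_energy/do_gradient read the module-level shared arrays
# instead of receiving the slices and images as arguments, so the worker
# processes in the multiprocessing pool only need to be sent small
# (slice_index, data_index) pairs rather than pickled copies of the data.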
class GibbsMPsampler(object):
def __init__(self, posterior, params=None, n_cpu=2):
"""
@param posterior: posterior providing the likelihood, projection operator,
SO(3) quadrature, data and prior used below
@param params: optional dict of per-image nuisance parameters
@param n_cpu: number of worker processes for the pool (the default value
here is an editorial assumption; the original signature omitted it)
"""
self._likelihood = posterior._likelihood
self._projection = posterior._projection
self._q = posterior._quadrature
# we need to make sure slices and
# data are shared arrayes
self._data = posterior._data
self._prior = posterior._prior
self._orientations = np.random.choice(len(self._q.R),
size=len(self._data))
if params is None:
self._params = {}
else:
self._params = params
self.__init__process(n_cpu)
def __init__process(self, n_cpu):
"""
Create populate global data
"""
global shared_slices
global shared_data
shared_slices_base = sharedctypes.RawArray(ctypes.c_double,
self._projection.shape[0])
shared_slices = np.frombuffer(shared_slices_base)
shared_slices = shared_slices.reshape((len(self._q.R), -1))
shared_grad_base = sharedctypes.RawArray(ctypes.c_double,
self._projection.shape[0])
shared_grad = np.frombuffer(shared_grad_base)
shared_grad = shared_grad.reshape((len(self._q.R), -1))
shared_data_base = mp.Array(ctypes.c_double,
self._data.size,
lock=False)
shared_data = np.ctypeslib.as_array(shared_data_base)
shared_data = shared_data.reshape(self._data.shape)
shared_data[:] = self._data
self._pool = mp.Pool(n_cpu)
def energy(self, x):
# project density to slices
global shared_slices
energy = 0
m = len(self._q.R)
slices = self._projection.dot(x.reshape((-1,)))
slices = slices.reshape((m, -1))
slices = np.squeeze(np.asarray(slices))
shared_slices[:] = slices[:]
params = [(self._orientations[i], i)
for i in range(len(self._data))]
energy_arr = self._pool.map_async(do_energy, params)
# Maybe we have something to do while we wait
if self._prior is not None:
energy = self._prior.energy(x)
energy += np.sum(energy_arr.get())
return energy
def gradient(self, x):
# project density to slices
global shared_slices
m = len(self._q.R)
slices = self._projection.dot(x.reshape((-1,)))
shared_slices = slices.squeeze()
params = [(self._orientations[i], i)
for i in range(len(self._data))]
grad = self._pool.map(do_gradient, params)
# project back
backproj_grad = self._projection.T.dot(np.asarray(grad).ravel())
backproj_grad = np.squeeze(np.asarray(backproj_grad))
if self._prior is not None:
backproj_grad += self._prior.gradient(x)
return backproj_grad
def __call__(self, x):
return self.energy(x)
def update_rotations(self, x, sample_params=False):
m = len(self._q.R)
slices = self._projection.dot(x.reshape((-1,)))
slices = slices.reshape((m, -1))
slices = np.squeeze(np.asarray(slices))
shared_slices[:] = slices.squeeze()[:]
params = [(j, i)
for i in range(len(self._data))
for j in range(m)]
energies = self._pool.map(do_energy, params)
energies = np.array(energies).reshape((len(self._data), m))
p = np.exp(-energies
- sp.misc.logsumexp(-energies,-1)[:,np.newaxis])
for i in range(len(self._data)):
# For the moment let's trust floating point arithmetic
# p_i = p[i]/p[i].sum()
self._orientations[i] = np.random.choice(m, p=p[i])
if sample_params:
for i, d in enumerate(self._data):
old_params = self._params[i]
j = self._orientations[i]
s = slices[j]
self._likelihood.sample_nuissance_params(s, d, old_params)
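# Editorial note: update_rotations implements the Gibbs step for the
# orientation assignments: every image is scored against every quadrature
# rotation, the energies are turned into normalized probabilities via
# exp(-E - logsumexp(-E)), and a new orientation index is drawn per image.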
def run(self, x0, niter, eps=1e-2, sample_params=False, skip=-1, verbose=20):
from csbplus.mcmc import HMC
self.update_rotations(x0)
self._samples = []
self._energies = []
hmc = HMC(potential_energy=self,
nsteps=20, eps=eps)
hmc._adaptphase = 1.
for i in range(niter):
s = hmc.run(x0, 5, return_momenta=False)
x0 = s[-1]
self.update_rotations(x0, sample_params)
if skip == -1:
self._samples.extend(s)
self._energies.extend(map(self.energy, s))
elif i%int(skip)==0 or i == niter-1:
self._samples.extend(s)
self._energies.extend(map(self.energy, s))
if i% verbose == 0:
print "iteration: {}, energy: {}, stepsize: {}".format(i, self._energies[-1], hmc._eps[-1])
return self._energies, self._samples
if __name__ == "__main__":
import numpy as np
import scipy as sp
import pylab as plt
import seaborn
import os
import sys
from xfel.utils.create_data import GaussianSlices
from xfel.io import mrc
from xfel.numeric.quadrature import GaussSO3Quadrature, ChebyshevSO3Quadrature
from xfel.grid.optimize import GaussianLikelihood, Objective, GibbsOrientationSampler
from xfel.grid.interpolation_matrix import compute_slice_interpolation_matrix, get_image_to_sparse_projection
import scipy.ndimage
import time
from scipy.ndimage import zoom
ground_truth = mrc.read(os.path.expanduser("~/projects/xfel/data/phantom/phantom.mrc"))[0]
resolution = 121
ground_truth = zoom(ground_truth, resolution/128.)
read_from_disk = True
from csb.io import load, dump
if read_from_disk is False:
gs = GaussianSlices(ground_truth, scale = 1., sigma = 0.01)
rs, data = gs.generate_data(2048, resolution)
dump(data, "/tmp/test_data.pkl")
else:
data = load("/tmp/test_data.pkl")
rad = 0.95
q = ChebyshevSO3Quadrature(9)
m = len(q.R)
if read_from_disk is False:
proj = compute_slice_interpolation_matrix(q.R, resolution, radius_cutoff=rad)
dump(proj, "/tmp/test_proj.pkl")
else:
proj = load("/tmp/test_proj.pkl")
image_to_vector = get_image_to_sparse_projection(resolution, rad)
d_sparse = np.array([image_to_vector.dot(data[i,:,:].ravel())
for i in range(data.shape[0])])
premult = None
ll = GaussianLikelihood(1.)
opt = HMCsampler(likelihood=ll,
projection=proj,
quadrature=q,
data=d_sparse,
n_cpu = 4)
t0 = time.time()
opt.update_rotations(ground_truth.ravel())
print time.time() - t0
print "Starting to profile"
import cProfile
cProfile.run('opt.update_rotations(ground_truth.ravel())')
print "Done"
opt2 = GibbsOrientationSampler(likelihood=ll,
projection=proj,
quadrature=q,
data=d_sparse)
t0 = time.time()
opt2.update_rotations(ground_truth.ravel())
print time.time() - t0
opt2._orientations = opt._orientations.copy()
t0 = time.time()
energy = opt.energy(ground_truth.ravel())
print time.time() - t0
t0 = time.time()
energy2 = opt2.energy(ground_truth.ravel())
print time.time() - t0
| mit |
lisitsyn/shogun | examples/undocumented/python/graphical/classifier_perceptron_graphical.py | 10 | 2302 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import latex_plot_inits
parameter_list = [[20, 5, 1., 1000, 1, None, 5], [100, 5, 1., 1000, 1, None, 10]]
def classifier_perceptron_graphical(n=100, distance=5, learn_rate=1., max_iter=1000, num_threads=1, seed=None, nperceptrons=5):
from shogun import RealFeatures, BinaryLabels
from shogun import Perceptron
from shogun import MSG_INFO
# 2D data
_DIM = 2
# To get the nice message that the perceptron has converged
dummy = BinaryLabels()
dummy.io.set_loglevel(MSG_INFO)
np.random.seed(seed)
# Produce some (probably) linearly separable training data by hand
# Two Gaussians at a far enough distance
X = np.array(np.random.randn(_DIM,n))+distance
Y = np.array(np.random.randn(_DIM,n))
label_train_twoclass = np.hstack((np.ones(n), -np.ones(n)))
fm_train_real = np.hstack((X,Y))
feats_train = RealFeatures(fm_train_real)
labels = BinaryLabels(label_train_twoclass)
perceptron = Perceptron(feats_train, labels)
perceptron.set_learn_rate(learn_rate)
perceptron.set_max_iter(max_iter)
perceptron.set_initialize_hyperplane(False)
# Find limits for visualization
x_min = min(np.min(X[0,:]), np.min(Y[0,:]))
x_max = max(np.max(X[0,:]), np.max(Y[0,:]))
y_min = min(np.min(X[1,:]), np.min(Y[1,:]))
y_max = max(np.max(X[1,:]), np.max(Y[1,:]))
for i in range(nperceptrons):
# Initialize randomly weight vector and bias
perceptron.set_w(np.random.random(2))
perceptron.set_bias(np.random.random())
# Run the perceptron algorithm
perceptron.train()
# Construct the hyperplane for visualization
# Equation of the decision boundary is w^T x + b = 0
b = perceptron.get_bias()
w = perceptron.get_w()
hx = np.linspace(x_min-1,x_max+1)
hy = -(w[0]*hx + b) / w[1]
plt.plot(hx, hy)
# Plot the two-class data
plt.scatter(X[0,:], X[1,:], s=40, marker='o', facecolors='none', edgecolors='b')
plt.scatter(Y[0,:], Y[1,:], s=40, marker='s', facecolors='none', edgecolors='r')
# Customize the plot
plt.axis([x_min-1, x_max+1, y_min-1, y_max+1])
plt.title('Rosenblatt\'s Perceptron Algorithm')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
return perceptron
if __name__=='__main__':
print('Perceptron graphical')
classifier_perceptron_graphical(*parameter_list[0])
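# Illustrative (not in the original example): the second parameter set draws
# 100 points per class and runs 10 perceptrons, e.g.:
#   classifier_perceptron_graphical(*parameter_list[1])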
| bsd-3-clause |
kubaszostak/gdal-dragndrop | osgeo/apps/Python27/Lib/site-packages/numpy/lib/histograms.py | 3 | 39375 | """
Histogram-related functions
"""
from __future__ import division, absolute_import, print_function
import functools
import operator
import warnings
import numpy as np
from numpy.compat.py3k import basestring
from numpy.core import overrides
__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
# range is a keyword argument to many functions, so save the builtin so they can
# use it.
_range = range
def _hist_bin_sqrt(x, range):
"""
Square root histogram bin estimator.
Bin width is inversely proportional to the data size. Used by many
programs for its simplicity.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
return x.ptp() / np.sqrt(x.size)
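# Editorial example: 10 000 samples spanning a range of 50 give a bin width
# of 50 / sqrt(10000) = 0.5.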
def _hist_bin_sturges(x, range):
"""
Sturges histogram bin estimator.
A very simplistic estimator based on the assumption of normality of
the data. This estimator has poor performance for non-normal data,
which becomes especially obvious for large data sets. The estimate
depends only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
return x.ptp() / (np.log2(x.size) + 1.0)
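# Editorial example: 1024 samples spanning a range of 11 give a bin width of
# 11 / (log2(1024) + 1) = 1.0.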
def _hist_bin_rice(x, range):
"""
Rice histogram bin estimator.
Another simple estimator with no normality assumption. It has better
performance for large data than Sturges, but tends to overestimate
the number of bins. The number of bins is proportional to the cube
root of data size (asymptotically optimal). The estimate depends
only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
return x.ptp() / (2.0 * x.size ** (1.0 / 3))
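# Editorial example: 1000 samples spanning a range of 20 give a bin width of
# 20 / (2 * 1000**(1.0/3)) = 1.0.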
def _hist_bin_scott(x, range):
"""
Scott histogram bin estimator.
The binwidth is proportional to the standard deviation of the data
and inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
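# Editorial example: roughly 1000 samples with standard deviation 1.0 give a
# bin width of (24*pi**0.5 / 1000)**(1.0/3.0) * 1.0, i.e. about 0.35.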
def _hist_bin_stone(x, range):
"""
Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).
The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.
The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.
https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule
This paper by Stone appears to be the origination of this rule.
http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
range : (float, float)
The lower and upper range of the bins.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
n = x.size
ptp_x = np.ptp(x)
if n <= 1 or ptp_x == 0:
return 0
def jhat(nbins):
hh = ptp_x / nbins
p_k = np.histogram(x, bins=nbins, range=range)[0] / n
return (2 - (n + 1) * p_k.dot(p_k)) / hh
nbins_upper_bound = max(100, int(np.sqrt(n)))
nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
if nbins == nbins_upper_bound:
warnings.warn("The number of bins estimated may be suboptimal.", RuntimeWarning, stacklevel=2)
return ptp_x / nbins
def _hist_bin_doane(x, range):
"""
Doane's histogram bin estimator.
Improved version of Sturges' formula which works better for
non-normal data. See
stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
if x.size > 2:
sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
sigma = np.std(x)
if sigma > 0.0:
# These three operations add up to
# g1 = np.mean(((x - np.mean(x)) / sigma)**3)
# but use only one temp array instead of three
temp = x - np.mean(x)
np.true_divide(temp, sigma, temp)
np.power(temp, 3, temp)
g1 = np.mean(temp)
return x.ptp() / (1.0 + np.log2(x.size) +
np.log2(1.0 + np.absolute(g1) / sg1))
return 0.0
def _hist_bin_fd(x, range):
"""
The Freedman-Diaconis histogram bin estimator.
The Freedman-Diaconis rule uses interquartile range (IQR) to
estimate binwidth. It is considered a variation of the Scott rule
with more robustness as the IQR is less affected by outliers than
the standard deviation. However, the IQR depends on fewer points
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
If the IQR is 0, this function returns 1 for the number of bins.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
del range # unused
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
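# Editorial example: 1000 samples with an interquartile range of 2.0 give a
# bin width of 2 * 2.0 * 1000**(-1.0/3.0) = 0.4.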
def _hist_bin_auto(x, range):
"""
Histogram bin estimator that uses the minimum width of the
Freedman-Diaconis and Sturges estimators if the FD bandwidth is non zero
and the Sturges estimator if the FD bandwidth is 0.
The FD estimator is usually the most robust method, but its width
estimate tends to be too large for small `x` and bad for data with limited
variance. The Sturges estimator is quite good for small (<1000) datasets
and is the default in the R language. This method gives good off the shelf
behaviour.
.. versionchanged:: 1.15.0
If there is limited variance the IQR can be 0, which results in the
FD bin width being 0 too. This is not a valid bin width, so
``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
If the IQR is 0, it's unlikely any variance based estimators will be of
use, so we revert to the sturges estimator, which only uses the size of the
dataset in its calculation.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
See Also
--------
_hist_bin_fd, _hist_bin_sturges
"""
fd_bw = _hist_bin_fd(x, range)
sturges_bw = _hist_bin_sturges(x, range)
del range # unused
if fd_bw:
return min(fd_bw, sturges_bw)
else:
# limited variance, so we return a len dependent bw estimator
return sturges_bw
# Private dict initialized at module load time
_hist_bin_selectors = {'stone': _hist_bin_stone,
'auto': _hist_bin_auto,
'doane': _hist_bin_doane,
'fd': _hist_bin_fd,
'rice': _hist_bin_rice,
'scott': _hist_bin_scott,
'sqrt': _hist_bin_sqrt,
'sturges': _hist_bin_sturges}
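# Editorial note: string values of `bins` (e.g. np.histogram_bin_edges(x,
# bins='auto')) are dispatched through this dict by _get_bin_edges below.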
def _ravel_and_check_weights(a, weights):
""" Check a and weights have matching shapes, and ravel both """
a = np.asarray(a)
# Ensure that the array is a "subtractable" dtype
if a.dtype == np.bool_:
warnings.warn("Converting input from {} to {} for compatibility."
.format(a.dtype, np.uint8),
RuntimeWarning, stacklevel=2)
a = a.astype(np.uint8)
if weights is not None:
weights = np.asarray(weights)
if weights.shape != a.shape:
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
return a, weights
def _get_outer_edges(a, range):
"""
Determine the outer bin edges to use, from either the data or the range
argument
"""
if range is not None:
first_edge, last_edge = range
if first_edge > last_edge:
raise ValueError(
'max must be larger than min in range parameter.')
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
elif a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
first_edge, last_edge = 0, 1
else:
first_edge, last_edge = a.min(), a.max()
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))
# expand empty range to avoid divide by zero
if first_edge == last_edge:
first_edge = first_edge - 0.5
last_edge = last_edge + 0.5
return first_edge, last_edge
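# Illustrative sketch, not part of the original module: with constant data the
# autodetected range collapses to a point, so the edges are padded by 0.5 on
# each side to avoid a zero-width range. The helper name is hypothetical.
def _example_outer_edges_degenerate():
    return _get_outer_edges(np.array([3.0, 3.0]), None)  # -> (2.5, 3.5)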
def _unsigned_subtract(a, b):
"""
Subtract two values where a >= b, and produce an unsigned result
This is needed when finding the difference between the upper and lower
bound of an int16 histogram
"""
# coerce to a single type
signed_to_unsigned = {
np.byte: np.ubyte,
np.short: np.ushort,
np.intc: np.uintc,
np.int_: np.uint,
np.longlong: np.ulonglong
}
dt = np.result_type(a, b)
try:
dt = signed_to_unsigned[dt.type]
except KeyError:
return np.subtract(a, b, dtype=dt)
else:
# we know the inputs are integers, and we are deliberately casting
# signed to unsigned
return np.subtract(a, b, casting='unsafe', dtype=dt)
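# Illustrative sketch, not part of the original module: subtracting the bounds
# of an int8 histogram directly would wrap around, while the unsigned variant
# above preserves the true span. The helper name is hypothetical.
def _example_unsigned_subtract():
    lo, hi = np.int8(-128), np.int8(127)
    # a plain int8 subtraction of hi - lo overflows; this returns uint8(255)
    return _unsigned_subtract(hi, lo)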
def _get_bin_edges(a, bins, range, weights):
"""
Computes the bins used internally by `histogram`.
Parameters
==========
a : ndarray
Ravelled data array
bins, range
Forwarded arguments from `histogram`.
weights : ndarray, optional
Ravelled weights array, or None
Returns
=======
bin_edges : ndarray
Array of bin edges
uniform_bins : (Number, Number, int):
The lower bound, upper bound, and number of bins, used in the optimized
implementation of `histogram` that works on uniform bins.
"""
# parse the overloaded bins argument
n_equal_bins = None
bin_edges = None
if isinstance(bins, basestring):
bin_name = bins
# if `bins` is a string for an automatic method,
# this will replace it with the number of bins calculated
if bin_name not in _hist_bin_selectors:
raise ValueError(
"{!r} is not a valid estimator for `bins`".format(bin_name))
if weights is not None:
raise TypeError("Automated estimation of the number of "
"bins is not supported for weighted data")
first_edge, last_edge = _get_outer_edges(a, range)
# truncate the range if needed
if range is not None:
keep = (a >= first_edge)
keep &= (a <= last_edge)
if not np.logical_and.reduce(keep):
a = a[keep]
if a.size == 0:
n_equal_bins = 1
else:
# Do not call selectors on empty arrays
width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
if width:
n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
else:
# Width can be zero for some estimators, e.g. FD when
# the IQR of the data is zero.
n_equal_bins = 1
elif np.ndim(bins) == 0:
try:
n_equal_bins = operator.index(bins)
except TypeError:
raise TypeError(
'`bins` must be an integer, a string, or an array')
if n_equal_bins < 1:
raise ValueError('`bins` must be positive, when an integer')
first_edge, last_edge = _get_outer_edges(a, range)
elif np.ndim(bins) == 1:
bin_edges = np.asarray(bins)
if np.any(bin_edges[:-1] > bin_edges[1:]):
raise ValueError(
'`bins` must increase monotonically, when an array')
else:
raise ValueError('`bins` must be 1d, when an array')
if n_equal_bins is not None:
# gh-10322 means that type resolution rules are dependent on array
# shapes. To avoid this causing problems, we pick a type now and stick
# with it throughout.
bin_type = np.result_type(first_edge, last_edge, a)
if np.issubdtype(bin_type, np.integer):
bin_type = np.result_type(bin_type, float)
# bin edges must be computed
bin_edges = np.linspace(
first_edge, last_edge, n_equal_bins + 1,
endpoint=True, dtype=bin_type)
return bin_edges, (first_edge, last_edge, n_equal_bins)
else:
return bin_edges, None
def _search_sorted_inclusive(a, v):
"""
Like `searchsorted`, but where the last item in `v` is placed on the right.
In the context of a histogram, this makes the last bin edge inclusive
"""
return np.concatenate((
a.searchsorted(v[:-1], 'left'),
a.searchsorted(v[-1:], 'right')
))
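# Illustrative sketch, not part of the original module: for sorted data
# [1, 2, 2, 3] and edges [1, 2, 3], the inclusive search counts the value 3 in
# the last bin, so the differenced result is [1, 3] rather than [1, 2] with an
# uncounted value on the right edge. The helper name is hypothetical.
def _example_search_sorted_inclusive():
    sa = np.array([1, 2, 2, 3])
    bin_edges = np.array([1, 2, 3])
    return np.diff(_search_sorted_inclusive(sa, bin_edges))  # -> array([1, 3])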
def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
return (a, bins, weights)
@array_function_dispatch(_histogram_bin_edges_dispatcher)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
r"""
Function to calculate only the edges of the bins used by the `histogram` function.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines the bin edges, including the rightmost
edge, allowing for non-uniform bin widths.
If `bins` is a string from the list below, `histogram_bin_edges` will use
the method chosen to calculate the optimal bin width and
consequently the number of bins (see `Notes` for more detail on
the estimators) from the data that falls within the requested
range. While the bin width will be optimal for the actual data
in the range, the number of bins will be computed to fill the
entire range, including the empty portions. For visualisation,
using the 'auto' option is suggested. Weighted data is not
supported for automated bin size selection.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good
all around performance.
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into
account data variability and data size.
'doane'
An improved version of Sturges' estimator that works better
with non-normal datasets.
'scott'
Less robust estimator that takes into account data
variability and data size.
'stone'
Estimator based on leave-one-out cross-validation estimate of
the integrated squared error. Can be regarded as a generalization
of Scott's rule.
'rice'
Estimator does not take variability into account, only data
size. Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only
optimal for gaussian data and underestimates number of bins
for large non-gaussian datasets.
'sqrt'
Square root (of data size) estimator, used by Excel and
other programs for its speed and simplicity.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). This is currently not used by any of the bin estimators,
but may be in the future.
Returns
-------
bin_edges : array of dtype float
The edges to pass into `histogram`
See Also
--------
histogram
Notes
-----
The methods to estimate the optimal number of bins are well founded
in literature, and are inspired by the choices R provides for
histogram visualisation. Note that having the number of bins
proportional to :math:`n^{1/3}` is asymptotically optimal, which is
why it appears in most estimators. These are simply plug-in methods
that give good starting points for number of bins. In the equations
below, :math:`h` is the binwidth and :math:`n_h` is the number of
bins. All estimators that compute bin counts are recast to bin width
using the `ptp` of the data. The final bin count is obtained from
``np.round(np.ceil(range / h))``.
'Auto' (maximum of the 'Sturges' and 'FD' estimators)
A compromise to get a good value. For small datasets the Sturges
value will usually be chosen, while larger datasets will usually
default to FD. Avoids the overly conservative behaviour of FD
and Sturges for small and large datasets respectively.
Switchover point is usually :math:`a.size \approx 1000`.
'FD' (Freedman Diaconis Estimator)
.. math:: h = 2 \frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good for large
datasets. The IQR is very robust to outliers.
'Scott'
.. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
The binwidth is proportional to the standard deviation of the
data and inversely proportional to cube root of ``x.size``. Can
be too conservative for small datasets, but is quite good for
large datasets. The standard deviation is not very robust to
outliers. Values are very similar to the Freedman-Diaconis
estimator in the absence of outliers.
'Rice'
.. math:: n_h = 2n^{1/3}
The number of bins is only proportional to cube root of
``a.size``. It tends to overestimate the number of bins and it
does not take into account data variability.
'Sturges'
.. math:: n_h = \log _{2}n+1
The number of bins is the base 2 log of ``a.size``. This
estimator assumes normality of data and is too conservative for
larger, non-normal datasets. This is the default method in R's
``hist`` method.
'Doane'
.. math:: n_h = 1 + \log_{2}(n) +
\log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
g_1 = mean[(\frac{x - \mu}{\sigma})^3]
\sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
An improved version of Sturges' formula that produces better
estimates for non-normal datasets. This estimator attempts to
account for the skew of the data.
'Sqrt'
.. math:: n_h = \sqrt n
The simplest and fastest estimator. Only takes into account the
data size.
Examples
--------
>>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
>>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
array([0. , 0.25, 0.5 , 0.75, 1. ])
>>> np.histogram_bin_edges(arr, bins=2)
array([0. , 2.5, 5. ])
For consistency with histogram, an array of pre-computed bins is
passed through unmodified:
>>> np.histogram_bin_edges(arr, [1, 2])
array([1, 2])
This function allows one set of bins to be computed, and reused across
multiple histograms:
>>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
>>> shared_bins
array([0., 1., 2., 3., 4., 5.])
>>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
>>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
>>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)
>>> hist_0; hist_1
array([1, 1, 0, 1, 0])
array([2, 0, 1, 1, 2])
Which gives more easily comparable results than using separate bins for
each histogram:
>>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
>>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
>>> hist_0; hist_1
array([1, 1, 1])
array([2, 1, 1, 2])
>>> bins_0; bins_1
array([0., 1., 2., 3.])
array([0. , 1.25, 2.5 , 3.75, 5. ])
"""
a, weights = _ravel_and_check_weights(a, weights)
bin_edges, _ = _get_bin_edges(a, bins, range, weights)
return bin_edges
def _histogram_dispatcher(
a, bins=None, range=None, normed=None, weights=None, density=None):
return (a, bins, weights)
@array_function_dispatch(_histogram_dispatcher)
def histogram(a, bins=10, range=None, normed=None, weights=None,
density=None):
r"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines a monotonically increasing array of bin edges,
including the rightmost edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string, it defines the method used to calculate the
optimal bin width, as defined by `histogram_bin_edges`.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
normed : bool, optional
.. deprecated:: 1.6.0
This is equivalent to the `density` argument, but produces incorrect
results for unequal bin widths. It should not be used.
.. versionchanged:: 1.15.0
DeprecationWarnings are actually emitted.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). If `density` is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
density : bool, optional
If ``False``, the result will contain the number of samples in
each bin. If ``True``, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the ``normed`` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `density` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize, histogram_bin_edges
Notes
-----
All but the last (righthand-most) bin is half-open. In other words,
if `bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
*includes* 4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist * np.diff(bin_edges))
1.0
.. versionadded:: 1.11.0
Automated Bin Selection Methods example, using 2 peak random data
with 2000 points:
>>> import matplotlib.pyplot as plt
>>> rng = np.random.RandomState(10) # deterministic random data
>>> a = np.hstack((rng.normal(size=1000),
... rng.normal(loc=5, scale=2, size=1000)))
>>> plt.hist(a, bins='auto') # arguments are passed to np.histogram
>>> plt.title("Histogram with 'auto' bins")
>>> plt.show()
"""
a, weights = _ravel_and_check_weights(a, weights)
bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
# The fast path uses bincount, but that only works for certain types
# of weight
simple_weights = (
weights is None or
np.can_cast(weights.dtype, np.double) or
np.can_cast(weights.dtype, complex)
)
if uniform_bins is not None and simple_weights:
# Fast algorithm for equal bins
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
first_edge, last_edge, n_equal_bins = uniform_bins
# Initialize empty histogram
n = np.zeros(n_equal_bins, ntype)
# Pre-compute histogram scaling factor
norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in _range(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i:i + BLOCK]
# Only include values in the right range
keep = (tmp_a >= first_edge)
keep &= (tmp_a <= last_edge)
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
# This cast ensures no type promotions occur below, which gh-10322
# make unpredictable. Getting it wrong leads to precision errors
# like gh-8123.
tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)
# Compute the bin indices, and for values that lie exactly on
# last_edge we need to subtract one
f_indices = _unsigned_subtract(tmp_a, first_edge) * norm
indices = f_indices.astype(np.intp)
indices[indices == n_equal_bins] -= 1
# The index computation is not guaranteed to give exactly
# consistent results within ~1 ULP of the bin edges.
decrement = tmp_a < bin_edges[indices]
indices[decrement] -= 1
# The last bin includes the right edge. The other bins do not.
increment = ((tmp_a >= bin_edges[indices + 1])
& (indices != n_equal_bins - 1))
indices[increment] += 1
# We now compute the histogram using bincount
if ntype.kind == 'c':
n.real += np.bincount(indices, weights=tmp_w.real,
minlength=n_equal_bins)
n.imag += np.bincount(indices, weights=tmp_w.imag,
minlength=n_equal_bins)
else:
n += np.bincount(indices, weights=tmp_w,
minlength=n_equal_bins).astype(ntype)
else:
# Compute via cumulative histogram
cum_n = np.zeros(bin_edges.shape, ntype)
if weights is None:
for i in _range(0, len(a), BLOCK):
sa = np.sort(a[i:i+BLOCK])
cum_n += _search_sorted_inclusive(sa, bin_edges)
else:
zero = np.zeros(1, dtype=ntype)
for i in _range(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate((zero, sw.cumsum()))
bin_index = _search_sorted_inclusive(sa, bin_edges)
cum_n += cw[bin_index]
n = np.diff(cum_n)
# density overrides the normed keyword
if density is not None:
if normed is not None:
# 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
warnings.warn(
"The normed argument is ignored when density is provided. "
"In future passing both will result in an error.",
DeprecationWarning, stacklevel=2)
normed = None
if density:
db = np.array(np.diff(bin_edges), float)
return n/db/n.sum(), bin_edges
elif normed:
# 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
warnings.warn(
"Passing `normed=True` on non-uniform bins has always been "
"broken, and computes neither the probability density "
"function nor the probability mass function. "
"The result is only correct if the bins are uniform, when "
"density=True will produce the same result anyway. "
"The argument will be removed in a future version of "
"numpy.",
np.VisibleDeprecationWarning, stacklevel=2)
# this normalization is incorrect, but
db = np.array(np.diff(bin_edges), float)
return n/(n*db).sum(), bin_edges
else:
if normed is not None:
# 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
warnings.warn(
"Passing normed=False is deprecated, and has no effect. "
"Consider passing the density argument instead.",
DeprecationWarning, stacklevel=2)
return n, bin_edges
def _histogramdd_dispatcher(sample, bins=None, range=None, normed=None,
weights=None, density=None):
return (sample, bins, weights)
@array_function_dispatch(_histogramdd_dispatcher)
def histogramdd(sample, bins=10, range=None, normed=None, weights=None,
density=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : (N, D) array, or (D, N) array_like
The data to be histogrammed.
Note the unusual interpretation of sample when an array_like:
* When an array, each row is a coordinate in a D-dimensional space -
such as ``histogramdd(np.array([p1, p2, p3]))``.
* When an array_like, each element is the list of values for single
coordinate - such as ``histogramdd((X, Y, Z))``.
The first form should be preferred.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the monotonically increasing bin
edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of length D, each an optional (lower, upper) tuple giving
the outer bin edges to be used if the edges are not given explicitly in
`bins`.
An entry of None in the sequence results in the minimum and maximum
values being used for the corresponding dimension.
The default, None, is equivalent to passing a tuple of D None values.
density : bool, optional
If False, the default, returns the number of samples in each bin.
If True, returns the probability *density* function at the bin,
``bin_count / sample_count / bin_volume``.
normed : bool, optional
An alias for the density argument that behaves identically. To avoid
confusion with the broken normed argument to `histogram`, `density`
should be preferred.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
N, D = sample.shape
nbin = np.empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = np.asarray(weights)
try:
M = len(bins)
if M != D:
raise ValueError(
'The dimension of bins must be equal to the dimension of the '
'sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# normalize the range argument
if range is None:
range = (None,) * D
elif len(range) != D:
raise ValueError('range argument must have one entry per dimension')
# Create edge arrays
for i in _range(D):
if np.ndim(bins[i]) == 0:
if bins[i] < 1:
raise ValueError(
'`bins[{}]` must be positive, when an integer'.format(i))
smin, smax = _get_outer_edges(sample[:,i], range[i])
edges[i] = np.linspace(smin, smax, bins[i] + 1)
elif np.ndim(bins[i]) == 1:
edges[i] = np.asarray(bins[i])
if np.any(edges[i][:-1] > edges[i][1:]):
raise ValueError(
'`bins[{}]` must be monotonically increasing, when an array'
.format(i))
else:
raise ValueError(
'`bins[{}]` must be a scalar or 1d array'.format(i))
nbin[i] = len(edges[i]) + 1 # includes an outlier on each end
dedges[i] = np.diff(edges[i])
# Compute the bin number each sample falls into.
Ncount = tuple(
# avoid np.digitize to work around gh-11022
np.searchsorted(edges[i], sample[:, i], side='right')
for i in _range(D)
)
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in _range(D):
# Find which points are on the rightmost edge.
on_edge = (sample[:, i] == edges[i][-1])
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened histogram matrix.
# This raises an error if the array is too large.
xy = np.ravel_multi_index(Ncount, nbin)
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
hist = np.bincount(xy, weights, minlength=nbin.prod())
# Shape into a proper matrix
hist = hist.reshape(nbin)
# This preserves the (bad) behavior observed in gh-7845, for now.
hist = hist.astype(float, casting='safe')
# Remove outliers (indices 0 and -1 for each dimension).
core = D*(slice(1, -1),)
hist = hist[core]
# handle the aliasing normed argument
if normed is None:
if density is None:
density = False
elif density is None:
# an explicit normed argument was passed, alias it to the new name
density = normed
else:
raise TypeError("Cannot specify both 'normed' and 'density'")
if density:
# calculate the probability density function
s = hist.sum()
for i in _range(D):
shape = np.ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
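# Illustrative sketch, not part of the original module: with density=True the
# multidimensional histogram integrates to one over the sampled volume. The
# helper name below is hypothetical and exists only for demonstration.
def _example_histogramdd_density():
    r = np.random.RandomState(0).randn(100, 2)
    hist, edges = histogramdd(r, bins=(4, 5), density=True)
    bin_areas = np.outer(np.diff(edges[0]), np.diff(edges[1]))
    return float((hist * bin_areas).sum())  # ~= 1.0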
| mit |
renegelinas/mi-instrument | mi/instrument/kut/ek60/ooicore/zplsc_echogram.py | 3 | 4958 | """
@package mi.instrument.kut.ek60.ooicore.driver
@file marine-integrations/mi/instrument/kut/ek60/ooicore/driver.py
@author Craig Risien
@brief ZPLSC Echogram generation for the ooicore
Release notes:
This class supports the generation of ZPLSC echograms.
"""
import matplotlib
matplotlib.use("Agg")
from matplotlib.dates import date2num
from datetime import datetime
import re
import numpy as np
from struct import unpack
__author__ = 'Craig Risien from OSU'
__license__ = 'Apache 2.0'
LENGTH_SIZE = 4
DATAGRAM_HEADER_SIZE = 12
CONFIG_HEADER_SIZE = 516
CONFIG_TRANSDUCER_SIZE = 320
TRANSDUCER_1 = 'Transducer # 1: '
TRANSDUCER_2 = 'Transducer # 2: '
TRANSDUCER_3 = 'Transducer # 3: '
# Reference time "seconds since 1900-01-01 00:00:00"
REF_TIME = date2num(datetime(1900, 1, 1, 0, 0, 0))
# set global regex expressions to find all sample, annotation and NMEA sentences
SAMPLE_REGEX = r'RAW\d{1}'
SAMPLE_MATCHER = re.compile(SAMPLE_REGEX, re.DOTALL)
ANNOTATE_REGEX = r'TAG\d{1}'
ANNOTATE_MATCHER = re.compile(ANNOTATE_REGEX, re.DOTALL)
NMEA_REGEX = r'NME\d{1}'
NMEA_MATCHER = re.compile(NMEA_REGEX, re.DOTALL)
###########################################################################
# ZPLSC Echogram
###########################################################################
####################################################################################
# Create functions to read the datagrams contained in the raw file. The
# code below was developed using example Matlab code produced by Lars Nonboe
# Andersen of Simrad and provided by Dr. Kelly Benoit-Bird and the
# raw data file format specification in the Simrad EK60 manual, with reference
# to code in Rick Towler's readEKraw toolbox.
def read_datagram_header(chunk):
"""
Reads the EK60 raw data file datagram header
@param chunk data chunk to read the datagram header from
@return: datagram header
"""
# setup unpack structure and field names
field_names = ('datagram_type', 'internal_time')
fmt = '<4sll'
# read in the values from the byte string chunk
values = unpack(fmt, chunk)
# the internal date time structure represents the number of 100
# nanosecond intervals since January 1, 1601. this is known as the
# Windows NT Time Format.
internal = values[2] * (2 ** 32) + values[1]
# create the datagram header dictionary
datagram_header = dict(zip(field_names, [values[0], internal]))
return datagram_header
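# Illustrative sketch, not part of the original module: converting the Windows
# NT timestamp assembled above (100 ns ticks since 1601-01-01) into a Python
# datetime. The helper name below is hypothetical.
def _example_nt_time_to_datetime(internal):
    from datetime import timedelta
    return datetime(1601, 1, 1) + timedelta(microseconds=internal / 10.0)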
def read_config_header(chunk):
"""
Reads the EK60 raw data file configuration header information
from the byte string passed in as a chunk
@param chunk data chunk to read the config header from
@return: configuration header
"""
# setup unpack structure and field names
field_names = ('survey_name', 'transect_name', 'sounder_name',
'version', 'transducer_count')
fmt = '<128s128s128s30s98sl'
# read in the values from the byte string chunk
values = list(unpack(fmt, chunk))
values.pop(4) # drop the spare field
# strip the trailing zero byte padding from the strings
for i in xrange(4):
values[i] = values[i].strip('\x00')
# create the configuration header dictionary
config_header = dict(zip(field_names, values))
return config_header
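# Illustrative sketch, not part of the original module: the unpack format used
# above covers every byte of the fixed-size configuration header
# (3 * 128 + 30 + 98 + 4 == 516). The helper name is hypothetical.
def _example_config_header_size():
    from struct import calcsize
    return calcsize('<128s128s128s30s98sl') == CONFIG_HEADER_SIZE  # -> True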
def read_config_transducer(chunk):
"""
Reads the EK60 raw data file configuration transducer information
from the byte string passed in as a chunk
@param chunk data chunk to read the configuration transducer information from
@return: configuration transducer information
"""
# setup unpack structure and field names
field_names = ('channel_id', 'beam_type', 'frequency', 'gain',
'equiv_beam_angle', 'beam_width_alongship', 'beam_width_athwartship',
'angle_sensitivity_alongship', 'angle_sensitivity_athwartship',
'angle_offset_alongship', 'angle_offset_athwart', 'pos_x', 'pos_y',
'pos_z', 'dir_x', 'dir_y', 'dir_z', 'pulse_length_table', 'gain_table',
'sa_correction_table', 'gpt_software_version')
fmt = '<128sl15f5f8s5f8s5f8s16s28s'
# read in the values from the byte string chunk
values = list(unpack(fmt, chunk))
# convert some of the values to arrays
pulse_length_table = np.array(values[17:22])
gain_table = np.array(values[23:28])
sa_correction_table = np.array(values[29:34])
# strip the trailing zero byte padding from the strings
for i in [0, 35]:
values[i] = values[i].strip('\x00')
# put it back together, dropping the spare strings
config_transducer = dict(zip(field_names[0:17], values[0:17]))
config_transducer[field_names[17]] = pulse_length_table
config_transducer[field_names[18]] = gain_table
config_transducer[field_names[19]] = sa_correction_table
config_transducer[field_names[20]] = values[35]
return config_transducer
| bsd-2-clause |
xubenben/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 233 | 7819 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
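# Illustrative sketch, not part of the original script: the slice above drops
# the leading '<', the common resource prefix and the trailing '>'. The helper
# name below is hypothetical.
def _example_short_name():
    return short_name('<http://dbpedia.org/resource/Some_Page>')  # -> 'Some_Page'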
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
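# Illustrative sketch, not part of the original script: the transitive closure
# computed above maps each source through chained redirects to its final
# target, e.g. {'A': 'B', 'B': 'C'} becomes {'A': 'C', 'B': 'C'}. The helper
# below is hypothetical and only mimics the loop on a toy dictionary.
def _example_transitive_closure():
    redirects = {'A': 'B', 'B': 'C'}
    for source in list(redirects):
        seen = set([source])
        target = redirects[source]
        while target in redirects and target not in seen:
            seen.add(target)
            target = redirects[target]
        redirects[source] = target
    return redirects  # -> {'A': 'C', 'B': 'C'}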
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
theconnor/google-objects | tests/sheets_test.py | 3 | 1986 | import unittest
from unittest import mock
import pandas
from tests.utils import get_data
from google_objects.sheets import SheetsClient
from google_objects.sheets import Spreadsheet
from google_objects.sheets import Sheet
from google_objects.sheets import Block
# load google sheets dummy data
spreadsheet = get_data('spreadsheet')
values = get_data('range')
# initialize mock google-api-python-client resource object
mock_resource = mock.Mock()
mock_resource.spreadsheets().get().execute.return_value = spreadsheet
mock_resource.spreadsheets().values().get().execute.return_value = values
class TestSheets(unittest.TestCase):
"""Test Google Sheets objects"""
def setUp(self):
self.client = SheetsClient(mock_resource)
def test_spreadsheets(self):
spreadsheet = self.client.get_spreadsheet('abc123')
self.assertIsInstance(spreadsheet, Spreadsheet)
# test spreadsheet properties
self.assertEqual(spreadsheet.title, 'Test Google Spreadsheet')
self.assertEqual(spreadsheet.id, 'abc123')
def test_sheets(self):
spreadsheet = self.client.get_spreadsheet('abc123')
sheets = spreadsheet.sheets()
self.assertIsInstance(sheets, list)
for i, sheet in enumerate(sheets):
self.assertIsInstance(sheet, Sheet)
# test sheet properties
first_sheet = sheets[0]
self.assertEqual(first_sheet.title, 'First Sheet')
self.assertEqual(first_sheet.id, 1234)
def test_values(self):
spreadsheet = self.client.get_spreadsheet('abc123')
sheets = spreadsheet.sheets()
values = sheets[0].values()
self.assertIsInstance(values, Block)
for row in values:
self.assertIsInstance(row, list)
def test_frame(self):
spreadsheet = self.client.get_spreadsheet('abc123')
sheets = spreadsheet.sheets()
values = sheets[0].dataframe()
self.assertIsInstance(values, pandas.DataFrame)
| apache-2.0 |
tejasapatil/spark | python/pyspark/sql/utils.py | 6 | 6334 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import py4j
class CapturedException(Exception):
def __init__(self, desc, stackTrace):
self.desc = desc
self.stackTrace = stackTrace
def __str__(self):
return repr(self.desc)
class AnalysisException(CapturedException):
"""
Failed to analyze a SQL query plan.
"""
class ParseException(CapturedException):
"""
Failed to parse a SQL command.
"""
class IllegalArgumentException(CapturedException):
"""
Passed an illegal or inappropriate argument.
"""
class StreamingQueryException(CapturedException):
"""
Exception that stopped a :class:`StreamingQuery`.
"""
class QueryExecutionException(CapturedException):
"""
Failed to execute a query.
"""
def capture_sql_exception(f):
def deco(*a, **kw):
try:
return f(*a, **kw)
except py4j.protocol.Py4JJavaError as e:
s = e.java_exception.toString()
stackTrace = '\n\t at '.join(map(lambda x: x.toString(),
e.java_exception.getStackTrace()))
if s.startswith('org.apache.spark.sql.AnalysisException: '):
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.catalyst.analysis'):
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.catalyst.parser.ParseException: '):
raise ParseException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.streaming.StreamingQueryException: '):
raise StreamingQueryException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.execution.QueryExecutionException: '):
raise QueryExecutionException(s.split(': ', 1)[1], stackTrace)
if s.startswith('java.lang.IllegalArgumentException: '):
raise IllegalArgumentException(s.split(': ', 1)[1], stackTrace)
raise
return deco
def install_exception_handler():
"""
Hook an exception handler into Py4j, which could capture some SQL exceptions in Java.
When calling Java API, it will call `get_return_value` to parse the returned object.
If any exception happens in the JVM, the result will be a Java exception object, which raises
py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one that
could capture the Java exception and throw a Python one (with the same error message).
It's idempotent, could be called multiple times.
"""
original = py4j.protocol.get_return_value
# The original `get_return_value` is not patched, it's idempotent.
patched = capture_sql_exception(original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
def toJArray(gateway, jtype, arr):
"""
Convert python list to java type array
:param gateway: Py4j Gateway
:param jtype: java type of element in array
:param arr: python type list
"""
jarr = gateway.new_array(jtype, len(arr))
for i in range(0, len(arr)):
jarr[i] = arr[i]
return jarr
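# Illustrative sketch, not part of the original module: building a
# java.lang.String[] from a Python list, assuming an active SparkContext named
# ``sc`` (an assumption made for this example only).
def _example_to_jarray(sc):
    gateway = sc._gateway
    return toJArray(gateway, gateway.jvm.java.lang.String, ["a", "b", "c"])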
def require_minimum_pandas_version():
""" Raise ImportError if minimum version of Pandas is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pandas_version = "0.19.2"
from distutils.version import LooseVersion
try:
import pandas
have_pandas = True
except ImportError:
have_pandas = False
if not have_pandas:
raise ImportError("Pandas >= %s must be installed; however, "
"it was not found." % minimum_pandas_version)
if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
raise ImportError("Pandas >= %s must be installed; however, "
"your version was %s." % (minimum_pandas_version, pandas.__version__))
def require_minimum_pyarrow_version():
""" Raise ImportError if minimum version of pyarrow is not installed
"""
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pyarrow_version = "0.8.0"
from distutils.version import LooseVersion
try:
import pyarrow
have_arrow = True
except ImportError:
have_arrow = False
if not have_arrow:
raise ImportError("PyArrow >= %s must be installed; however, "
"it was not found." % minimum_pyarrow_version)
if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
raise ImportError("PyArrow >= %s must be installed; however, "
"your version was %s." % (minimum_pyarrow_version, pyarrow.__version__))
class ForeachBatchFunction(object):
"""
This is the Python implementation of Java interface 'ForeachBatchFunction'. This wraps
the user-defined 'foreachBatch' function such that it can be called from the JVM when
the query is active.
"""
def __init__(self, sql_ctx, func):
self.sql_ctx = sql_ctx
self.func = func
def call(self, jdf, batch_id):
from pyspark.sql.dataframe import DataFrame
try:
self.func(DataFrame(jdf, self.sql_ctx), batch_id)
except Exception as e:
self.error = e
raise e
class Java:
implements = ['org.apache.spark.sql.execution.streaming.sources.PythonForeachBatchFunction']
| apache-2.0 |
chunweiyuan/xarray | xarray/core/common.py | 1 | 39773 | from collections import OrderedDict
from contextlib import suppress
from textwrap import dedent
from typing import (Any, Callable, Hashable, Iterable, Iterator, List, Mapping,
MutableMapping, Optional, Tuple, TypeVar, Union)
import numpy as np
import pandas as pd
from . import dtypes, duck_array_ops, formatting, ops
from .arithmetic import SupportsArithmetic
from .options import _get_keep_attrs
from .pycompat import dask_array_type
from .utils import Frozen, ReprObject, SortedKeysDict, either_dict_or_kwargs
# Used as a sentinel value to indicate all dimensions
ALL_DIMS = ReprObject('<all-dims>')
T = TypeVar('T')
class ImplementsArrayReduce:
@classmethod
def _reduce_method(cls, func: Callable, include_skipna: bool,
numeric_only: bool):
if include_skipna:
def wrapped_func(self, dim=None, axis=None, skipna=None,
**kwargs):
return self.reduce(func, dim, axis,
skipna=skipna, allow_lazy=True, **kwargs)
else:
def wrapped_func(self, dim=None, axis=None, # type: ignore
**kwargs):
return self.reduce(func, dim, axis,
allow_lazy=True, **kwargs)
return wrapped_func
_reduce_extra_args_docstring = dedent("""\
dim : str or sequence of str, optional
Dimension(s) over which to apply `{name}`.
axis : int or sequence of int, optional
Axis(es) over which to apply `{name}`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
`{name}` is calculated over axes.""")
_cum_extra_args_docstring = dedent("""\
dim : str or sequence of str, optional
Dimension over which to apply `{name}`.
axis : int or sequence of int, optional
Axis over which to apply `{name}`. Only one of the 'dim'
and 'axis' arguments can be supplied.""")
class ImplementsDatasetReduce:
@classmethod
def _reduce_method(cls, func: Callable, include_skipna: bool,
numeric_only: bool):
if include_skipna:
def wrapped_func(self, dim=None, skipna=None,
**kwargs):
return self.reduce(func, dim, skipna=skipna,
numeric_only=numeric_only, allow_lazy=True,
**kwargs)
else:
def wrapped_func(self, dim=None, **kwargs): # type: ignore
return self.reduce(func, dim,
numeric_only=numeric_only, allow_lazy=True,
**kwargs)
return wrapped_func
_reduce_extra_args_docstring = \
"""dim : str or sequence of str, optional
Dimension(s) over which to apply `{name}`. By default `{name}` is
applied over all dimensions."""
_cum_extra_args_docstring = \
"""dim : str or sequence of str, optional
Dimension over which to apply `{name}`.
axis : int or sequence of int, optional
Axis over which to apply `{name}`. Only one of the 'dim'
and 'axis' arguments can be supplied."""
class AbstractArray(ImplementsArrayReduce):
"""Shared base class for DataArray and Variable.
"""
def __bool__(self: Any) -> bool:
return bool(self.values)
def __float__(self: Any) -> float:
return float(self.values)
def __int__(self: Any) -> int:
return int(self.values)
def __complex__(self: Any) -> complex:
return complex(self.values)
def __array__(self: Any, dtype: Union[str, np.dtype, None] = None
) -> np.ndarray:
return np.asarray(self.values, dtype=dtype)
def __repr__(self) -> str:
return formatting.array_repr(self)
def _iter(self: Any) -> Iterator[Any]:
for n in range(len(self)):
yield self[n]
def __iter__(self: Any) -> Iterator[Any]:
if self.ndim == 0:
raise TypeError('iteration over a 0-d array')
return self._iter()
def get_axis_num(self, dim: Union[Hashable, Iterable[Hashable]]
) -> Union[int, Tuple[int, ...]]:
"""Return axis number(s) corresponding to dimension(s) in this array.
Parameters
----------
dim : str or iterable of str
Dimension name(s) for which to lookup axes.
Returns
-------
int or tuple of int
Axis number or numbers corresponding to the given dimensions.
"""
if isinstance(dim, Iterable) and not isinstance(dim, str):
return tuple(self._get_axis_num(d) for d in dim)
else:
return self._get_axis_num(dim)
def _get_axis_num(self: Any, dim: Hashable) -> int:
try:
return self.dims.index(dim)
except ValueError:
raise ValueError("%r not found in array dimensions %r" %
(dim, self.dims))
@property
def sizes(self: Any) -> Mapping[Hashable, int]:
"""Ordered mapping from dimension names to lengths.
Immutable.
See also
--------
Dataset.sizes
"""
return Frozen(OrderedDict(zip(self.dims, self.shape)))
class AttrAccessMixin:
"""Mixin class that allows getting keys with attribute access
"""
_initialized = False
@property
def _attr_sources(self):
"""List of places to look-up items for attribute-style access"""
return []
@property
def _item_sources(self):
"""List of places to look-up items for key-autocompletion """
return []
def __getattr__(self, name: str) -> Any:
if name != '__setstate__':
# this avoids an infinite loop when pickle looks for the
# __setstate__ attribute before the xarray object is initialized
for source in self._attr_sources:
with suppress(KeyError):
return source[name]
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, name))
def __setattr__(self, name: str, value: Any) -> None:
if self._initialized:
try:
# Allow setting instance variables if they already exist
# (e.g., _attrs). We use __getattribute__ instead of hasattr
# to avoid key lookups with attribute-style access.
self.__getattribute__(name)
except AttributeError:
raise AttributeError(
"cannot set attribute %r on a %r object. Use __setitem__ "
"style assignment (e.g., `ds['name'] = ...`) instead to "
"assign variables." % (name, type(self).__name__))
object.__setattr__(self, name, value)
def __dir__(self) -> List[str]:
"""Provide method name lookup and completion. Only provide 'public'
methods.
"""
extra_attrs = [item
for sublist in self._attr_sources
for item in sublist
if isinstance(item, str)]
return sorted(set(dir(type(self)) + extra_attrs))
def _ipython_key_completions_(self) -> List[str]:
"""Provide method for the key-autocompletions in IPython.
See http://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion
For the details.
""" # noqa
item_lists = [item
for sublist in self._item_sources
for item in sublist
if isinstance(item, str)]
return list(set(item_lists))
def get_squeeze_dims(xarray_obj,
dim: Union[Hashable, Iterable[Hashable], None] = None,
axis: Union[int, Iterable[int], None] = None
) -> List[Hashable]:
"""Get a list of dimensions to squeeze out.
"""
if dim is not None and axis is not None:
raise ValueError('cannot use both parameters `axis` and `dim`')
if dim is None and axis is None:
return [d for d, s in xarray_obj.sizes.items() if s == 1]
if isinstance(dim, Iterable) and not isinstance(dim, str):
dim = list(dim)
elif dim is not None:
dim = [dim]
else:
assert axis is not None
if isinstance(axis, int):
axis = [axis]
axis = list(axis)
if any(not isinstance(a, int) for a in axis):
raise TypeError(
'parameter `axis` must be int or iterable of int.')
alldims = list(xarray_obj.sizes.keys())
dim = [alldims[a] for a in axis]
if any(xarray_obj.sizes[k] > 1 for k in dim):
raise ValueError('cannot select a dimension to squeeze out '
'which has length greater than one')
return dim
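# Illustrative sketch, not part of the original module: for an object with
# sizes {'x': 1, 'y': 3} the default call returns the length-one dimensions.
# The helper name below is hypothetical and imports xarray lazily, which is
# only safe once the package has been fully imported.
def _example_get_squeeze_dims():
    import xarray as xr
    obj = xr.DataArray(np.zeros((1, 3)), dims=('x', 'y'))
    return get_squeeze_dims(obj)  # -> ['x']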
class DataWithCoords(SupportsArithmetic, AttrAccessMixin):
"""Shared base class for Dataset and DataArray."""
def squeeze(self, dim: Union[Hashable, Iterable[Hashable], None] = None,
drop: bool = False,
axis: Union[int, Iterable[int], None] = None):
"""Return a new object with squeezed data.
Parameters
----------
dim : None or Hashable or iterable of Hashable, optional
Selects a subset of the length one dimensions. If a dimension is
selected with length greater than one, an error is raised. If
None, all length one dimensions are squeezed.
drop : bool, optional
If ``drop=True``, drop squeezed coordinates instead of making them
scalar.
axis : None or int or iterable of int, optional
Like dim, but positional.
Returns
-------
squeezed : same type as caller
This object, but with all or a subset of the dimensions of
length 1 removed.
See Also
--------
numpy.squeeze
"""
dims = get_squeeze_dims(self, dim, axis)
return self.isel(drop=drop, **{d: 0 for d in dims})
def get_index(self, key: Hashable) -> pd.Index:
"""Get an index for a dimension, with fall-back to a default RangeIndex
"""
if key not in self.dims:
raise KeyError(key)
try:
return self.indexes[key]
except KeyError:
# need to ensure dtype=int64 in case range is empty on Python 2
return pd.Index(range(self.sizes[key]), name=key, dtype=np.int64)
def _calc_assign_results(self, kwargs: Mapping[str, T]
) -> MutableMapping[str, T]:
results = SortedKeysDict() # type: SortedKeysDict[str, T]
for k, v in kwargs.items():
if callable(v):
results[k] = v(self)
else:
results[k] = v
return results
def assign_coords(self, **kwargs):
"""Assign new coordinates to this object.
Returns a new object with all the original data in addition to the new
coordinates.
Parameters
----------
kwargs : keyword, value pairs
keywords are the variables names. If the values are callable, they
are computed on this object and assigned to new coordinate
variables. If the values are not callable, (e.g. a DataArray,
scalar, or array), they are simply assigned.
Returns
-------
assigned : same type as caller
A new object with the new coordinates in addition to the existing
data.
Examples
--------
Convert longitude coordinates from 0-359 to -180-179:
>>> da = xr.DataArray(np.random.rand(4),
... coords=[np.array([358, 359, 0, 1])],
... dims='lon')
>>> da
<xarray.DataArray (lon: 4)>
array([0.28298 , 0.667347, 0.657938, 0.177683])
Coordinates:
* lon (lon) int64 358 359 0 1
>>> da.assign_coords(lon=(((da.lon + 180) % 360) - 180))
<xarray.DataArray (lon: 4)>
array([0.28298 , 0.667347, 0.657938, 0.177683])
Coordinates:
* lon (lon) int64 -2 -1 0 1
Notes
-----
Since ``kwargs`` is a dictionary, the order of your arguments may not
be preserved, and so the order of the new variables is not well
defined. Assigning multiple variables within the same ``assign_coords``
is possible, but you cannot reference other variables created within
the same ``assign_coords`` call.
See also
--------
Dataset.assign
Dataset.swap_dims
"""
data = self.copy(deep=False)
results = self._calc_assign_results(kwargs)
data.coords.update(results)
return data
def assign_attrs(self, *args, **kwargs):
"""Assign new attrs to this object.
Returns a new object equivalent to self.attrs.update(*args, **kwargs).
Parameters
----------
args : positional arguments passed into ``attrs.update``.
kwargs : keyword arguments passed into ``attrs.update``.
Returns
-------
assigned : same type as caller
A new object with the new attrs in addition to the existing data.
See also
--------
Dataset.assign
"""
out = self.copy(deep=False)
out.attrs.update(*args, **kwargs)
return out
def pipe(self, func: Union[Callable[..., T], Tuple[Callable[..., T], str]],
*args, **kwargs) -> T:
"""
Apply func(self, *args, **kwargs)
This method replicates the pandas method of the same name.
Parameters
----------
func : function
function to apply to this xarray object (Dataset/DataArray).
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the xarray object.
args : positional arguments passed into ``func``.
kwargs : a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
xarray or pandas objects, e.g., instead of writing
>>> f(g(h(ds), arg1=a), arg2=b, arg3=c)
You can write
>>> (ds.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (ds.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
See Also
--------
pandas.DataFrame.pipe
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError('%s is both the pipe target and a keyword '
'argument' % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def groupby(self, group, squeeze: bool = True):
"""Returns a GroupBy object for performing grouped operations.
Parameters
----------
group : str, DataArray or IndexVariable
Array whose unique values should be used to group this array. If a
string, must be the name of a variable contained in this dataset.
squeeze : boolean, optional
If "group" is a dimension of any arrays in this dataset, `squeeze`
controls whether the subarrays have a dimension of length 1 along
that dimension or if the dimension is squeezed out.
Returns
-------
grouped : GroupBy
A `GroupBy` object patterned after `pandas.GroupBy` that can be
iterated over in the form of `(unique_value, grouped_array)` pairs.
Examples
--------
Calculate daily anomalies for daily data:
>>> da = xr.DataArray(np.linspace(0, 1826, num=1827),
... coords=[pd.date_range('1/1/2000', '31/12/2004',
... freq='D')],
... dims='time')
>>> da
<xarray.DataArray (time: 1827)>
array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03, 1.826e+03])
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...
>>> da.groupby('time.dayofyear') - da.groupby('time.dayofyear').mean('time')
<xarray.DataArray (time: 1827)>
array([-730.8, -730.8, -730.8, ..., 730.2, 730.2, 730.5])
Coordinates:
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...
dayofyear (time) int64 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 ...
See Also
--------
core.groupby.DataArrayGroupBy
core.groupby.DatasetGroupBy
""" # noqa
return self._groupby_cls(self, group, squeeze=squeeze)
def groupby_bins(self, group, bins, right: bool = True, labels=None,
precision: int = 3, include_lowest: bool = False,
squeeze: bool = True):
"""Returns a GroupBy object for performing grouped operations.
Rather than using all unique values of `group`, the values are discretized
first by applying `pandas.cut` [1]_ to `group`.
Parameters
----------
group : str, DataArray or IndexVariable
Array whose binned values should be used to group this array. If a
string, must be the name of a variable contained in this dataset.
bins : int or array of scalars
If bins is an int, it defines the number of equal-width bins in the
range of x. However, in this case, the range of x is extended by .1%
on each side to include the min or max values of x. If bins is a
sequence it defines the bin edges allowing for non-uniform bin
width. No extension of the range of x is done in this case.
right : boolean, optional
Indicates whether the bins include the rightmost edge or not. If
right == True (the default), then the bins [1,2,3,4] indicate
(1,2], (2,3], (3,4].
labels : array or boolean, default None
Used as labels for the resulting bins. Must be of the same length as
the resulting bins. If False, string bin labels are assigned by
`pandas.cut`.
precision : int
The precision at which to store and display the bins labels.
include_lowest : bool
Whether the first interval should be left-inclusive or not.
squeeze : boolean, optional
If "group" is a dimension of any arrays in this dataset, `squeeze`
controls whether the subarrays have a dimension of length 1 along
that dimension or if the dimension is squeezed out.
Returns
-------
grouped : GroupBy
A `GroupBy` object patterned after `pandas.GroupBy` that can be
iterated over in the form of `(unique_value, grouped_array)` pairs.
The name of the group has the added suffix `_bins` in order to
distinguish it from the original variable.
References
----------
.. [1] http://pandas.pydata.org/pandas-docs/stable/generated/pandas.cut.html
""" # noqa
return self._groupby_cls(self, group, squeeze=squeeze, bins=bins,
cut_kwargs={'right': right, 'labels': labels,
'precision': precision,
'include_lowest': include_lowest})
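# Editor's note (added, hedged): groupby_bins simply hands `bins` and the cut
# keyword arguments to the GroupBy machinery, which applies pandas.cut and groups
# on the resulting "<name>_bins" coordinate, so e.g.
# ds.groupby_bins('lon', bins=[0, 90, 180, 270, 360]).mean() would average within
# each longitude band.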
def rolling(self, dim: Optional[Mapping[Hashable, int]] = None,
min_periods: Optional[int] = None, center: bool = False,
**dim_kwargs: int):
"""
Rolling window object.
Parameters
----------
dim: dict, optional
Mapping from the dimension name to create the rolling iterator
along (e.g. `time`) to its moving window size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). The default, None, is equivalent to
setting min_periods equal to the size of the window.
center : boolean, default False
Set the labels at the center of the window.
**dim_kwargs : optional
The keyword arguments form of ``dim``.
One of dim or dim_kwargs must be provided.
Returns
-------
Rolling object (core.rolling.DataArrayRolling for DataArray,
core.rolling.DatasetRolling for Dataset.)
Examples
--------
Create rolling seasonal average of monthly data e.g. DJF, JFM, ..., SON:
>>> da = xr.DataArray(np.linspace(0, 11, num=12),
... coords=[pd.date_range('15/12/1999',
... periods=12, freq=pd.DateOffset(months=1))],
... dims='time')
>>> da
<xarray.DataArray (time: 12)>
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])
Coordinates:
* time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...
>>> da.rolling(time=3, center=True).mean()
<xarray.DataArray (time: 12)>
array([nan, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., nan])
Coordinates:
* time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...
Remove the NaNs using ``dropna()``:
>>> da.rolling(time=3, center=True).mean().dropna('time')
<xarray.DataArray (time: 10)>
array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
Coordinates:
* time (time) datetime64[ns] 2000-01-15 2000-02-15 2000-03-15 ...
See Also
--------
core.rolling.DataArrayRolling
core.rolling.DatasetRolling
""" # noqa
dim = either_dict_or_kwargs(dim, dim_kwargs, 'rolling')
return self._rolling_cls(self, dim, min_periods=min_periods,
center=center)
def coarsen(self, dim: Optional[Mapping[Hashable, int]] = None,
boundary: str = 'exact',
side: Union[str, Mapping[Hashable, str]] = 'left',
coord_func: str = 'mean',
**dim_kwargs: int):
"""
Coarsen object.
Parameters
----------
dim: dict, optional
Mapping from the name of the dimension to coarsen along (e.g., `time`)
to the size of the moving window.
boundary : 'exact' | 'trim' | 'pad'
If 'exact', a ValueError will be raised if dimension size is not a
multiple of the window size. If 'trim', the excess entries are
dropped. If 'pad', NA will be padded.
side : 'left' or 'right' or mapping from dimension to 'left' or 'right'
coord_func: function (name) that is applied to the coordinates,
or a mapping from coordinate name to function (name).
Returns
-------
Coarsen object (core.rolling.DataArrayCoarsen for DataArray,
core.rolling.DatasetCoarsen for Dataset.)
Examples
--------
Coarsen the long time series by averaging over every three days.
>>> da = xr.DataArray(np.linspace(0, 364, num=364),
... dims='time',
... coords={'time': pd.date_range(
... '15/12/1999', periods=364)})
>>> da
<xarray.DataArray (time: 364)>
array([ 0. , 1.002755, 2.00551 , ..., 361.99449 , 362.997245,
364. ])
Coordinates:
* time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-12-12
>>>
>>> da.coarsen(time=3, boundary='trim').mean()
<xarray.DataArray (time: 121)>
array([ 1.002755, 4.011019, 7.019284, ..., 358.986226,
361.99449 ])
Coordinates:
* time (time) datetime64[ns] 1999-12-16 1999-12-19 ... 2000-12-10
>>>
See Also
--------
core.rolling.DataArrayCoarsen
core.rolling.DatasetCoarsen
"""
dim = either_dict_or_kwargs(dim, dim_kwargs, 'coarsen')
return self._coarsen_cls(
self, dim, boundary=boundary, side=side,
coord_func=coord_func)
def resample(self, indexer: Optional[Mapping[Hashable, str]] = None,
skipna=None, closed: Optional[str] = None,
label: Optional[str] = None,
base: int = 0, keep_attrs: Optional[bool] = None,
loffset=None,
**indexer_kwargs: str):
"""Returns a Resample object for performing resampling operations.
Handles both downsampling and upsampling. If any intervals contain no
values from the original object, they will be given the value ``NaN``.
Parameters
----------
indexer : {dim: freq}, optional
Mapping from the dimension name to resample frequency.
skipna : bool, optional
Whether to skip missing values when aggregating in downsampling.
closed : 'left' or 'right', optional
Side of each interval to treat as closed.
label : 'left' or 'right', optional
Side of each interval to use for labeling.
base : int, optional
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '24H' frequency, base could
range from 0 through 23.
loffset : timedelta or str, optional
Offset used to adjust the resampled time labels. Some pandas date
offset strings are supported.
keep_attrs : bool, optional
If True, the object's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**indexer_kwargs : {dim: freq}
The keyword arguments form of ``indexer``.
One of indexer or indexer_kwargs must be provided.
Returns
-------
resampled : same type as caller
This object resampled.
Examples
--------
Downsample monthly time-series data to seasonal data:
>>> da = xr.DataArray(np.linspace(0, 11, num=12),
... coords=[pd.date_range('15/12/1999',
... periods=12, freq=pd.DateOffset(months=1))],
... dims='time')
>>> da
<xarray.DataArray (time: 12)>
array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])
Coordinates:
* time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...
>>> da.resample(time="QS-DEC").mean()
<xarray.DataArray (time: 4)>
array([ 1., 4., 7., 10.])
Coordinates:
* time (time) datetime64[ns] 1999-12-01 2000-03-01 2000-06-01 2000-09-01
Upsample monthly time-series data to daily data:
>>> da.resample(time='1D').interpolate('linear')
<xarray.DataArray (time: 337)>
array([ 0. , 0.032258, 0.064516, ..., 10.935484, 10.967742, 11. ])
Coordinates:
* time (time) datetime64[ns] 1999-12-15 1999-12-16 1999-12-17 ...
Limit scope of upsampling method
>>> da.resample(time='1D').nearest(tolerance='1D')
<xarray.DataArray (time: 337)>
array([ 0., 0., nan, ..., nan, 11., 11.])
Coordinates:
* time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-11-15
References
----------
.. [1] http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
""" # noqa
# TODO support non-string indexer after removing the old API.
from .dataarray import DataArray
from .resample import RESAMPLE_DIM
from ..coding.cftimeindex import CFTimeIndex
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
# note: the second argument (now 'skipna') used to be 'dim'
if ((skipna is not None and not isinstance(skipna, bool))
or ('how' in indexer_kwargs and 'how' not in self.dims)
or ('dim' in indexer_kwargs and 'dim' not in self.dims)):
raise TypeError(
'resample() no longer supports the `how` or '
'`dim` arguments. Instead call methods on resample '
"objects, e.g., data.resample(time='1D').mean()")
indexer = either_dict_or_kwargs(indexer, indexer_kwargs, 'resample')
if len(indexer) != 1:
raise ValueError(
"Resampling only supported along single dimensions."
)
dim, freq = next(iter(indexer.items()))
dim_name = dim
dim_coord = self[dim]
if isinstance(self.indexes[dim_name], CFTimeIndex):
from .resample_cftime import CFTimeGrouper
grouper = CFTimeGrouper(freq, closed, label, base, loffset)
else:
# TODO: to_offset() call required for pandas==0.19.2
grouper = pd.Grouper(freq=freq, closed=closed, label=label,
base=base,
loffset=pd.tseries.frequencies.to_offset(
loffset))
group = DataArray(dim_coord, coords=dim_coord.coords,
dims=dim_coord.dims, name=RESAMPLE_DIM)
resampler = self._resample_cls(self, group=group, dim=dim_name,
grouper=grouper,
resample_dim=RESAMPLE_DIM)
return resampler
def where(self, cond, other=dtypes.NA, drop: bool = False):
"""Filter elements from this object according to a condition.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic.
Parameters
----------
cond : DataArray or Dataset with boolean dtype
Locations at which to preserve this object's values.
other : scalar, DataArray or Dataset, optional
Value to use for locations in this object where ``cond`` is False.
By default, these locations are filled with NA.
drop : boolean, optional
If True, coordinate labels that only correspond to False values of
the condition are dropped from the result. Mutually exclusive with
``other``.
Returns
-------
Same type as caller.
Examples
--------
>>> import numpy as np
>>> a = xr.DataArray(np.arange(25).reshape(5, 5), dims=('x', 'y'))
>>> a.where(a.x + a.y < 4)
<xarray.DataArray (x: 5, y: 5)>
array([[ 0., 1., 2., 3., nan],
[ 5., 6., 7., nan, nan],
[ 10., 11., nan, nan, nan],
[ 15., nan, nan, nan, nan],
[ nan, nan, nan, nan, nan]])
Dimensions without coordinates: x, y
>>> a.where(a.x + a.y < 5, -1)
<xarray.DataArray (x: 5, y: 5)>
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, -1],
[10, 11, 12, -1, -1],
[15, 16, -1, -1, -1],
[20, -1, -1, -1, -1]])
Dimensions without coordinates: x, y
>>> a.where(a.x + a.y < 4, drop=True)
<xarray.DataArray (x: 4, y: 4)>
array([[ 0., 1., 2., 3.],
[ 5., 6., 7., nan],
[ 10., 11., nan, nan],
[ 15., nan, nan, nan]])
Dimensions without coordinates: x, y
See also
--------
numpy.where : corresponding numpy function
where : equivalent function
"""
from .alignment import align
from .dataarray import DataArray
from .dataset import Dataset
if drop:
if other is not dtypes.NA:
raise ValueError('cannot set `other` if drop=True')
if not isinstance(cond, (Dataset, DataArray)):
raise TypeError("cond argument is %r but must be a %r or %r" %
(cond, Dataset, DataArray))
# align so we can use integer indexing
self, cond = align(self, cond)
# get cond with the minimal size needed for the Dataset
if isinstance(cond, Dataset):
clipcond = cond.to_array().any('variable')
else:
clipcond = cond
# clip the data corresponding to coordinate dims that are not used
nonzeros = zip(clipcond.dims, np.nonzero(clipcond.values))
indexers = {k: np.unique(v) for k, v in nonzeros}
self = self.isel(**indexers)
cond = cond.isel(**indexers)
return ops.where_method(self, cond, other)
def close(self: Any) -> None:
"""Close any files linked to this object
"""
if self._file_obj is not None:
self._file_obj.close()
self._file_obj = None
def isin(self, test_elements):
"""Tests each value in the array for whether it is in test elements.
Parameters
----------
test_elements : array_like
The values against which to test each value of `element`.
This argument is flattened if an array or array_like.
See numpy notes for behavior with non-array-like parameters.
Returns
-------
isin : same as object, bool
Has the same shape as this object.
Examples
--------
>>> array = xr.DataArray([1, 2, 3], dims='x')
>>> array.isin([1, 3])
<xarray.DataArray (x: 3)>
array([ True, False, True])
Dimensions without coordinates: x
See also
--------
numpy.isin
"""
from .computation import apply_ufunc
from .dataset import Dataset
from .dataarray import DataArray
from .variable import Variable
if isinstance(test_elements, Dataset):
raise TypeError(
'isin() argument must be convertible to an array: {}'
.format(test_elements))
elif isinstance(test_elements, (Variable, DataArray)):
# need to explicitly pull out data to support dask arrays as the
# second argument
test_elements = test_elements.data
return apply_ufunc(
duck_array_ops.isin,
self,
kwargs=dict(test_elements=test_elements),
dask='allowed',
)
def __enter__(self: T) -> T:
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
self.close()
def full_like(other, fill_value, dtype: Union[str, np.dtype, None] = None):
"""Return a new object with the same shape and type as a given object.
Parameters
----------
other : DataArray, Dataset, or Variable
The reference object in input
fill_value : scalar
Value to fill the new object with before returning it.
dtype : dtype, optional
dtype of the new array. If omitted, it defaults to other.dtype.
Returns
-------
out : same as object
New object with the same shape and type as other, with the data
filled with fill_value. Coords will be copied from other.
If other is based on dask, the new one will be as well, and will be
split in the same chunks.
"""
from .dataarray import DataArray
from .dataset import Dataset
from .variable import Variable
if isinstance(other, Dataset):
data_vars = OrderedDict(
(k, _full_like_variable(v, fill_value, dtype))
for k, v in other.data_vars.items())
return Dataset(data_vars, coords=other.coords, attrs=other.attrs)
elif isinstance(other, DataArray):
return DataArray(
_full_like_variable(other.variable, fill_value, dtype),
dims=other.dims, coords=other.coords, attrs=other.attrs,
name=other.name)
elif isinstance(other, Variable):
return _full_like_variable(other, fill_value, dtype)
else:
raise TypeError("Expected DataArray, Dataset, or Variable")
def _full_like_variable(other, fill_value,
dtype: Union[str, np.dtype, None] = None):
"""Inner function of full_like, where other must be a variable
"""
from .variable import Variable
if isinstance(other.data, dask_array_type):
import dask.array
if dtype is None:
dtype = other.dtype
data = dask.array.full(other.shape, fill_value, dtype=dtype,
chunks=other.data.chunks)
else:
data = np.full_like(other, fill_value, dtype=dtype)
return Variable(dims=other.dims, data=data, attrs=other.attrs)
def zeros_like(other, dtype: Union[str, np.dtype, None] = None):
"""Shorthand for full_like(other, 0, dtype)
"""
return full_like(other, 0, dtype)
def ones_like(other, dtype: Union[str, np.dtype, None] = None):
"""Shorthand for full_like(other, 1, dtype)
"""
return full_like(other, 1, dtype)
def is_np_datetime_like(dtype: Union[str, np.dtype]) -> bool:
"""Check if a dtype is a subclass of the numpy datetime types
"""
return (np.issubdtype(dtype, np.datetime64) or
np.issubdtype(dtype, np.timedelta64))
def _contains_cftime_datetimes(array) -> bool:
"""Check if an array contains cftime.datetime objects
"""
try:
from cftime import datetime as cftime_datetime
except ImportError:
return False
else:
if array.dtype == np.dtype('O') and array.size > 0:
sample = array.ravel()[0]
if isinstance(sample, dask_array_type):
sample = sample.compute()
if isinstance(sample, np.ndarray):
sample = sample.item()
return isinstance(sample, cftime_datetime)
else:
return False
def contains_cftime_datetimes(var) -> bool:
"""Check if an xarray.Variable contains cftime.datetime objects
"""
return _contains_cftime_datetimes(var.data)
def _contains_datetime_like_objects(var) -> bool:
"""Check if a variable contains datetime like objects (either
np.datetime64, np.timedelta64, or cftime.datetime)
"""
return is_np_datetime_like(var.dtype) or contains_cftime_datetimes(var)
| apache-2.0 |
kendricktan/rarepepes | pix2pix/trainer.py | 1 | 6406 | import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import random
import numpy as np
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from pycrayon import CrayonClient
from PIL import Image
from torch.autograd import Variable
from models import Generator, Discriminator
from tqdm import tqdm
from utils import normalize
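# Editor's note (added, not part of the original implementation): the training
# loop below implements the usual pix2pix conditional-GAN objective. The
# discriminator D is trained with BCE to output 1 on real pairs (x, y) and 0 on
# fake pairs (x, G(x)); the generator G is trained to fool D plus a weighted L1
# reconstruction term:
#     L_G = BCE(D(x, G(x)), 1) + lamb * ||G(x) - y||_1
# where `lamb` is the weight passed to the constructor.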
class Pix2PixTrainer:
def __init__(self, nic, noc, ngf, ndf, beta=0.5, lamb=100, lr=1e-3, cuda=True, crayon=False):
"""
Args:
nic: Number of input channels
noc: Number of output channels
ngf: Number of generator filters
ndf: Number of discriminator filters
lamb: Weight on L1 term in objective
"""
self.cuda = cuda
self.start_epoch = 0
self.crayon = crayon
if crayon:
self.cc = CrayonClient(hostname="localhost", port=8889)
try:
self.logger = self.cc.create_experiment('pix2pix')
except:
self.cc.remove_experiment('pix2pix')
self.logger = self.cc.create_experiment('pix2pix')
self.gen = self.cudafy(Generator(nic, noc, ngf))
self.dis = self.cudafy(Discriminator(nic, noc, ndf))
# Optimizers for generators
self.gen_optim = self.cudafy(optim.Adam(
self.gen.parameters(), lr=lr, betas=(beta, 0.999)))
# Optimizers for discriminators
self.dis_optim = self.cudafy(optim.Adam(
self.dis.parameters(), lr=lr, betas=(beta, 0.999)))
# Loss functions
self.criterion_bce = nn.BCELoss()
self.criterion_mse = nn.MSELoss()
self.criterion_l1 = nn.L1Loss()
self.lamb = lamb
def train(self, loader, c_epoch):
self.dis.train()
self.gen.train()
self.reset_gradients()
max_idx = len(loader)
for idx, features in enumerate(tqdm(loader)):
orig_x = Variable(self.cudafy(features[0]))
orig_y = Variable(self.cudafy(features[1]))
""" Discriminator """
# Train with real
self.dis.volatile = False
dis_real = self.dis(torch.cat((orig_x, orig_y), 1))
real_labels = Variable(self.cudafy(
torch.ones(dis_real.size())
))
dis_real_loss = self.criterion_bce(
dis_real, real_labels)
# Train with fake
gen_y = self.gen(orig_x)
dis_fake = self.dis(torch.cat((orig_x, gen_y.detach()), 1))
fake_labels = Variable(self.cudafy(
torch.zeros(dis_fake.size())
))
dis_fake_loss = self.criterion_bce(
dis_fake, fake_labels)
# Update weights
dis_loss = dis_real_loss + dis_fake_loss
dis_loss.backward()
self.dis_optim.step()
self.reset_gradients()
""" Generator """
self.dis.volatile = True
dis_real = self.dis(torch.cat((orig_x, gen_y), 1))
real_labels = Variable(self.cudafy(
torch.ones(dis_real.size())
))
gen_loss = self.criterion_bce(dis_real, real_labels) + \
self.lamb * self.criterion_l1(gen_y, orig_y)
gen_loss.backward()
self.gen_optim.step()
# Pycrayon or nah
if self.crayon:
self.logger.add_scalar_value('pix2pix_gen_loss', gen_loss.data[0])
self.logger.add_scalar_value('pix2pix_dis_loss', dis_loss.data[0])
if idx % 50 == 0:
tqdm.write('Epoch: {} [{}/{}]\t'
'D Loss: {:.4f}\t'
'G Loss: {:.4f}'.format(
c_epoch, idx, max_idx, dis_loss.data[0], gen_loss.data[0]
))
def test(self, loader, e):
self.dis.eval()
self.gen.eval()
topilimg = transforms.ToPILImage()
if not os.path.exists('visualize/'):
os.makedirs('visualize/')
idx = random.randint(0, len(loader) - 1)
_features = loader.dataset[idx]
orig_x = Variable(self.cudafy(_features[0]))
orig_y = Variable(self.cudafy(_features[1]))
orig_x = orig_x.view(1, orig_x.size(0), orig_x.size(1), orig_x.size(2))
orig_y = orig_y.view(1, orig_y.size(0), orig_y.size(1), orig_y.size(2))
gen_y = self.gen(orig_x)
if self.cuda:
orig_x_np = normalize(orig_x.squeeze().cpu().data, 0, 1)
orig_y_np = normalize(orig_y.squeeze().cpu().data, 0, 1)
gen_y_np = normalize(gen_y.squeeze().cpu().data, 0, 1)
else:
orig_x_np = normalize(orig_x.squeeze().data, 0, 1)
orig_y_np = normalize(orig_y.squeeze().data, 0, 1)
gen_y_np = normalize(gen_y.squeeze().data, 0, 1)
orig_x_np = topilimg(orig_x_np)
orig_y_np = topilimg(orig_y_np)
gen_y_np = topilimg(gen_y_np)
f, (ax1, ax2, ax3) = plt.subplots(
3, 1, sharey='row'
)
ax1.imshow(orig_x_np)
ax1.set_title('x')
ax2.imshow(orig_y_np)
ax2.set_title('target y')
ax3.imshow(gen_y_np)
ax3.set_title('generated y')
f.savefig('visualize/{}.png'.format(e))
def save(self, e, filename='rarepepe_weights.tar'):
torch.save({
'gen': self.gen.state_dict(),
'dis': self.dis.state_dict(),
'epoch': e + 1
}, 'epoch{}_{}'.format(e, filename))
print('Saved model state')
def load(self, filedir):
if os.path.isfile(filedir):
checkpoint = torch.load(filedir)
self.gen.load_state_dict(checkpoint['gen'])
self.dis.load_state_dict(checkpoint['dis'])
self.start_epoch = checkpoint['epoch']
print('Model state loaded')
else:
print('Cant find file')
def reset_gradients(self):
"""
Helper function to reset gradients
"""
self.gen.zero_grad()
self.dis.zero_grad()
def cudafy(self, t):
"""
Helper function to cuda-fy our graphs
"""
if self.cuda:
if hasattr(t, 'cuda'):
return t.cuda()
return t
| mit |
zaxtax/scikit-learn | examples/linear_model/plot_ridge_path.py | 55 | 2138 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
This example also shows the usefulness of applying Ridge regression
to highly ill-conditioned matrices. For such matrices, a slight
change in the target variable can cause huge variances in the
calculated weights. In such cases, it is useful to set a certain
regularization (alpha) to reduce this variation (noise).
When alpha is very large, the regularization effect dominates the
squared loss function and the coefficients tend to zero.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares solution, the coefficients
exhibit big oscillations. In practice it is necessary to tune alpha
so that a balance is maintained between the two.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
tkoziara/parmec | tests/spring_chain_critical.py | 1 | 1395 | # PARMEC test --> SPRING command test: chain of cubes with critical damping
matnum = MATERIAL (1E3, 1E9, 0.25)
prevpar = -1
dratio0 = 1./2.**0.5 # these values are hand picked so that per-particle
dratio1 = 0.5 # damping ratios are equal to 1.0
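# Note (added): a damping ratio of 1.0 is the critical value -- the fastest return
# to equilibrium without oscillation -- so the hand-picked per-spring ratios above
# are chosen to make every particle in the chain critically damped.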
for i in range (0, 10):
nodes = [i, i, i,
i+1, i, i,
i+1, i+1, i,
i, i+1, i,
i, i, i+1,
i+1, i, i+1,
i+1, i+1, i+1,
i, i+1, i+1]
elements = [8, 0, 1, 2, 3, 4, 5, 6, 7, matnum]
colors = [1, 4, 0, 1, 2, 3, 2, 4, 4, 5, 6, 7, 3]
parnum = MESH (nodes, elements, matnum, colors)
if i: SPRING (parnum, (i, i, i), prevpar, (i, i, i), [-1,-1E7, 1,1E7], dratio0)
prevpar = parnum
sprnum = SPRING (parnum, (i+1, i+1, i+1), -1, (i+1, i+1, i+1), [-1,-1E7, 1,1E7], dratio1)
GRAVITY (0., 0., -10.)
t = HISTORY ('TIME')
z = HISTORY ('PZ', parnum)
hcri = CRITICAL()
print 'Critical time step estimate:', hcri
(hslst, hplst) = CRITICAL(sprnum+1, parnum)
print 'Per-spring time step list:', hslst
print 'Per-particle time step list:', hplst
h = hplst[0][0]
print 'Adopted time step:', h
DEM (10.0, h, (0.05, h))
print 'Generating time-z(center) plot ...'
try:
import matplotlib.pyplot as plt
plt.clf ()
plt.plot (t, z)
plt.xlim ((0, t[-1]))
plt.xlabel ('time $(s)$')
plt.ylabel ('z(center) $(m)$')
plt.savefig ('tests/spring_chain_critical_z.png')
except:
print 't = ', t
print 'z = ', z
| mit |
rbberger/lammps | examples/SPIN/test_problems/validation_damped_exchange/llg_exchange.py | 7 | 2668 | #!/usr/bin/env python3
import numpy as np , pylab, tkinter
import math
import matplotlib.pyplot as plt
import mpmath as mp
hbar=0.658212 # reduced Planck constant (eV.fs/rad)
J0=0.05 # per-neighbor exchange interaction (eV)
# exchange interaction parameters
J1 = 11.254 # in eV
J2 = 0.0 # adim
J3 = 1.0 # in Ang.
# initial spins
S1 = np.array([1.0, 0.0, 0.0])
S2 = np.array([0.0, 1.0, 0.0])
alpha=0.01 # damping coefficient
pi=math.pi
N=30000 # number of timesteps
dt=0.1 # timestep (fs)
# Rodrigues rotation formula
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise
rotation about the given axis by theta radians
"""
axis = np.asarray(axis)
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
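# Quick sanity check (added, hedged): a counterclockwise rotation of x_hat about
# z_hat by pi/2 should give y_hat; the assert is cheap and runs at import time.
assert np.allclose(np.dot(rotation_matrix([0, 0, 1], pi/2), [1, 0, 0]), [0, 1, 0])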
#Definition of the Bethe-Slater function
def func_BS(x,a,b,c):
return 4*a*((x/c)**2)*(1-b*(x/c)**2)*np.exp(-(x/c)**2)
#Definition of the derivative of the Bethe-Slater function
def func_dBS(x,a,b,c):
return 8*a*(x/c**2)*(1-(2*b+1)*(x/c)**2+b*(x/c)**4)*np.exp(-(x/c)**2)
# calculating precession field of spin Sr
def calc_rot_vector(Sr,Sf):
rot = (J0/hbar)*(Sf-alpha*np.cross(Sf,Sr))/(1.0+alpha**2)
return rot
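# Note (added): the vector returned above is the effective precession frequency of
# the damped LLG equation for an exchange-coupled spin pair,
#     omega_i = (J0/hbar) * (S_j - alpha * S_j x S_i) / (1 + alpha^2),
# and the integrator below advances each spin by rotating it about its omega_i.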
# second-order Suzuki-Trotter (ST) decomposition as implemented in LAMMPS
for t in range (0,N):
# advance s1 by dt/4
wf1 = calc_rot_vector(S1,S2)
theta=dt*np.linalg.norm(wf1)*0.25
axis=wf1/np.linalg.norm(wf1)
S1 = np.dot(rotation_matrix(axis, theta), S1)
# advance s2 by dt/2
wf2 = calc_rot_vector(S2,S1)
theta=dt*np.linalg.norm(wf2)*0.5
axis=wf2/np.linalg.norm(wf2)
S2 = np.dot(rotation_matrix(axis, theta), S2)
# advance s1 by dt/2
wf1 = calc_rot_vector(S1,S2)
theta=dt*np.linalg.norm(wf1)*0.5
axis=wf1/np.linalg.norm(wf1)
S1 = np.dot(rotation_matrix(axis, theta), S1)
# advance s2 by dt/2
wf2 = calc_rot_vector(S2,S1)
theta=dt*np.linalg.norm(wf2)*0.5
axis=wf2/np.linalg.norm(wf2)
S2 = np.dot(rotation_matrix(axis, theta), S2)
# advance s1 by dt/4
wf1 = calc_rot_vector(S1,S2)
theta=dt*np.linalg.norm(wf1)*0.25
axis=wf1/np.linalg.norm(wf1)
S1 = np.dot(rotation_matrix(axis, theta), S1)
# calc. average magnetization
Sm = (S1+S2)*0.5
# calc. energy
en = -J0*(np.dot(S1,S2))
# print res. in ps for comparison with LAMMPS
print(t*dt/1000.0,Sm[0],Sm[1],Sm[2],en)
| gpl-2.0 |
geektoni/shogun | examples/undocumented/python/graphical/so_multiclass_director_BMRM.py | 11 | 4332 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from shogun import RealFeatures
from shogun import MulticlassModel, MulticlassSOLabels, RealNumber, DualLibQPBMSOSVM, DirectorStructuredModel
from shogun import BMRM, PPBMRM, P3BMRM, ResultSet, RealVector
from shogun import StructuredAccuracy
class MulticlassStructuredModel(DirectorStructuredModel):
def __init__(self,features,labels):
DirectorStructuredModel.__init__(self)
self.set_features(features)
self.set_labels(labels)
self.dim = features.get_dim_feature_space()*labels.get_num_classes()
self.n_classes = labels.get_num_classes()
self.n_feats = features.get_dim_feature_space()
#self.use_director_risk()
def get_dim(self):
return self.dim
def argmax(self,w,feat_idx,training):
feature_vector = self.get_features().get_feature_vector(feat_idx)
label = None
if training == True:
label = int(RealNumber.obtain_from_generic(self.get_labels().get_label(feat_idx)).value)
ypred = 0
max_score = -1e10
for c in xrange(self.n_classes):
score = 0.0
for i in xrange(self.n_feats):
score += w[i+self.n_feats*c]*feature_vector[i]
if training == True:
score += (c!=label)
if score > max_score:
max_score = score
ypred = c
res = ResultSet()
res.score = max_score
res.psi_pred = RealVector(self.dim)
res.psi_pred.zero()
for i in xrange(self.n_feats):
res.psi_pred[i+self.n_feats*ypred] = feature_vector[i]
res.argmax = RealNumber(ypred)
if training == True:
res.delta = (label!=ypred)
res.psi_truth = RealVector(self.dim)
res.psi_truth.zero()
for i in xrange(self.n_feats):
res.psi_truth[i+self.n_feats*label] = feature_vector[i]
for i in xrange(self.n_feats):
res.score -= w[i+self.n_feats*label]*feature_vector[i]
return res
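# Editor's note (added): argmax above performs loss-augmented decoding during
# training -- each class score w_c . x is increased by the 0/1 loss (c != label) --
# so the BMRM cutting-plane solver is handed the most violated constraint, the
# oracle that structured-output SVM training requires.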
def fill_data(cnt, minv, maxv):
x1 = np.linspace(minv, maxv, cnt)
a, b = np.meshgrid(x1, x1)
X = np.array((np.ravel(a), np.ravel(b)))
y = np.zeros((1, cnt*cnt))
tmp = cnt*cnt;
y[0, tmp/3:(tmp/3)*2]=1
y[0, tmp/3*2:(tmp/3)*3]=2
return X, y.flatten()
def gen_data():
covs = np.array([[[0., -1. ], [2.5, .7]],
[[3., -1.5], [1.2, .3]],
[[ 2, 0 ], [ .0, 1.5 ]]])
X = np.r_[np.dot(np.random.randn(N, dim), covs[0]) + np.array([0, 10]),
np.dot(np.random.randn(N, dim), covs[1]) + np.array([-10, -10]),
np.dot(np.random.randn(N, dim), covs[2]) + np.array([10, -10])];
Y = np.hstack((np.zeros(N), np.ones(N), 2*np.ones(N)))
return X, Y
def get_so_labels(out):
N = out.get_num_labels()
l = np.zeros(N)
for i in xrange(N):
l[i] = RealNumber.obtain_from_generic(out.get_label(i)).value
return l
# Number of classes
M = 3
# Number of samples of each class
N = 10
# Dimension of the data
dim = 2
X, y = gen_data()
cnt = 50
X2, y2 = fill_data(cnt, np.min(X), np.max(X))
labels = MulticlassSOLabels(y)
features = RealFeatures(X.T)
model = MulticlassStructuredModel(features, labels)
lambda_ = 1e1
sosvm = DualLibQPBMSOSVM(model, labels, lambda_)
sosvm.set_cleanAfter(10) # number of iterations that cutting plane has to be inactive for to be removed
sosvm.set_cleanICP(True) # enables inactive cutting plane removal feature
sosvm.set_TolRel(0.001) # set relative tolerance
sosvm.set_verbose(True) # enables verbosity of the solver
sosvm.set_cp_models(16) # set number of cutting plane models
sosvm.set_solver(BMRM) # select training algorithm
#sosvm.set_solver(PPBMRM)
#sosvm.set_solver(P3BMRM)
sosvm.train()
res = sosvm.get_result()
Fps = res.get_hist_Fp_vector()
Fds = res.get_hist_Fd_vector()
wdists = res.get_hist_wdist_vector()
plt.figure()
plt.subplot(221)
plt.title('Fp and Fd history')
plt.plot(xrange(res.get_n_iters()), Fps, hold=True)
plt.plot(xrange(res.get_n_iters()), Fds, hold=True)
plt.subplot(222)
plt.title('w dist history')
plt.plot(xrange(res.get_n_iters()), wdists)
# Evaluation
out = sosvm.apply()
Evaluation = StructuredAccuracy()
acc = Evaluation.evaluate(out, labels)
print "Correct classification rate: %0.4f%%" % ( 100.0*acc )
# show figure
Z = get_so_labels(sosvm.apply(RealFeatures(X2)))
x = (X2[0,:]).reshape(cnt, cnt)
y = (X2[1,:]).reshape(cnt, cnt)
z = Z.reshape(cnt, cnt)
plt.subplot(223)
plt.pcolor(x, y, z)
plt.contour(x, y, z, linewidths=1, colors='black', hold=True)
plt.plot(X[:,0], X[:,1], 'yo')
plt.axis('tight')
plt.title('Classification')
plt.show()
| bsd-3-clause |
trhongbinwang/data_science_journey | deep_learning/keras/examples/mnist_swwae.py | 4 | 7761 | '''Trains a stacked what-where autoencoder built on residual blocks on the
MNIST dataset. It exemplifies two influential methods that have been developed
in the past few years.
The first is the idea of properly 'unpooling.' During any max pool, the
exact location (the 'where') of the maximal value in a pooled receptive field
is lost; however, it can be very useful in the overall reconstruction of an
input image. Therefore, if the 'where' is handed from the encoder
to the corresponding decoder layer, features being decoded can be 'placed' in
the right location, allowing for reconstructions of much higher fidelity.
References:
[1]
'Visualizing and Understanding Convolutional Networks'
Matthew D Zeiler, Rob Fergus
https://arxiv.org/abs/1311.2901v3
[2]
'Stacked What-Where Auto-encoders'
Junbo Zhao, Michael Mathieu, Ross Goroshin, Yann LeCun
https://arxiv.org/abs/1506.02351v8
The second idea exploited here is that of residual learning. Residual blocks
ease the training process by allowing skip connections that give the network
the ability to be as linear (or non-linear) as the data sees fit. This allows
for much deep networks to be easily trained. The residual element seems to
be advantageous in the context of this example as it allows a nice symmetry
between the encoder and decoder. Normally, in the decoder, the final
projection to the space where the image is reconstructed is linear, however
this does not have to be the case for a residual block as the degree to which
its output is linear or non-linear is determined by the data it is fed.
However, in order to cap the reconstruction in this example, a hard sigmoid is
applied as a bias because we know the MNIST digits are mapped to [0,1].
References:
[3]
'Deep Residual Learning for Image Recognition'
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
https://arxiv.org/abs/1512.03385v1
[4]
'Identity Mappings in Deep Residual Networks'
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
https://arxiv.org/abs/1603.05027v3
'''
from __future__ import print_function
import numpy as np
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Activation
from keras.layers import UpSampling2D, Conv2D, MaxPooling2D
from keras.layers import Input, BatchNormalization, ELU
import matplotlib.pyplot as plt
import keras.backend as K
from keras import layers
def convresblock(x, nfeats=8, ksize=3, nskipped=2, elu=True):
"""The proposed residual block from [4].
Running with elu=True will use ELU nonlinearity and running with
elu=False will use BatchNorm + RELU nonlinearity. While ELU's are fast
due to the fact they do not suffer from BatchNorm overhead, they may
overfit because they do not offer the stochastic element of the batch
formation process of BatchNorm, which acts as a good regularizer.
# Arguments
x: 4D tensor, the tensor to feed through the block
nfeats: Integer, number of feature maps for conv layers.
ksize: Integer, width and height of conv kernels in first convolution.
nskipped: Integer, number of conv layers for the residual function.
elu: Boolean, whether to use ELU or BN+RELU.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)`
# Output shape
4D tensor with shape:
`(batch, filters, rows, cols)`
"""
y0 = Conv2D(nfeats, ksize, padding='same')(x)
y = y0
for i in range(nskipped):
if elu:
y = ELU()(y)
else:
y = BatchNormalization(axis=1)(y)
y = Activation('relu')(y)
y = Conv2D(nfeats, 1, padding='same')(y)
return layers.add([y0, y])
def getwhere(x):
''' Calculate the 'where' mask that contains switches indicating which
index contained the max value when MaxPool2D was applied. Using the
gradient of the sum is a nice trick to keep everything high level.'''
y_prepool, y_postpool = x
return K.gradients(K.sum(y_postpool), y_prepool)
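# Illustrative note (added, hedged): for a single 2x2 receptive field with pre-pool
# values [[1, 3], [2, 4]] the pooled output is 4, and d(sum of pooled)/d(pre-pool)
# is [[0, 0], [0, 1]] -- a one-hot 'switch' marking where the max came from, which
# is exactly the mask the decoder multiplies in when unpooling.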
if K.backend() == 'tensorflow':
raise RuntimeError('This example can only run with the '
'Theano backend for the time being, '
'because it requires taking the gradient '
'of a gradient, which isn\'t '
'supported for all TF ops.')
# This example assume 'channels_first' data format.
K.set_image_data_format('channels_first')
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# The size of the kernel used for the MaxPooling2D
pool_size = 2
# The total number of feature maps at each layer
nfeats = [8, 16, 32, 64, 128]
# The sizes of the pooling kernel at each layer
pool_sizes = np.array([1, 1, 1, 1, 1]) * pool_size
# The convolution kernel size
ksize = 3
# Number of epochs to train for
epochs = 5
# Batch size during training
batch_size = 128
if pool_size == 2:
# if using a 5 layer net of pool_size = 2
x_train = np.pad(x_train, [[0, 0], [0, 0], [2, 2], [2, 2]],
mode='constant')
x_test = np.pad(x_test, [[0, 0], [0, 0], [2, 2], [2, 2]], mode='constant')
nlayers = 5
elif pool_size == 3:
# if using a 3 layer net of pool_size = 3
x_train = x_train[:, :, :-1, :-1]
x_test = x_test[:, :, :-1, :-1]
nlayers = 3
else:
import sys
sys.exit('Script supports pool_size of 2 and 3.')
# Shape of input to train on (note that model is fully convolutional however)
input_shape = x_train.shape[1:]
# The final list of the size of axis=1 for all layers, including input
nfeats_all = [input_shape[0]] + nfeats
# First build the encoder, all the while keeping track of the 'where' masks
img_input = Input(shape=input_shape)
# We push the 'where' masks to the following list
wheres = [None] * nlayers
y = img_input
for i in range(nlayers):
y_prepool = convresblock(y, nfeats=nfeats_all[i + 1], ksize=ksize)
y = MaxPooling2D(pool_size=(pool_sizes[i], pool_sizes[i]))(y_prepool)
wheres[i] = layers.Lambda(
getwhere, output_shape=lambda x: x[0])([y_prepool, y])
# Now build the decoder, and use the stored 'where' masks to place the features
for i in range(nlayers):
ind = nlayers - 1 - i
y = UpSampling2D(size=(pool_sizes[ind], pool_sizes[ind]))(y)
y = layers.multiply([y, wheres[ind]])
y = convresblock(y, nfeats=nfeats_all[ind], ksize=ksize)
# Use hard_sigmoid to clip the range of the reconstruction
y = Activation('hard_sigmoid')(y)
# Define the model and it's mean square error loss, and compile it with Adam
model = Model(img_input, y)
model.compile('adam', 'mse')
# Fit the model
model.fit(x_train, x_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, x_test))
# Plot
x_recon = model.predict(x_test[:25])
x_plot = np.concatenate((x_test[:25], x_recon), axis=1)
x_plot = x_plot.reshape((5, 10, input_shape[-2], input_shape[-1]))
x_plot = np.vstack([np.hstack(x) for x in x_plot])
plt.figure()
plt.axis('off')
plt.title('Test Samples: Originals/Reconstructions')
plt.imshow(x_plot, interpolation='none', cmap='gray')
plt.savefig('reconstructions.png')
| apache-2.0 |
janvanrijn/openml-pimp | examples/animation/generate_svm_decision_boundary_gif.py | 1 | 4543 | import argparse
import matplotlib.pyplot as plt
import matplotlib.animation
import numpy as np
import os
import sklearn.svm
import sklearn.datasets
def parse_args():
parser = argparse.ArgumentParser(description='Generate svm boundary gif')
# output file related
parser.add_argument('--output_dir', type=str, default=os.path.expanduser('~') + '/experiments/pimp/movies')
parser.add_argument('--filename', type=str, default='svm_decision_boundary')
# input dataset related
parser.add_argument('--num_datapoints', type=int, default=400)
parser.add_argument('--data_factor', type=float, default=.2)
parser.add_argument('--data_noise', type=float, default=.25)
# gamma hyperparameter related
parser.add_argument('--param_min', type=int, default=-3)
parser.add_argument('--param_max', type=int, default=8)
parser.add_argument('--param_interval', type=float, default=0.02)
# plot
parser.add_argument('--plot_margin', type=int, default=0.1)
parser.add_argument('--interval', type=int, default=10)
# output video params
parser.add_argument('--gif', action='store_true', default=False)
parser.add_argument('--dpi', type=int, default=180)
parser.add_argument('--fps', type=int, default=30)
parser.add_argument('--bitrate', type=int, default=1800)
return parser.parse_args()
def num_frames():
return int(np.floor((args.param_max - args.param_min) / args.param_interval))
def data_gen():
for gamma in np.arange(args.param_min, args.param_max, args.param_interval):
yield (gamma, 1)
def make_meshgrid(x, y, margin, delta=.02):
"""Create a mesh of points to plot in
Parameters
----------
x: data to base x-axis meshgrid on
y: data to base y-axis meshgrid on
margin: padding added beyond the data range when building the grid
delta: stepsize for meshgrid, optional
Returns
-------
xx, yy : ndarray
"""
x_min, x_max = x.min() - margin, x.max() + margin
y_min, y_max = y.min() - margin, y.max() + margin
xx, yy = np.meshgrid(np.arange(x_min, x_max, delta),
np.arange(y_min, y_max, delta))
return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
"""Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
"""
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
out = ax.contourf(xx, yy, Z, **params)
return out
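# Editor's note (added): the animation sweeps the RBF gamma over 2**-3 .. 2**8
# (see --param_min/--param_max). A small gamma gives a wide kernel and a smooth,
# almost linear boundary; a large gamma shrinks the kernel so the boundary wraps
# tightly around individual points -- the over-fitting regime the gif visualises.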
def plot_svm(params):
print(params)
gamma_exp, C_exp = params
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
model = sklearn.svm.SVC(kernel='rbf', gamma=2.0**gamma_exp, C=2.0**C_exp)
model.fit(X, y)
performance = sklearn.metrics.accuracy_score(y, model.predict(X))
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1, args.plot_margin)
ax.clear()
plot_contours(ax, model, xx, yy,
cmap=plt.cm.coolwarm, alpha=0.8)
ax.set_title('Support Vector Machine')
ax.text(x=xx[0, 0] + args.plot_margin, y=yy[-1, -1] - args.plot_margin,
s='Gamma: $2^{%0.2f}$\nComplexity: $2^{%0.0f}$\nAccuracy: %0.2f' % (gamma_exp, C_exp, performance),
verticalalignment='top', horizontalalignment='left')
ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='black')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xlabel('$X_1$')
ax.set_ylabel('$X_2$')
if __name__ == '__main__':
args = parse_args()
fig = plt.figure(figsize=(16, 9))
ax = fig.add_subplot(1, 1, 1)
np.random.seed(0)
X, y = sklearn.datasets.make_circles(n_samples=args.num_datapoints, factor=args.data_factor, noise=args.data_noise)
ani = matplotlib.animation.FuncAnimation(fig, plot_svm, data_gen,
blit=False, interval=args.interval, repeat=False, save_count=num_frames())
os.makedirs(args.output_dir, exist_ok=True)
if args.gif:
ani.save(os.path.join(args.output_dir, args.filename + '.gif'), dpi=args.dpi, writer='imagemagick')
else:
Writer = matplotlib.animation.writers['ffmpeg']
writer = Writer(fps=args.fps, metadata=dict(artist='Jan N. van Rijn and Frank Hutter'), bitrate=args.bitrate)
ani.save(os.path.join(args.output_dir, args.filename + '.mp4'), dpi=args.dpi, writer=writer)
print('Done')
| bsd-3-clause |
iABC2XYZ/abc | tmp/GetHist_Now.py | 1 | 2628 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
@author: Peiyong Jiang
Author: 姜培勇 (Peiyong Jiang)
[email protected]
Description of this file:
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tushare as ts
dayUse=30
dayPre=10
flagUpdate=1
dayCut=dayUse+dayPre
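# Note (added): each training row packs dayUse=30 consecutive trading days with 5
# features per day (open, close, high, low, turnover), i.e. 150 numbers, and the
# matching label row holds the 'high' prices of the following dayPre=10 days.
# dayCut is the minimum history length a stock needs to contribute one sample.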
if flagUpdate==1:
allToday=ts.get_today_all()
codes=allToday.code.values
iCodeCount=0
iLineCount=0
iFid=0
fML_pre='ml.pre'
fidP=open(fML_pre,'w+')
fML_code='ml.code'
fidC=open(fML_code,'w+')
fML_train='ml_0.train'
fidT=open(fML_train,'w+')
fML_label='ml_0.label'
fidL=open(fML_label,'w+')
for iCode in codes:
try:
codeData=ts.get_hist_data(iCode)
except:
continue
try:
codeLen,codeWidth=codeData.shape
except:
continue
if codeLen<dayCut:
continue
if codeWidth!=14:
continue
codeOpen=codeData.open.values
codeClose=codeData.close.values
codeHigh=codeData.high.values
codeLow=codeData.low.values
codeTurnover=codeData.turnover.values
for iDayCut in xrange(codeLen-1,dayCut-2,-1):
for iDayUse in xrange(dayUse):
idDayUse=iDayCut-iDayUse
fidT.writelines('%.2f ' %codeOpen[idDayUse])
fidT.writelines('%.2f ' %codeClose[idDayUse])
fidT.writelines('%.2f ' %codeHigh[idDayUse])
fidT.writelines('%.2f ' %codeLow[idDayUse])
fidT.writelines('%.2f ' %codeTurnover[idDayUse])
for iDayPre in xrange(dayPre):
idDayPre=iDayCut-dayUse-iDayPre
fidL.writelines('%.2f ' %codeHigh[idDayPre])
fidT.writelines('\n')
fidL.writelines('\n')
iLineCount+=1
if iLineCount % 50000 ==0:
fidT.close()
fidL.close()
iFid+=1
fML_train_N=fML_train.replace('0',str(iFid))
fML_label_N=fML_label.replace('0',str(iFid))
fidT=open(fML_train_N,'w+')
fidL=open(fML_label_N,'w+')
for iPre in xrange(-dayUse,0):
fidP.writelines('%.2f ' %codeOpen[iPre])
fidP.writelines('%.2f ' %codeClose[iPre])
fidP.writelines('%.2f ' %codeHigh[iPre])
fidP.writelines('%.2f ' %codeLow[iPre])
fidP.writelines('%.2f ' %codeTurnover[iPre])
fidP.writelines('\n')
fidC.writelines(iCode+'\n')
iCodeCount+=1
print iCodeCount,iLineCount
fidT.close()
fidL.close()
fidP.close()
fidC.close()
| gpl-3.0 |
sumspr/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
AshleySetter/datahandling | tests/test_optoanalysis.py | 3 | 10147 | import matplotlib
matplotlib.use('agg', warn=False, force=True)
import pytest
import optoanalysis
import numpy as np
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
plot_similarity_tolerance = 30
float_relative_tolerance = 1e-3
def test_load_data():
"""
Tests that load_data works and therefore that DataObject.__init__, DataObject.get_time_data and DataObject.getPSD work. Specifically it checks that the data loads and that it returns an object of type DataObject. It checks that the filepath points to the correct place. The data is sampled at the correct frequency and therefore that it has loaded the times correctly. It checks that the max frequency in the PSD is approximately equal to the Nyquist frequency for the test data. It also checks that the data returned by get_time_data matches the data loaded.
"""
data = optoanalysis.load_data("testData.raw")
assert type(data) == optoanalysis.optoanalysis.DataObject
assert data.filename == "testData.raw"
time = data.time.get_array()
assert time[1]-time[0] == pytest.approx(1/data.SampleFreq, rel=float_relative_tolerance)
assert max(data.freqs) == pytest.approx(data.SampleFreq/2, rel=0.00001) # max freq in PSD is approx equal to Nyquist frequency
data.load_time_data()
t, V = data.get_time_data()
np.testing.assert_array_equal(t, time)
np.testing.assert_array_equal(V, data.voltage)
optoanalysis.load_data("testData.raw", ObjectType="thermo") #testing specifically for init of ThermoObject here
return None
GlobalData = optoanalysis.load_data("testData.raw", NPerSegmentPSD=int(1e5)) # Load data to be used in upcoming tests - so that it doesn't need to be loaded for each individual function to be tested
@pytest.mark.mpl_image_compare(tolerance=plot_similarity_tolerance) # this decorator compares the figure object returned by the following function to the baseline png image stored in tests/baseline
def test_plot_PSD():
"""
This tests that the plot of the PSD produced by DataObject.plot_PSD is produced correctly and matches the baseline to a certain tolerance.
"""
fig, ax = GlobalData.plot_PSD([0, 400], show_fig=False)
return fig
@pytest.mark.mpl_image_compare(tolerance=plot_similarity_tolerance) # this decorator compares the figure object returned by the following function to the baseline png image stored in tests/baseline
def test_get_fit():
"""
Tests that DataObject.get_fit works and therefore tests fitPSD, fit_curvefit and PSD_Fitting as these are dependancies. It tests that the output values of the fitting are correct (both the values and thier errors) and that the plot looks the same as the baseline, within a certain tolerance.
"""
A, F, Gamma, fig, ax = GlobalData.get_fit(75000, 10000, show_fig=False)
assert A.n == pytest.approx(584418711252, rel=float_relative_tolerance)
assert F.n == pytest.approx(466604, rel=float_relative_tolerance)
assert Gamma.n == pytest.approx(3951.716, rel=float_relative_tolerance)
assert A.std_dev == pytest.approx(5827258935, rel=float_relative_tolerance)
assert F.std_dev == pytest.approx(50.3576, rel=float_relative_tolerance)
assert Gamma.std_dev == pytest.approx(97.5671, rel=float_relative_tolerance)
return fig
def test_extract_parameters():
"""
Tests that DataObject.extract_parameters works and returns the correct values.
"""
with open("testDataPressure.dat", 'r') as file:
for line in file:
pressure = float(line.split("mbar")[0])
R, M, ConvFactor = GlobalData.extract_parameters(pressure, 0.15)
#assert R.n == pytest.approx(3.27536e-8, rel=float_relative_tolerance)
#assert M.n == pytest.approx(3.23808e-19, rel=float_relative_tolerance)
#assert ConvFactor.n == pytest.approx(190629, rel=float_relative_tolerance)
#assert R.std_dev == pytest.approx(4.97914e-9, rel=float_relative_tolerance)
#assert M.std_dev == pytest.approx(9.84496e-20, rel=float_relative_tolerance)
#assert ConvFactor.std_dev == pytest.approx(58179.9, rel=float_relative_tolerance)
return None
def test_get_time_data():
"""
Tests that DataObject.get_time_data returns the correct number of values.
"""
t, v = GlobalData.get_time_data(timeStart=0, timeEnd=1e-3)
assert len(t) == len(v)
assert len(t) == 10000
return None
@pytest.mark.mpl_image_compare(tolerance=plot_similarity_tolerance)
def test_plot_time_data():
"""
This tests that the plot of the time trace (from -1ms to 1ms) produced by DataObject.plot_time_data is produced correctly and matches the baseline to a certain tolerance.
"""
fig, ax = GlobalData.plot_time_data(timeStart=-1e-3, timeEnd=1e-3, units='ms', show_fig=False)
return fig
def test_calc_area_under_PSD():
"""
This tests that the calculation of the area under the PSD
from 50 to 100 KHz, calculated by
DataObject.calc_area_under_PSD is unchanged.
"""
TrueArea = 1.6900993420543872e-06
area = GlobalData.calc_area_under_PSD(50e3, 100e3)
assert area == pytest.approx(TrueArea, rel=float_relative_tolerance)
return None
def test_get_fit_auto():
"""
This tests that DataObect.get_fit_auto continues to return the same
values as when the test was created, to within the set tolerance.
"""
ATrue = 466612.80058291875
AErrTrue = 54.936633293369404
OmegaTrapTrue = 583205139563.28
OmegaTrapErrTrue = 7359927227.585048
BigGammaTrue = 3946.998785496495
BigGammaErrTrue = 107.96706466271127
A, OmegaTrap, BigGamma, _, _ = GlobalData.get_fit_auto(70e3, MakeFig=False, show_fig=False)
assert A.n == pytest.approx(ATrue, rel=float_relative_tolerance)
assert OmegaTrap.n == pytest.approx(OmegaTrapTrue, rel=float_relative_tolerance)
assert BigGamma.n == pytest.approx(BigGammaTrue, rel=float_relative_tolerance)
assert A.std_dev == pytest.approx(AErrTrue, rel=float_relative_tolerance)
assert OmegaTrap.std_dev == pytest.approx(OmegaTrapErrTrue, rel=float_relative_tolerance)
assert BigGamma.std_dev == pytest.approx(BigGammaErrTrue, rel=float_relative_tolerance)
return None
@pytest.mark.mpl_image_compare(tolerance=plot_similarity_tolerance)
def test_extract_motion():
"""
    Tests the DataObject.extract_ZXY_motion function, and therefore
the get_ZXY_data function and get_ZXY_freqs function.
"""
expectedLength = int(np.floor(len(GlobalData.time)/3))
z, x, y, t, fig, ax = GlobalData.extract_ZXY_motion([75e3, 167e3, 185e3], 5e3, [15e3, 15e3, 15e3], 3, NPerSegmentPSD=int(1e5), MakeFig=True, show_fig=False)
assert len(z) == len(t)
assert len(z) == len(x)
assert len(x) == len(y)
assert len(z) == expectedLength
return fig
@pytest.mark.mpl_image_compare(tolerance=plot_similarity_tolerance)
def test_plot_phase_space():
"""
    Tests the plot_phase_space function and therefore the calc_phase_space function.
"""
fig1, axscatter, axhistx, axhisty, cb = GlobalData.plot_phase_space(75e3, GlobalData.ConvFactor, PeakWidth=10e3, ShowPSD=False, show_fig=False, FractionOfSampleFreq=3)
return fig1
def test_multi_load_data():
"""
    Tests the multi_load_data function, checking that the data
    is loaded correctly by verifying that various properties are set.
"""
data = optoanalysis.multi_load_data(1, [1, 36], [0])
assert data[0].filename == "CH1_RUN00000001_REPEAT0000.raw"
assert data[1].filename == "CH1_RUN00000036_REPEAT0000.raw"
for dataset in data:
assert type(dataset) == optoanalysis.optoanalysis.DataObject
time = dataset.time.get_array()
assert time[1]-time[0] == pytest.approx(1/dataset.SampleFreq, rel=float_relative_tolerance)
assert max(dataset.freqs) == pytest.approx(dataset.SampleFreq/2, rel=0.00001) # max freq in PSD is approx equal to Nyquist frequency
return None
GlobalMultiData = optoanalysis.multi_load_data(1, [1, 36], [0], NPerSegmentPSD=int(1e5)) # Load data to be used in upcoming tests - so that it doesn't need to be loaded for each individual function to be tested
def test_calc_temp():
"""
Tests calc_temp by calculating the temperature of the
    z degree-of-freedom data from its reference.
"""
for dataset in GlobalMultiData:
dataset.get_fit_auto(65e3, MakeFig=False, show_fig=False)
T = optoanalysis.calc_temp(GlobalMultiData[0], GlobalMultiData[1])
assert T.n == pytest.approx(2.6031509367704735, rel=float_relative_tolerance)
assert T.std_dev == pytest.approx(0.21312482508893446, rel=float_relative_tolerance)
return None
@pytest.mark.mpl_image_compare(tolerance=plot_similarity_tolerance)
def test_multi_plot_PSD():
"""
    This tests that the plot of the PSDs for the 2 datasets (from 0 to 300 kHz)
    produced by optoanalysis.multi_plot_PSD is produced correctly and matches the
baseline to a certain tolerance.
"""
fig, ax = optoanalysis.multi_plot_PSD(GlobalMultiData, xlim=[0, 300], units="kHz", LabelArray=["Reference", "Cooled"], ColorArray=["red", "blue"], alphaArray=[0.8, 0.8], show_fig=False)
return fig
@pytest.mark.mpl_image_compare(tolerance=plot_similarity_tolerance)
def test_multi_plot_time():
"""
    This tests that the plot of the time traces for the 2 datasets (from -1000us to 1000us)
    produced by optoanalysis.multi_plot_time is produced correctly and matches the
baseline to a certain tolerance.
"""
fig, ax = optoanalysis.multi_plot_time(GlobalMultiData, SubSampleN=1, units='us', xlim=[-1000, 1000], LabelArray=["Reference", "Cooled"], show_fig=False)
return fig
@pytest.mark.mpl_image_compare(tolerance=plot_similarity_tolerance)
def test_multi_subplots_time():
"""
    This tests that the time-trace subplots for the 2 datasets (from -1000us to 1000us)
    produced by optoanalysis.multi_subplots_time are produced correctly and match the
baseline to a certain tolerance.
"""
fig, ax = optoanalysis.multi_subplots_time(GlobalMultiData, SubSampleN=1, units='us', xlim=[-1000, 1000], LabelArray=["Reference", "Cooled"], show_fig=False)
return fig
| mit |
bolmez/Class-HARK | ConsumptionSaving/ConsMedModel.py | 1 | 68136 | '''
Consumption-saving models that also include medical spending.
'''
import sys
sys.path.insert(0,'../')
import numpy as np
from scipy.optimize import brentq
from HARKcore import HARKobject
from HARKutilities import approxLognormal, addDiscreteOutcomeConstantMean, CRRAutilityP_inv,\
CRRAutility, CRRAutility_inv, CRRAutility_invP, CRRAutilityPP,\
makeGridExpMult, NullFunc
from HARKsimulation import drawLognormal
from ConsIndShockModel import ConsumerSolution
from HARKinterpolation import BilinearInterpOnInterp1D, TrilinearInterp, BilinearInterp, CubicInterp,\
LinearInterp, LowerEnvelope3D, UpperEnvelope, LinearInterpOnInterp1D
from ConsPersistentShockModel import ConsPersistentShockSolver, PersistentShockConsumerType,\
ValueFunc2D, MargValueFunc2D, MargMargValueFunc2D, \
VariableLowerBoundFunc2D
from copy import copy, deepcopy
utility_inv = CRRAutility_inv
utilityP_inv = CRRAutilityP_inv
utility = CRRAutility
utility_invP = CRRAutility_invP
utilityPP = CRRAutilityPP
class MedShockPolicyFunc(HARKobject):
'''
Class for representing the policy function in the medical shocks model: opt-
imal consumption and medical care for given market resources, permanent income,
and medical need shock. Always obeys Con + MedPrice*Med = optimal spending.
'''
distance_criteria = ['xFunc','cFunc','MedPrice']
def __init__(self,xFunc,xLvlGrid,MedShkGrid,MedPrice,CRRAcon,CRRAmed,xLvlCubicBool=False,
MedShkCubicBool=False):
'''
Make a new MedShockPolicyFunc.
Parameters
----------
xFunc : np.array
Optimal total spending as a function of market resources, permanent
income, and the medical need shock.
xLvlGrid : np.array
1D array of total expenditure levels.
MedShkGrid : np.array
1D array of medical shocks.
MedPrice : float
Relative price of a unit of medical care.
CRRAcon : float
Coefficient of relative risk aversion for consumption.
CRRAmed : float
Coefficient of relative risk aversion for medical care.
xLvlCubicBool : boolean
Indicator for whether cubic spline interpolation (rather than linear)
should be used in the xLvl dimension.
MedShkCubicBool : boolean
Indicator for whether bicubic interpolation should be used; only
operative when xLvlCubicBool=True.
Returns
-------
None
'''
# Store some of the inputs in self
self.MedPrice = MedPrice
self.xFunc = xFunc
# Calculate optimal consumption at each combination of mLvl and MedShk.
cLvlGrid = np.zeros((xLvlGrid.size,MedShkGrid.size)) # Initialize consumption grid
for i in range(xLvlGrid.size):
xLvl = xLvlGrid[i]
for j in range(MedShkGrid.size):
MedShk = MedShkGrid[j]
if xLvl == 0: # Zero consumption when mLvl = 0
cLvl = 0.0
elif MedShk == 0: # All consumption when MedShk = 0
cLvl = xLvl
else:
optMedZeroFunc = lambda c : (MedShk/MedPrice)**(-1.0/CRRAcon)*\
((xLvl-c)/MedPrice)**(CRRAmed/CRRAcon) - c
cLvl = brentq(optMedZeroFunc,0.0,xLvl) # Find solution to FOC
cLvlGrid[i,j] = cLvl
# Construct the consumption function and medical care function
if xLvlCubicBool:
if MedShkCubicBool:
                raise NotImplementedError('Bicubic interpolation not yet implemented')
else:
xLvlGrid_tiled = np.tile(np.reshape(xLvlGrid,(xLvlGrid.size,1)),
(1,MedShkGrid.size))
MedShkGrid_tiled = np.tile(np.reshape(MedShkGrid,(1,MedShkGrid.size)),
(xLvlGrid.size,1))
dfdx = (CRRAmed/(CRRAcon*MedPrice))*(MedShkGrid_tiled/MedPrice)**(-1.0/CRRAcon)*\
((xLvlGrid_tiled - cLvlGrid)/MedPrice)**(CRRAmed/CRRAcon - 1.0)
dcdx = dfdx/(dfdx + 1.0)
dcdx[0,:] = dcdx[1,:] # approximation; function goes crazy otherwise
dcdx[:,0] = 1.0 # no Med when MedShk=0, so all x is c
cFromxFunc_by_MedShk = []
for j in range(MedShkGrid.size):
cFromxFunc_by_MedShk.append(CubicInterp(xLvlGrid,cLvlGrid[:,j],dcdx[:,j]))
cFunc = LinearInterpOnInterp1D(cFromxFunc_by_MedShk,MedShkGrid)
else:
cFunc = BilinearInterp(cLvlGrid,xLvlGrid,MedShkGrid)
self.cFunc = cFunc
def __call__(self,mLvl,pLvl,MedShk):
'''
Evaluate optimal consumption and medical care at given levels of market
resources, permanent income, and medical need shocks.
Parameters
----------
mLvl : np.array
Market resource levels.
pLvl : np.array
Permanent income levels; should be same size as mLvl.
MedShk : np.array
Medical need shocks; should be same size as mLvl.
Returns
-------
cLvl : np.array
Optimal consumption for each point in (xLvl,MedShk).
Med : np.array
Optimal medical care for each point in (xLvl,MedShk).
'''
xLvl = self.xFunc(mLvl,pLvl,MedShk)
cLvl = self.cFunc(xLvl,MedShk)
Med = (xLvl-cLvl)/self.MedPrice
return cLvl,Med
def derivativeX(self,mLvl,pLvl,MedShk):
'''
Evaluate the derivative of consumption and medical care with respect to
market resources at given levels of market resources, permanent income,
and medical need shocks.
Parameters
----------
mLvl : np.array
Market resource levels.
pLvl : np.array
Permanent income levels; should be same size as mLvl.
MedShk : np.array
Medical need shocks; should be same size as mLvl.
Returns
-------
dcdm : np.array
Derivative of consumption with respect to market resources for each
point in (xLvl,MedShk).
dMeddm : np.array
Derivative of medical care with respect to market resources for each
point in (xLvl,MedShk).
'''
xLvl = self.xFunc(mLvl,pLvl,MedShk)
dxdm = self.xFunc.derivativeX(mLvl,pLvl,MedShk)
dcdx = self.cFunc.derivativeX(xLvl,MedShk)
dcdm = dxdm*dcdx
dMeddm = (dxdm - dcdm)/self.MedPrice
return dcdm,dMeddm
def derivativeY(self,mLvl,pLvl,MedShk):
'''
Evaluate the derivative of consumption and medical care with respect to
permanent income at given levels of market resources, permanent income,
and medical need shocks.
Parameters
----------
mLvl : np.array
Market resource levels.
pLvl : np.array
Permanent income levels; should be same size as mLvl.
MedShk : np.array
Medical need shocks; should be same size as mLvl.
Returns
-------
dcdp : np.array
Derivative of consumption with respect to permanent income for each
point in (xLvl,MedShk).
dMeddp : np.array
Derivative of medical care with respect to permanent income for each
point in (xLvl,MedShk).
'''
xLvl = self.xFunc(mLvl,pLvl,MedShk)
dxdp = self.xFunc.derivativeY(mLvl,pLvl,MedShk)
dcdx = self.cFunc.derivativeX(xLvl,MedShk)
dcdp = dxdp*dcdx
dMeddp = (dxdp - dcdp)/self.MedPrice
return dcdp,dMeddp
def derivativeZ(self,mLvl,pLvl,MedShk):
'''
Evaluate the derivative of consumption and medical care with respect to
medical need shock at given levels of market resources, permanent income,
and medical need shocks.
Parameters
----------
mLvl : np.array
Market resource levels.
pLvl : np.array
Permanent income levels; should be same size as mLvl.
MedShk : np.array
Medical need shocks; should be same size as mLvl.
Returns
-------
dcdShk : np.array
Derivative of consumption with respect to medical need for each
point in (xLvl,MedShk).
dMeddShk : np.array
Derivative of medical care with respect to medical need for each
point in (xLvl,MedShk).
'''
xLvl = self.xFunc(mLvl,pLvl,MedShk)
dxdShk = self.xFunc.derivativeZ(mLvl,pLvl,MedShk)
dcdx = self.cFunc.derivativeX(xLvl,MedShk)
dcdShk = dxdShk*dcdx + self.cFunc.derivativeY(xLvl,MedShk)
dMeddShk = (dxdShk - dcdShk)/self.MedPrice
return dcdShk,dMeddShk
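# Editor's sketch (not part of the original module): the constructor above splits a
# given level of total spending xLvl between consumption and medical care by finding
# the root of optMedZeroFunc, i.e. the c satisfying
# (MedShk/MedPrice)**(-1/CRRAcon) * ((xLvl-c)/MedPrice)**(CRRAmed/CRRAcon) = c.
# The standalone function below repeats that root-finding step; its name and the
# default parameter values are arbitrary illustrative assumptions.
def _split_spending_sketch(xLvl=10.0, MedShk=2.0, MedPrice=1.5, CRRAcon=2.0, CRRAmed=3.0):
    optMedZeroFunc = lambda c : (MedShk/MedPrice)**(-1.0/CRRAcon)*\
                                ((xLvl-c)/MedPrice)**(CRRAmed/CRRAcon) - c
    cLvl = brentq(optMedZeroFunc, 0.0, xLvl)      # optimal consumption, as in __init__ above
    Med = (xLvl - cLvl)/MedPrice                  # the rest of the budget buys medical care
    assert np.isclose(cLvl + MedPrice*Med, xLvl)  # obeys Con + MedPrice*Med = xLvl
    return cLvl, Med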
class cThruXfunc(HARKobject):
'''
Class for representing consumption function derived from total expenditure
and consumption.
'''
distance_criteria = ['xFunc','cFunc']
def __init__(self,xFunc,cFunc):
'''
        Make a new instance of cThruXfunc.
Parameters
----------
xFunc : function
Optimal total spending as a function of market resources, permanent
income, and the medical need shock.
cFunc : function
Optimal consumption as a function of total spending and the medical
need shock.
Returns
-------
None
'''
self.xFunc = xFunc
self.cFunc = cFunc
def __call__(self,mLvl,pLvl,MedShk):
'''
Evaluate optimal consumption at given levels of market resources, perma-
nent income, and medical need shocks.
Parameters
----------
mLvl : np.array
Market resource levels.
pLvl : np.array
Permanent income levels; should be same size as mLvl.
MedShk : np.array
Medical need shocks; should be same size as mLvl.
Returns
-------
cLvl : np.array
Optimal consumption for each point in (xLvl,MedShk).
'''
xLvl = self.xFunc(mLvl,pLvl,MedShk)
cLvl = self.cFunc(xLvl,MedShk)
return cLvl
def derivativeX(self,mLvl,pLvl,MedShk):
'''
Evaluate the derivative of consumption with respect to market resources
at given levels of market resources, permanent income, and medical need
shocks.
Parameters
----------
mLvl : np.array
Market resource levels.
pLvl : np.array
Permanent income levels; should be same size as mLvl.
MedShk : np.array
Medical need shocks; should be same size as mLvl.
Returns
-------
dcdm : np.array
Derivative of consumption with respect to market resources for each
point in (xLvl,MedShk).
'''
xLvl = self.xFunc(mLvl,pLvl,MedShk)
dxdm = self.xFunc.derivativeX(mLvl,pLvl,MedShk)
dcdx = self.cFunc.derivativeX(xLvl,MedShk)
dcdm = dxdm*dcdx
return dcdm
def derivativeY(self,mLvl,pLvl,MedShk):
'''
        Evaluate the derivative of consumption with respect to
permanent income at given levels of market resources, permanent income,
and medical need shocks.
Parameters
----------
mLvl : np.array
Market resource levels.
pLvl : np.array
Permanent income levels; should be same size as mLvl.
MedShk : np.array
Medical need shocks; should be same size as mLvl.
Returns
-------
dcdp : np.array
Derivative of consumption with respect to permanent income for each
point in (xLvl,MedShk).
'''
xLvl = self.xFunc(mLvl,pLvl,MedShk)
dxdp = self.xFunc.derivativeY(mLvl,pLvl,MedShk)
dcdx = self.cFunc.derivativeX(xLvl,MedShk)
dcdp = dxdp*dcdx
return dcdp
def derivativeZ(self,mLvl,pLvl,MedShk):
'''
        Evaluate the derivative of consumption with respect to
medical need shock at given levels of market resources, permanent income,
and medical need shocks.
Parameters
----------
mLvl : np.array
Market resource levels.
pLvl : np.array
Permanent income levels; should be same size as mLvl.
MedShk : np.array
Medical need shocks; should be same size as mLvl.
Returns
-------
dcdShk : np.array
Derivative of consumption with respect to medical need for each
point in (xLvl,MedShk).
'''
xLvl = self.xFunc(mLvl,pLvl,MedShk)
dxdShk = self.xFunc.derivativeZ(mLvl,pLvl,MedShk)
dcdx = self.cFunc.derivativeX(xLvl,MedShk)
dcdShk = dxdShk*dcdx + self.cFunc.derivativeY(xLvl,MedShk)
return dcdShk
class MedThruXfunc(HARKobject):
'''
Class for representing medical care function derived from total expenditure
and consumption.
'''
distance_criteria = ['xFunc','cFunc','MedPrice']
def __init__(self,xFunc,cFunc,MedPrice):
'''
        Make a new instance of MedThruXfunc.
Parameters
----------
xFunc : function
Optimal total spending as a function of market resources, permanent
income, and the medical need shock.
cFunc : function
Optimal consumption as a function of total spending and the medical
need shock.
MedPrice : float
Relative price of a unit of medical care.
Returns
-------
None
'''
self.xFunc = xFunc
self.cFunc = cFunc
self.MedPrice = MedPrice
def __call__(self,mLvl,pLvl,MedShk):
'''
Evaluate optimal medical care at given levels of market resources,
permanent income, and medical need shock.
Parameters
----------
mLvl : np.array
Market resource levels.
pLvl : np.array
Permanent income levels; should be same size as mLvl.
MedShk : np.array
Medical need shocks; should be same size as mLvl.
Returns
-------
Med : np.array
Optimal medical care for each point in (xLvl,MedShk).
'''
xLvl = self.xFunc(mLvl,pLvl,MedShk)
Med = (xLvl-self.cFunc(xLvl,MedShk))/self.MedPrice
return Med
def derivativeX(self,mLvl,pLvl,MedShk):
'''
Evaluate the derivative of consumption and medical care with respect to
market resources at given levels of market resources, permanent income,
and medical need shocks.
Parameters
----------
mLvl : np.array
Market resource levels.
pLvl : np.array
Permanent income levels; should be same size as mLvl.
MedShk : np.array
Medical need shocks; should be same size as mLvl.
Returns
-------
dcdm : np.array
Derivative of consumption with respect to market resources for each
point in (xLvl,MedShk).
dMeddm : np.array
Derivative of medical care with respect to market resources for each
point in (xLvl,MedShk).
'''
xLvl = self.xFunc(mLvl,pLvl,MedShk)
dxdm = self.xFunc.derivativeX(mLvl,pLvl,MedShk)
dcdx = self.cFunc.derivativeX(xLvl,MedShk)
dcdm = dxdm*dcdx
dMeddm = (dxdm - dcdm)/self.MedPrice
return dcdm,dMeddm
def derivativeY(self,mLvl,pLvl,MedShk):
'''
Evaluate the derivative of medical care with respect to permanent income
at given levels of market resources, permanent income, and medical need
shocks.
Parameters
----------
mLvl : np.array
Market resource levels.
pLvl : np.array
Permanent income levels; should be same size as mLvl.
MedShk : np.array
Medical need shocks; should be same size as mLvl.
Returns
-------
dMeddp : np.array
Derivative of medical care with respect to permanent income for each
point in (xLvl,MedShk).
'''
xLvl = self.xFunc(mLvl,pLvl,MedShk)
dxdp = self.xFunc.derivativeY(mLvl,pLvl,MedShk)
dMeddp = (dxdp - dxdp*self.cFunc.derivativeX(xLvl,MedShk))/self.MedPrice
return dMeddp
def derivativeZ(self,mLvl,pLvl,MedShk):
'''
Evaluate the derivative of medical care with respect to medical need
shock at given levels of market resources, permanent income, and medical
need shocks.
Parameters
----------
mLvl : np.array
Market resource levels.
pLvl : np.array
Permanent income levels; should be same size as mLvl.
MedShk : np.array
Medical need shocks; should be same size as mLvl.
Returns
-------
dMeddShk : np.array
Derivative of medical care with respect to medical need for each
point in (xLvl,MedShk).
'''
xLvl = self.xFunc(mLvl,pLvl,MedShk)
dxdShk = self.xFunc.derivativeZ(mLvl,pLvl,MedShk)
dcdx = self.cFunc.derivativeX(xLvl,MedShk)
dcdShk = dxdShk*dcdx + self.cFunc.derivativeY(xLvl,MedShk)
dMeddShk = (dxdShk - dcdShk)/self.MedPrice
return dMeddShk
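# Editor's sketch (not part of the original module): cThruXfunc and MedThruXfunc wrap
# the same expenditure function xFunc and consumption share cFunc, so their outputs
# always satisfy Con + MedPrice*Med = xFunc(mLvl,pLvl,MedShk). The function below checks
# this with the trivial "spend everything" xFunc also used in updateSolutionTerminal;
# the grids, price, and risk-aversion values are arbitrary illustrative assumptions.
def _policy_wrapper_sketch():
    MedPrice, CRRAcon, CRRAmed = 1.5, 2.0, 3.0
    trivial_grid = np.array([0.0,1.0])
    xFunc = TrilinearInterp(np.array([[[0.0,0.0],[0.0,0.0]],[[1.0,1.0],[1.0,1.0]]]),
                            trivial_grid,trivial_grid,trivial_grid)  # x(m,p,MedShk) = m
    xLvlGrid = np.linspace(0.0,1.0,11)
    MedShkGrid = np.array([0.0,0.5,1.0,2.0])
    policyFunc = MedShockPolicyFunc(xFunc,xLvlGrid,MedShkGrid,MedPrice,CRRAcon,CRRAmed)
    cFunc = cThruXfunc(xFunc,policyFunc.cFunc)
    MedFunc = MedThruXfunc(xFunc,policyFunc.cFunc,MedPrice)
    mLvl, pLvl, MedShk = np.array([0.8]), np.array([1.0]), np.array([1.0])
    cLvl = cFunc(mLvl,pLvl,MedShk)
    Med = MedFunc(mLvl,pLvl,MedShk)
    assert np.allclose(cLvl + MedPrice*Med, xFunc(mLvl,pLvl,MedShk))
    return cLvl, Med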
class VariableLowerBoundFunc3D(HARKobject):
'''
A class for representing a function with three real inputs whose lower bound
in the first input depends on the second input. Useful for managing curved
natural borrowing constraints.
'''
distance_criteria = ['func','lowerBound']
def __init__(self,func,lowerBound):
'''
Make a new instance of VariableLowerBoundFunc3D.
Parameters
----------
func : function
A function f: (R_+ x R^2) --> R representing the function of interest
shifted by its lower bound in the first input.
lowerBound : function
The lower bound in the first input of the function of interest, as
a function of the second input.
Returns
-------
None
'''
self.func = func
self.lowerBound = lowerBound
def __call__(self,x,y,z):
'''
Evaluate the function at given state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
z : np.array
Third input values; should be of same shape as x.
Returns
-------
f_out : np.array
Function evaluated at (x,y,z), of same shape as inputs.
'''
xShift = self.lowerBound(y)
f_out = self.func(x-xShift,y,z)
return f_out
def derivativeX(self,x,y,z):
'''
Evaluate the first derivative with respect to x of the function at given
state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
z : np.array
Third input values; should be of same shape as x.
Returns
-------
dfdx_out : np.array
First derivative of function with respect to the first input,
evaluated at (x,y,z), of same shape as inputs.
'''
xShift = self.lowerBound(y)
dfdx_out = self.func.derivativeX(x-xShift,y,z)
return dfdx_out
def derivativeY(self,x,y,z):
'''
Evaluate the first derivative with respect to y of the function at given
state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
z : np.array
Third input values; should be of same shape as x.
Returns
-------
dfdy_out : np.array
First derivative of function with respect to the second input,
evaluated at (x,y,z), of same shape as inputs.
'''
xShift,xShiftDer = self.lowerBound.eval_with_derivative(y)
dfdy_out = self.func.derivativeY(x-xShift,y,z) - \
xShiftDer*self.func.derivativeX(x-xShift,y,z)
return dfdy_out
def derivativeZ(self,x,y,z):
'''
Evaluate the first derivative with respect to z of the function at given
state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
z : np.array
Third input values; should be of same shape as x.
Returns
-------
dfdz_out : np.array
First derivative of function with respect to the third input,
evaluated at (x,y,z), of same shape as inputs.
'''
xShift = self.lowerBound(y)
dfdz_out = self.func.derivativeZ(x-xShift,y,z)
return dfdz_out
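# Editor's sketch (not part of the original module): VariableLowerBoundFunc3D simply
# re-bases its first argument on a y-dependent lower bound before calling the wrapped
# function, i.e. f(x,y,z) = func(x - lowerBound(y), y, z). The function below shows
# this with plain Python callables; the name and the numbers are illustrative assumptions.
def _variable_lower_bound_sketch():
    shifted = VariableLowerBoundFunc3D(lambda x,y,z : x + y + z,  # wrapped function
                                       lambda y : -y)             # lower bound falls with y
    value = shifted(0.5, 1.0, 2.0)  # evaluates func(0.5-(-1.0), 1.0, 2.0) = 1.5+1.0+2.0
    assert np.isclose(value, 4.5)
    return value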
###############################################################################
class MedShockConsumerType(PersistentShockConsumerType):
'''
A class to represent agents who consume two goods: ordinary composite consumption
and medical care; both goods yield CRRAutility, and the coefficients on the
goods might be different. Agents expect to receive shocks to permanent and
transitory income as well as multiplicative shocks to utility from medical care.
'''
def __init__(self,cycles=1,time_flow=True,**kwds):
'''
Instantiate a new ConsumerType with given data, and construct objects
to be used during solution (income distribution, assets grid, etc).
        See ConsumerParameters.init_medical_shocks for a dictionary of the keywords
that should be passed to the constructor.
Parameters
----------
cycles : int
Number of times the sequence of periods should be solved.
time_flow : boolean
Whether time is currently "flowing" forward for this instance.
Returns
-------
None
'''
PersistentShockConsumerType.__init__(self,**kwds)
self.solveOnePeriod = solveConsMedShock # Choose correct solver
self.addToTimeInv('CRRAmed')
self.addToTimeVary('MedPrice')
def update(self):
'''
Update the income process, the assets grid, the permanent income grid,
the medical shock distribution, and the terminal solution.
Parameters
----------
none
Returns
-------
none
'''
self.updateIncomeProcess()
self.updateAssetsGrid()
self.updatePermIncGrid()
self.updateMedShockProcess()
self.updateSolutionTerminal()
def updateMedShockProcess(self):
'''
Constructs discrete distributions of medical preference shocks for each
period in the cycle. Distributions are saved as attribute MedShkDstn,
which is added to time_vary.
Parameters
----------
None
Returns
-------
None
'''
MedShkDstn = [] # empty list for medical shock distribution each period
for t in range(self.T_total):
MedShkAvgNow = self.MedShkAvg[t] # get shock distribution parameters
MedShkStdNow = self.MedShkStd[t]
MedShkDstnNow = approxLognormal(mu=np.log(MedShkAvgNow)-0.5*MedShkStdNow**2,\
sigma=MedShkStdNow,N=self.MedShkCount, tail_N=self.MedShkCountTail,
tail_bound=[0,0.9])
MedShkDstnNow = addDiscreteOutcomeConstantMean(MedShkDstnNow,0.0,0.0,sort=True) # add point at zero with no probability
MedShkDstn.append(MedShkDstnNow)
self.MedShkDstn = MedShkDstn
self.addToTimeVary('MedShkDstn')
def updateSolutionTerminal(self):
'''
Update the terminal period solution for this type. Similar to other models,
optimal behavior involves spending all available market resources; however,
the agent must split his resources between consumption and medical care.
Parameters
----------
None
        Returns
        -------
None
'''
# Take last period data, whichever way time is flowing
if self.time_flow:
MedPrice = self.MedPrice[-1]
MedShkVals = self.MedShkDstn[-1][1]
MedShkPrbs = self.MedShkDstn[-1][0]
else:
MedPrice = self.MedPrice[0]
MedShkVals = self.MedShkDstn[0][1]
MedShkPrbs = self.MedShkDstn[0][0]
# Initialize grids of medical need shocks, market resources, and optimal consumption
MedShkGrid = MedShkVals
xLvlMin = np.min(self.aXtraGrid)*np.min(self.pLvlGrid)
xLvlMax = np.max(self.aXtraGrid)*np.max(self.pLvlGrid)
xLvlGrid = makeGridExpMult(xLvlMin, xLvlMax, 3*self.aXtraGrid.size, 8)
trivial_grid = np.array([0.0,1.0]) # Trivial grid
# Make the policy functions for the terminal period
xFunc_terminal = TrilinearInterp(np.array([[[0.0,0.0],[0.0,0.0]],[[1.0,1.0],[1.0,1.0]]]),\
trivial_grid,trivial_grid,trivial_grid)
policyFunc_terminal = MedShockPolicyFunc(xFunc_terminal,xLvlGrid,MedShkGrid,MedPrice,
self.CRRA,self.CRRAmed,xLvlCubicBool=self.CubicBool)
cFunc_terminal = cThruXfunc(xFunc_terminal,policyFunc_terminal.cFunc)
MedFunc_terminal = MedThruXfunc(xFunc_terminal,policyFunc_terminal.cFunc,MedPrice)
# Calculate optimal consumption on a grid of market resources and medical shocks
mLvlGrid = xLvlGrid
mLvlGrid_tiled = np.tile(np.reshape(mLvlGrid,(mLvlGrid.size,1)),(1,MedShkGrid.size))
pLvlGrid_tiled = np.ones_like(mLvlGrid_tiled) # permanent income irrelevant in terminal period
MedShkGrid_tiled = np.tile(np.reshape(MedShkVals,(1,MedShkGrid.size)),(mLvlGrid.size,1))
cLvlGrid,MedGrid = policyFunc_terminal(mLvlGrid_tiled,pLvlGrid_tiled,MedShkGrid_tiled)
# Integrate marginal value across shocks to get expected marginal value
vPgrid = cLvlGrid**(-self.CRRA)
vPgrid[np.isinf(vPgrid)] = 0.0 # correct for issue at bottom edges
PrbGrid = np.tile(np.reshape(MedShkPrbs,(1,MedShkGrid.size)),(mLvlGrid.size,1))
vP_expected = np.sum(vPgrid*PrbGrid,axis=1)
# Construct the marginal (marginal) value function for the terminal period
vPnvrs = vP_expected**(-1.0/self.CRRA)
vPnvrs[0] = 0.0
vPnvrsFunc = BilinearInterp(np.tile(np.reshape(vPnvrs,(vPnvrs.size,1)),
(1,trivial_grid.size)),mLvlGrid,trivial_grid)
vPfunc_terminal = MargValueFunc2D(vPnvrsFunc,self.CRRA)
vPPfunc_terminal = MargMargValueFunc2D(vPnvrsFunc,self.CRRA)
# Integrate value across shocks to get expected value
vGrid = utility(cLvlGrid,gam=self.CRRA) + MedShkGrid_tiled*utility(MedGrid,gam=self.CRRAmed)
vGrid[:,0] = utility(cLvlGrid[:,0],gam=self.CRRA) # correct for issue when MedShk=0
vGrid[np.isinf(vGrid)] = 0.0 # correct for issue at bottom edges
v_expected = np.sum(vGrid*PrbGrid,axis=1)
# Construct the value function for the terminal period
vNvrs = utility_inv(v_expected,gam=self.CRRA)
vNvrs[0] = 0.0
vNvrsP = vP_expected*utility_invP(v_expected,gam=self.CRRA) # NEED TO FIGURE OUT MPC MAX IN THIS MODEL
vNvrsP[0] = 0.0
tempFunc = CubicInterp(mLvlGrid,vNvrs,vNvrsP)
vNvrsFunc = LinearInterpOnInterp1D([tempFunc,tempFunc],trivial_grid)
vFunc_terminal = ValueFunc2D(vNvrsFunc,self.CRRA)
# Make the terminal period solution
self.solution_terminal.cFunc = cFunc_terminal
self.solution_terminal.MedFunc = MedFunc_terminal
self.solution_terminal.policyFunc = policyFunc_terminal
self.solution_terminal.vPfunc = vPfunc_terminal
self.solution_terminal.vFunc = vFunc_terminal
self.solution_terminal.vPPfunc = vPPfunc_terminal
self.solution_terminal.hNrm = 0.0 # Don't track normalized human wealth
self.solution_terminal.hLvl = lambda p : np.zeros_like(p) # But do track absolute human wealth by permanent income
self.solution_terminal.mLvlMin = lambda p : np.zeros_like(p) # And minimum allowable market resources by perm inc
def updatePermIncGrid(self):
'''
Update the grid of permanent income levels. Currently only works for
infinite horizon models (cycles=0) and lifecycle models (cycles=1). Not
clear what to do about cycles>1. Identical to version in persistent
shocks model, but pLvl=0 is manually added to the grid (because there is
no closed form lower-bounding cFunc for pLvl=0).
Parameters
----------
None
Returns
-------
None
'''
# Run basic version of this method
PersistentShockConsumerType.updatePermIncGrid(self)
for j in range(len(self.pLvlGrid)): # Then add 0 to the bottom of each pLvlGrid
this_grid = self.pLvlGrid[j]
self.pLvlGrid[j] = np.insert(this_grid,0,0.0)
def makeMedShkHist(self):
'''
Makes a history of simulated medical need shocks for this consumer type by
drawing from the true continuous distribution of medical shocks.
Parameters
----------
None
Returns
-------
None
'''
orig_time = self.time_flow
self.timeFwd()
self.resetRNG()
# Initialize the shock history
MedShkHist = np.zeros((self.sim_periods,self.Nagents)) + np.nan
t_idx = 0
# Loop through each simulated period
for t in range(self.sim_periods):
MedShkAvg = self.MedShkAvg[t_idx]
MedShkStd = self.MedShkStd[t_idx]
MedShkHist[t,:] = drawLognormal(N=self.Nagents,mu=np.log(MedShkAvg)-0.5*MedShkStd**2,\
sigma=MedShkStd,seed=self.RNG.randint(0,2**31-1))
# Advance the time index, looping if we've run out of income distributions
t_idx += 1
if t_idx >= len(self.MedShkAvg):
t_idx = 0
# Store the results as attributes of self and restore time to its original flow
self.MedShkHist = MedShkHist
if not orig_time:
self.timeRev()
def advanceIncShks(self):
'''
Advance the permanent and transitory income shocks to the next period of
the shock history objects, after first advancing the medical need shocks.
Parameters
----------
None
Returns
-------
None
'''
self.MedShkNow = self.MedShkHist[self.Shk_idx,:]
self.advanceMedPrice()
PersistentShockConsumerType.advanceIncShks(self)
def advancecFunc(self):
'''
Advance the consumption function and medical spending function to the
next period in the solution.
Parameters
----------
None
Returns
-------
None
'''
self.policyFuncNow = self.solution[self.cFunc_idx].policyFunc
self.cFunc_idx += 1
if self.cFunc_idx >= len(self.solution):
self.cFunc_idx = 0 # Reset to zero if we've run out of cFuncs
def advanceMedPrice(self):
'''
Updates the variable MedPriceNow to the current period in a simulation.
Parameters
----------
None
Returns
-------
None
'''
self.MedPriceNow = self.MedPrice[self.Price_idx]
self.Price_idx += 1
if self.Price_idx == len(self.MedPrice):
self.Price_idx = 0
def initializeSim(self,a_init=None,p_init=None,t_init=0,sim_prds=None):
'''
Readies this type for simulation by clearing its history, initializing
state variables, and setting time indices to their correct position.
Extends version in ConsIndShockModel by also tracking medical care.
Parameters
----------
a_init : np.array
Array of initial end-of-period assets at the beginning of the sim-
ulation. Should be of size self.Nagents. If omitted, will default
to values in self.a_init (which are all 0 by default).
p_init : np.array
Array of initial permanent income levels at the beginning of the sim-
ulation. Should be of size self.Nagents. If omitted, will default
to values in self.p_init (which are all 1 by default).
t_init : int
Period of life in which to begin the simulation. Defaults to 0.
sim_prds : int
Number of periods to simulate. Defaults to the length of the trans-
itory income shock history.
Returns
-------
None
'''
PersistentShockConsumerType.initializeSim(self,a_init,p_init,t_init,sim_prds)
self.Price_idx = 0
self.MedHist = copy(self.pHist)
def simOnePrd(self):
'''
Simulate a single period of a consumption-saving model with permanent
        and transitory income shocks, with permanent income explicitly included
as an argument to the consumption function.
Parameters
----------
none
Returns
-------
none
'''
# Simulate the mortality process, replacing some agents with "newborns"
self.simMortality()
# Unpack objects from self for convenience
aPrev = self.aNow
pPrev = self.pNow
TranShkNow = self.TranShkNow
PermShkNow = self.PermShkNow
MedShkNow = self.MedShkNow
RfreeNow = self.RfreeNow
policyFuncNow = self.policyFuncNow
MedPrice = self.MedPriceNow
# Get correlation coefficient for permanent income
Corr = self.PermIncCorr
# Simulate the period
pNow = pPrev**Corr*PermShkNow # Updated permanent income level
bNow = RfreeNow*aPrev # Bank balances before labor income
mNow = bNow + TranShkNow*pNow # Market resources after income
cNow,MedNow = policyFuncNow(mNow,pNow,MedShkNow)# Consumption and medical care
aNow = mNow - cNow - MedPrice*MedNow # Assets after all actions are accomplished
# Store the new state and control variables
self.pNow = pNow
self.bNow = bNow
self.mNow = mNow
self.cNow = cNow
self.MedNow = MedNow
self.aNow = aNow
self.MPCnow = np.zeros_like(cNow) # skip this for now
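# Editor's sketch (not part of the original module): a minimal end-to-end use of
# MedShockConsumerType, mirroring the __main__ block at the bottom of this file
# (construct, solve, build shock histories, then simulate). It is wrapped in a function
# so nothing runs at import time; treat it as an illustrative outline, not a test.
def _med_shock_type_usage_sketch():
    import ConsumerParameters as Params              # deferred, as in the __main__ block
    agent = MedShockConsumerType(**Params.init_medical_shocks)
    agent.cycles = 0                                 # infinite-horizon version
    agent.solve()                                    # backward induction via solveConsMedShock
    agent.sim_periods = 100
    agent.DiePrb = 1.0 - agent.LivPrb[0]
    agent.makeIncShkHist()                           # permanent/transitory income shock history
    agent.makeMedShkHist()                           # medical need shock history
    agent.initializeSim()
    agent.simConsHistory()                           # simulate c, Med, and assets over time
    return agent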
###############################################################################
class ConsMedShockSolver(ConsPersistentShockSolver):
'''
Class for solving the one period problem for the "medical shocks" model, in
which consumers receive shocks to permanent and transitory income as well as
shocks to "medical need"-- multiplicative utility shocks for a second good.
'''
def __init__(self,solution_next,IncomeDstn,MedShkDstn,LivPrb,DiscFac,CRRA,CRRAmed,Rfree,MedPrice,
PermGroFac,PermIncCorr,BoroCnstArt,aXtraGrid,pLvlGrid,vFuncBool,CubicBool):
'''
Constructor for a new solver for a one period problem with idiosyncratic
shocks to permanent and transitory income and shocks to medical need.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
MedShkDstn : [np.array]
Discrete distribution of the multiplicative utility shifter for med-
ical care. Order: probabilities, preference shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion for composite consumption.
CRRAmed : float
Coefficient of relative risk aversion for medical care.
Rfree : float
Risk free interest factor on end-of-period assets.
MedPrice : float
Price of unit of medical care relative to unit of consumption.
        PermGroFac : float
Expected permanent income growth factor at the end of this period.
PermIncCorr : float
Correlation of permanent income from period to period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with.
aXtraGrid: np.array
Array of "extra" end-of-period (normalized) asset values-- assets
above the absolute minimum acceptable level.
pLvlGrid: np.array
Array of permanent income levels at which to solve the problem.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
None
'''
ConsPersistentShockSolver.__init__(self,solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rfree,
PermGroFac,PermIncCorr,BoroCnstArt,aXtraGrid,pLvlGrid,vFuncBool,CubicBool)
self.MedShkDstn = MedShkDstn
self.MedPrice = MedPrice
self.CRRAmed = CRRAmed
def setAndUpdateValues(self,solution_next,IncomeDstn,LivPrb,DiscFac):
'''
Unpacks some of the inputs (and calculates simple objects based on them),
storing the results in self for use by other methods. These include:
income shocks and probabilities, medical shocks and probabilities, next
period's marginal value function (etc), the probability of getting the
worst income shock next period, the patience factor, human wealth, and
the bounding MPCs.
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
Returns
-------
None
'''
# Run basic version of this method
ConsPersistentShockSolver.setAndUpdateValues(self,self.solution_next,self.IncomeDstn,
self.LivPrb,self.DiscFac)
# Also unpack the medical shock distribution
self.MedShkPrbs = self.MedShkDstn[0]
self.MedShkVals = self.MedShkDstn[1]
def defUtilityFuncs(self):
'''
Defines CRRA utility function for this period (and its derivatives,
and their inverses), saving them as attributes of self for other methods
to use. Extends version from ConsIndShock models by also defining inverse
marginal utility function over medical care.
Parameters
----------
none
Returns
-------
none
'''
ConsPersistentShockSolver.defUtilityFuncs(self) # Do basic version
self.uMedPinv = lambda Med : utilityP_inv(Med,gam=self.CRRAmed)
self.uMed = lambda Med : utility(Med,gam=self.CRRAmed)
self.uMedPP = lambda Med : utilityPP(Med,gam=self.CRRAmed)
def defBoroCnst(self,BoroCnstArt):
'''
Defines the constrained portion of the consumption function as cFuncNowCnst,
an attribute of self. Uses the artificial and natural borrowing constraints.
Parameters
----------
BoroCnstArt : float or None
Borrowing constraint for the minimum allowable (normalized) assets
to end the period with. If it is less than the natural borrowing
constraint at a particular permanent income level, then it is irrelevant;
BoroCnstArt=None indicates no artificial borrowing constraint.
Returns
-------
None
'''
# Find minimum allowable end-of-period assets at each permanent income level
PermIncMinNext = self.PermGroFac*self.PermShkMinNext*self.pLvlGrid**self.PermIncCorr
IncLvlMinNext = PermIncMinNext*self.TranShkMinNext
aLvlMin = (self.solution_next.mLvlMin(PermIncMinNext) - IncLvlMinNext)/self.Rfree
# Make a function for the natural borrowing constraint by permanent income
BoroCnstNat = LinearInterp(np.insert(self.pLvlGrid,0,0.0),np.insert(aLvlMin,0,0.0))
self.BoroCnstNat = BoroCnstNat
# Define the minimum allowable level of market resources by permanent income
if self.BoroCnstArt is not None:
BoroCnstArt = LinearInterp([0.0,1.0],[0.0,self.BoroCnstArt])
self.mLvlMinNow = UpperEnvelope(BoroCnstNat,BoroCnstArt)
else:
self.mLvlMinNow = BoroCnstNat
# Make the constrained total spending function: spend all market resources
trivial_grid = np.array([0.0,1.0]) # Trivial grid
spendAllFunc = TrilinearInterp(np.array([[[0.0,0.0],[0.0,0.0]],[[1.0,1.0],[1.0,1.0]]]),\
trivial_grid,trivial_grid,trivial_grid)
self.xFuncNowCnst = VariableLowerBoundFunc3D(spendAllFunc,self.mLvlMinNow)
self.mNrmMinNow = 0.0 # Needs to exist so as not to break when solution is created
self.MPCmaxEff = self.MPCmaxNow # Actually might vary by p, but no use formulating as a function
def getPointsForInterpolation(self,EndOfPrdvP,aLvlNow):
'''
Finds endogenous interpolation points (x,m) for the expenditure function.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aLvlNow : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
Returns
-------
x_for_interpolation : np.array
Total expenditure points for interpolation.
m_for_interpolation : np.array
Corresponding market resource points for interpolation.
p_for_interpolation : np.array
Corresponding permanent income points for interpolation.
'''
# Get size of each state dimension
mCount = aLvlNow.shape[1]
pCount = aLvlNow.shape[0]
MedCount = self.MedShkVals.size
# Calculate endogenous gridpoints and controls
cLvlNow = np.tile(np.reshape(self.uPinv(EndOfPrdvP),(1,pCount,mCount)),(MedCount,1,1))
MedBaseNow = np.tile(np.reshape(self.uMedPinv(self.MedPrice*EndOfPrdvP),(1,pCount,mCount)),
(MedCount,1,1))
MedShkVals_tiled = np.tile(np.reshape(self.MedShkVals**(1.0/self.CRRAmed),(MedCount,1,1)),
(1,pCount,mCount))
MedLvlNow = MedShkVals_tiled*MedBaseNow
aLvlNow_tiled = np.tile(np.reshape(aLvlNow,(1,pCount,mCount)),(MedCount,1,1))
xLvlNow = cLvlNow + self.MedPrice*MedLvlNow
mLvlNow = xLvlNow + aLvlNow_tiled
# Limiting consumption is zero as m approaches the natural borrowing constraint
x_for_interpolation = np.concatenate((np.zeros((MedCount,pCount,1)),xLvlNow),axis=-1)
temp = np.tile(self.BoroCnstNat(np.reshape(self.pLvlGrid,(1,self.pLvlGrid.size,1))),
(MedCount,1,1))
m_for_interpolation = np.concatenate((temp,mLvlNow),axis=-1)
# Make a 3D array of permanent income for interpolation
p_for_interpolation = np.tile(np.reshape(self.pLvlGrid,(1,pCount,1)),(MedCount,1,mCount+1))
# Store for use by cubic interpolator
self.cLvlNow = cLvlNow
self.MedLvlNow = MedLvlNow
self.MedShkVals_tiled = np.tile(np.reshape(self.MedShkVals,(MedCount,1,1)),(1,pCount,mCount))
return x_for_interpolation, m_for_interpolation, p_for_interpolation
def usePointsForInterpolation(self,xLvl,mLvl,pLvl,MedShk,interpolator):
'''
Constructs a basic solution for this period, including the consumption
function and marginal value function.
Parameters
----------
xLvl : np.array
Total expenditure points for interpolation.
mLvl : np.array
Corresponding market resource points for interpolation.
pLvl : np.array
Corresponding permanent income level points for interpolation.
MedShk : np.array
Corresponding medical need shocks for interpolation.
interpolator : function
A function that constructs and returns a consumption function.
Returns
-------
solution_now : ConsumerSolution
The solution to this period's consumption-saving problem, with a
consumption function, marginal value function, and minimum m.
'''
# Construct the unconstrained total expenditure function
xFuncNowUnc = interpolator(mLvl,pLvl,MedShk,xLvl)
xFuncNowCnst = self.xFuncNowCnst
xFuncNow = LowerEnvelope3D(xFuncNowUnc,xFuncNowCnst)
# Transform the expenditure function into policy functions for consumption and medical care
aug_factor = 2
xLvlGrid = makeGridExpMult(np.min(xLvl),np.max(xLvl),aug_factor*self.aXtraGrid.size,8)
policyFuncNow = MedShockPolicyFunc(xFuncNow,xLvlGrid,self.MedShkVals,self.MedPrice,
self.CRRA,self.CRRAmed,xLvlCubicBool=self.CubicBool)
cFuncNow = cThruXfunc(xFuncNow,policyFuncNow.cFunc)
MedFuncNow = MedThruXfunc(xFuncNow,policyFuncNow.cFunc,self.MedPrice)
# Make the marginal value function (and the value function if vFuncBool=True)
vFuncNow, vPfuncNow = self.makevAndvPfuncs(policyFuncNow)
# Pack up the solution and return it
solution_now = ConsumerSolution(cFunc=cFuncNow, vFunc=vFuncNow, vPfunc=vPfuncNow,\
mNrmMin=self.mNrmMinNow)
solution_now.MedFunc = MedFuncNow
solution_now.policyFunc = policyFuncNow
return solution_now
def makevAndvPfuncs(self,policyFunc):
'''
Constructs the marginal value function for this period.
Parameters
----------
policyFunc : function
Consumption and medical care function for this period, defined over
market resources, permanent income level, and the medical need shock.
Returns
-------
vFunc : function
Value function for this period, defined over market resources and
permanent income.
vPfunc : function
Marginal value (of market resources) function for this period, defined
over market resources and permanent income.
'''
# Get state dimension sizes
mCount = self.aXtraGrid.size
pCount = self.pLvlGrid.size
MedCount = self.MedShkVals.size
# Make temporary grids to evaluate the consumption function
temp_grid = np.tile(np.reshape(self.aXtraGrid,(mCount,1,1)),(1,pCount,MedCount))
aMinGrid = np.tile(np.reshape(self.mLvlMinNow(self.pLvlGrid),(1,pCount,1)),
(mCount,1,MedCount))
pGrid = np.tile(np.reshape(self.pLvlGrid,(1,pCount,1)),(mCount,1,MedCount))
mGrid = temp_grid*pGrid + aMinGrid
if self.pLvlGrid[0] == 0:
mGrid[:,0,:] = np.tile(np.reshape(self.aXtraGrid,(mCount,1)),(1,MedCount))
MedShkGrid = np.tile(np.reshape(self.MedShkVals,(1,1,MedCount)),(mCount,pCount,1))
probsGrid = np.tile(np.reshape(self.MedShkPrbs,(1,1,MedCount)),(mCount,pCount,1))
# Get optimal consumption (and medical care) for each state
cGrid,MedGrid = policyFunc(mGrid,pGrid,MedShkGrid)
# Calculate expected value by "integrating" across medical shocks
if self.vFuncBool:
MedGrid = np.maximum(MedGrid,1e-100) # interpolation error sometimes makes Med < 0 (barely)
aGrid = np.maximum(mGrid - cGrid - self.MedPrice*MedGrid, aMinGrid) # interpolation error sometimes makes tiny violations
vGrid = self.u(cGrid) + MedShkGrid*self.uMed(MedGrid) + self.EndOfPrdvFunc(aGrid,pGrid)
vNow = np.sum(vGrid*probsGrid,axis=2)
# Calculate expected marginal value by "integrating" across medical shocks
vPgrid = self.uP(cGrid)
vPnow = np.sum(vPgrid*probsGrid,axis=2)
# Add vPnvrs=0 at m=mLvlMin to close it off at the bottom (and vNvrs=0)
mGrid_small = np.concatenate((np.reshape(self.mLvlMinNow(self.pLvlGrid),
(1,pCount)),mGrid[:,:,0]))
vPnvrsNow = np.concatenate((np.zeros((1,pCount)),self.uPinv(vPnow)))
if self.vFuncBool:
vNvrsNow = np.concatenate((np.zeros((1,pCount)),self.uinv(vNow)),axis=0)
vNvrsPnow = vPnow*self.uinvP(vNow)
vNvrsPnow = np.concatenate((np.zeros((1,pCount)),vNvrsPnow),axis=0)
# Construct the pseudo-inverse value and marginal value functions over mLvl,pLvl
vPnvrsFunc_by_pLvl = []
vNvrsFunc_by_pLvl = []
for j in range(pCount): # Make a pseudo inverse marginal value function for each pLvl
pLvl = self.pLvlGrid[j]
m_temp = mGrid_small[:,j] - self.mLvlMinNow(pLvl)
vPnvrs_temp = vPnvrsNow[:,j]
vPnvrsFunc_by_pLvl.append(LinearInterp(m_temp,vPnvrs_temp))
if self.vFuncBool:
vNvrs_temp = vNvrsNow[:,j]
vNvrsP_temp = vNvrsPnow[:,j]
vNvrsFunc_by_pLvl.append(CubicInterp(m_temp,vNvrs_temp,vNvrsP_temp))
vPnvrsFuncBase = LinearInterpOnInterp1D(vPnvrsFunc_by_pLvl,self.pLvlGrid)
vPnvrsFunc = VariableLowerBoundFunc2D(vPnvrsFuncBase,self.mLvlMinNow) # adjust for the lower bound of mLvl
if self.vFuncBool:
vNvrsFuncBase = LinearInterpOnInterp1D(vNvrsFunc_by_pLvl,self.pLvlGrid)
vNvrsFunc = VariableLowerBoundFunc2D(vNvrsFuncBase,self.mLvlMinNow) # adjust for the lower bound of mLvl
# "Re-curve" the (marginal) value function
vPfunc = MargValueFunc2D(vPnvrsFunc,self.CRRA)
if self.vFuncBool:
vFunc = ValueFunc2D(vNvrsFunc,self.CRRA)
else:
vFunc = NullFunc()
return vFunc, vPfunc
def makeLinearxFunc(self,mLvl,pLvl,MedShk,xLvl):
'''
Constructs the (unconstrained) expenditure function for this period using
bilinear interpolation (over permanent income and the medical shock) among
an array of linear interpolations over market resources.
Parameters
----------
mLvl : np.array
Corresponding market resource points for interpolation.
pLvl : np.array
Corresponding permanent income level points for interpolation.
MedShk : np.array
Corresponding medical need shocks for interpolation.
xLvl : np.array
Expenditure points for interpolation, corresponding to those in mLvl,
pLvl, and MedShk.
Returns
-------
xFuncUnc : BilinearInterpOnInterp1D
Unconstrained total expenditure function for this period.
'''
# Get state dimensions
pCount = mLvl.shape[1]
MedCount = mLvl.shape[0]
# Loop over each permanent income level and medical shock and make a linear xFunc
xFunc_by_pLvl_and_MedShk = [] # Initialize the empty list of lists of 1D xFuncs
for i in range(pCount):
temp_list = []
pLvl_i = pLvl[0,i,0]
mLvlMin_i = self.BoroCnstNat(pLvl_i)
for j in range(MedCount):
m_temp = mLvl[j,i,:] - mLvlMin_i
x_temp = xLvl[j,i,:]
temp_list.append(LinearInterp(m_temp,x_temp))
xFunc_by_pLvl_and_MedShk.append(deepcopy(temp_list))
# Combine the nested list of linear xFuncs into a single function
pLvl_temp = pLvl[0,:,0]
MedShk_temp = MedShk[:,0,0]
xFuncUncBase = BilinearInterpOnInterp1D(xFunc_by_pLvl_and_MedShk,pLvl_temp,MedShk_temp)
xFuncUnc = VariableLowerBoundFunc3D(xFuncUncBase,self.BoroCnstNat)
return xFuncUnc
def makeCubicxFunc(self,mLvl,pLvl,MedShk,xLvl):
'''
Constructs the (unconstrained) expenditure function for this period using
bilinear interpolation (over permanent income and the medical shock) among
an array of cubic interpolations over market resources.
Parameters
----------
mLvl : np.array
Corresponding market resource points for interpolation.
pLvl : np.array
Corresponding permanent income level points for interpolation.
MedShk : np.array
Corresponding medical need shocks for interpolation.
xLvl : np.array
Expenditure points for interpolation, corresponding to those in mLvl,
pLvl, and MedShk.
Returns
-------
xFuncUnc : BilinearInterpOnInterp1D
Unconstrained total expenditure function for this period.
'''
# Get state dimensions
pCount = mLvl.shape[1]
MedCount = mLvl.shape[0]
# Calculate the MPC and MPM at each gridpoint
EndOfPrdvPP = self.DiscFacEff*self.Rfree*self.Rfree*np.sum(self.vPPfuncNext(self.mLvlNext,\
self.pLvlNext)*self.ShkPrbs_temp,axis=0)
EndOfPrdvPP = np.tile(np.reshape(EndOfPrdvPP,(1,pCount,EndOfPrdvPP.shape[1])),(MedCount,1,1))
dcda = EndOfPrdvPP/self.uPP(np.array(self.cLvlNow))
dMedda = EndOfPrdvPP/(self.MedShkVals_tiled*self.uMedPP(self.MedLvlNow))
dMedda[0,:,:] = 0.0 # dMedda goes crazy when MedShk=0
MPC = dcda/(1.0 + dcda + self.MedPrice*dMedda)
MPM = dMedda/(1.0 + dcda + self.MedPrice*dMedda)
# Convert to marginal propensity to spend
MPX = MPC + self.MedPrice*MPM
MPX = np.concatenate((np.reshape(MPX[:,:,0],(MedCount,pCount,1)),MPX),axis=2) # NEED TO CALCULATE MPM AT NATURAL BORROWING CONSTRAINT
MPX[0,:,0] = self.MPCmaxNow
# Loop over each permanent income level and medical shock and make a cubic xFunc
xFunc_by_pLvl_and_MedShk = [] # Initialize the empty list of lists of 1D xFuncs
for i in range(pCount):
temp_list = []
pLvl_i = pLvl[0,i,0]
mLvlMin_i = self.BoroCnstNat(pLvl_i)
for j in range(MedCount):
m_temp = mLvl[j,i,:] - mLvlMin_i
x_temp = xLvl[j,i,:]
MPX_temp = MPX[j,i,:]
temp_list.append(CubicInterp(m_temp,x_temp,MPX_temp))
xFunc_by_pLvl_and_MedShk.append(deepcopy(temp_list))
# Combine the nested list of cubic xFuncs into a single function
pLvl_temp = pLvl[0,:,0]
MedShk_temp = MedShk[:,0,0]
xFuncUncBase = BilinearInterpOnInterp1D(xFunc_by_pLvl_and_MedShk,pLvl_temp,MedShk_temp)
xFuncUnc = VariableLowerBoundFunc3D(xFuncUncBase,self.BoroCnstNat)
return xFuncUnc
def makeBasicSolution(self,EndOfPrdvP,aLvl,interpolator):
'''
Given end of period assets and end of period marginal value, construct
the basic solution for this period.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aLvl : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
interpolator : function
A function that constructs and returns a consumption function.
Returns
-------
solution_now : ConsumerSolution
The solution to this period's consumption-saving problem, with a
consumption function, marginal value function, and minimum m.
'''
xLvl,mLvl,pLvl = self.getPointsForInterpolation(EndOfPrdvP,aLvl)
MedShk_temp = np.tile(np.reshape(self.MedShkVals,(self.MedShkVals.size,1,1)),\
(1,mLvl.shape[1],mLvl.shape[2]))
solution_now = self.usePointsForInterpolation(xLvl,mLvl,pLvl,MedShk_temp,interpolator)
return solution_now
def addvPPfunc(self,solution):
'''
Adds the marginal marginal value function to an existing solution, so
that the next solver can evaluate vPP and thus use cubic interpolation.
Parameters
----------
solution : ConsumerSolution
The solution to this single period problem, which must include the
consumption function.
Returns
-------
solution : ConsumerSolution
The same solution passed as input, but with the marginal marginal
value function for this period added as the attribute vPPfunc.
'''
vPPfuncNow = MargMargValueFunc2D(solution.vPfunc.cFunc,self.CRRA)
solution.vPPfunc = vPPfuncNow
return solution
def solve(self):
'''
Solves a one period consumption saving problem with risky income and
shocks to medical need.
Parameters
----------
None
Returns
-------
solution : ConsumerSolution
The solution to the one period problem, including a consumption
function, medical spending function ( both defined over market re-
sources, permanent income, and medical shock), a marginal value func-
tion (defined over market resources and permanent income), and human
wealth as a function of permanent income.
'''
aLvl,trash = self.prepareToCalcEndOfPrdvP()
EndOfPrdvP = self.calcEndOfPrdvP()
if self.vFuncBool:
self.makeEndOfPrdvFunc(EndOfPrdvP)
if self.CubicBool:
interpolator = self.makeCubicxFunc
else:
interpolator = self.makeLinearxFunc
solution = self.makeBasicSolution(EndOfPrdvP,aLvl,interpolator)
solution = self.addMPCandHumanWealth(solution)
if self.CubicBool:
solution = self.addvPPfunc(solution)
return solution
def solveConsMedShock(solution_next,IncomeDstn,MedShkDstn,LivPrb,DiscFac,CRRA,CRRAmed,Rfree,MedPrice,
PermGroFac,PermIncCorr,BoroCnstArt,aXtraGrid,pLvlGrid,vFuncBool,CubicBool):
'''
Solve the one period problem for a consumer with shocks to permanent and
transitory income as well as medical need shocks (as multiplicative shifters
for utility from a second medical care good).
Parameters
----------
solution_next : ConsumerSolution
The solution to next period's one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
MedShkDstn : [np.array]
Discrete distribution of the multiplicative utility shifter for med-
ical care. Order: probabilities, preference shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion for composite consumption.
CRRAmed : float
Coefficient of relative risk aversion for medical care.
Rfree : float
Risk free interest factor on end-of-period assets.
MedPrice : float
Price of unit of medical care relative to unit of consumption.
    PermGroFac : float
Expected permanent income growth factor at the end of this period.
PermIncCorr : float
Correlation of permanent income from period to period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with.
aXtraGrid: np.array
Array of "extra" end-of-period (normalized) asset values-- assets
above the absolute minimum acceptable level.
pLvlGrid: np.array
Array of permanent income levels at which to solve the problem.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
solution : ConsumerSolution
Solution to this period's problem, including a consumption function,
medical spending function, and marginal value function. The former two
are defined over (mLvl,pLvl,MedShk), while the latter is defined only
on (mLvl,pLvl), with MedShk integrated out.
'''
solver = ConsMedShockSolver(solution_next,IncomeDstn,MedShkDstn,LivPrb,DiscFac,CRRA,CRRAmed,Rfree,
MedPrice,PermGroFac,PermIncCorr,BoroCnstArt,aXtraGrid,pLvlGrid,vFuncBool,CubicBool)
solver.prepareToSolve() # Do some preparatory work
solution_now = solver.solve() # Solve the period
return solution_now
###############################################################################
if __name__ == '__main__':
import ConsumerParameters as Params
from HARKutilities import CRRAutility_inv
from time import clock
import matplotlib.pyplot as plt
mystr = lambda number : "{:.4f}".format(number)
do_simulation = False
# Make and solve an example medical shocks consumer type
MedicalExample = MedShockConsumerType(**Params.init_medical_shocks)
MedicalExample.cycles = 0
t_start = clock()
MedicalExample.solve()
t_end = clock()
print('Solving a medical shocks consumer took ' + mystr(t_end-t_start) + ' seconds.')
# Plot the consumption function
M = np.linspace(0,30,300)
pLvl = 1.0
P = pLvl*np.ones_like(M)
for j in range(MedicalExample.MedShkDstn[0][0].size):
MedShk = MedicalExample.MedShkDstn[0][1][j]*np.ones_like(M)
M_temp = M + MedicalExample.solution[0].mLvlMin(pLvl)
C = MedicalExample.solution[0].cFunc(M_temp,P,MedShk)
plt.plot(M_temp,C)
print('Consumption function by medical need shock (constant permanent income)')
plt.show()
# Plot the medical care function
for j in range(MedicalExample.MedShkDstn[0][0].size):
MedShk = MedicalExample.MedShkDstn[0][1][j]*np.ones_like(M)
Med = MedicalExample.solution[0].MedFunc(M_temp,P,MedShk)
plt.plot(M_temp,Med)
print('Medical care function by medical need shock (constant permanent income)')
plt.ylim([0,20])
plt.show()
# Plot the savings function
for j in range(MedicalExample.MedShkDstn[0][0].size):
MedShk = MedicalExample.MedShkDstn[0][1][j]*np.ones_like(M)
Sav = M_temp - MedicalExample.solution[0].cFunc(M_temp,P,MedShk) - MedicalExample.MedPrice[0]*\
MedicalExample.solution[0].MedFunc(M_temp,P,MedShk)
plt.plot(M_temp,Sav)
print('End of period savings by medical need shock (constant permanent income)')
plt.show()
# Plot the marginal value function
M = np.linspace(0.0,30,300)
for p in range(MedicalExample.pLvlGrid[0].size):
pLvl = MedicalExample.pLvlGrid[0][p]
M_temp = pLvl*M + MedicalExample.solution[0].mLvlMin(pLvl)
P = pLvl*np.ones_like(M)
vP = MedicalExample.solution[0].vPfunc(M_temp,P)**(-1.0/MedicalExample.CRRA)
plt.plot(M_temp,vP)
print('Marginal value function (pseudo inverse)')
plt.show()
if MedicalExample.vFuncBool:
# Plot the value function
M = np.linspace(0.0,1,300)
for p in range(MedicalExample.pLvlGrid[0].size):
pLvl = MedicalExample.pLvlGrid[0][p]
M_temp = pLvl*M + MedicalExample.solution[0].mLvlMin(pLvl)
P = pLvl*np.ones_like(M)
v = CRRAutility_inv(MedicalExample.solution[0].vFunc(M_temp,P),gam=MedicalExample.CRRA)
plt.plot(M_temp,v)
print('Value function (pseudo inverse)')
plt.show()
if do_simulation:
MedicalExample.sim_periods = 100
MedicalExample.DiePrb = 1.0 - MedicalExample.LivPrb[0]
MedicalExample.makeIncShkHist()
MedicalExample.makeMedShkHist()
MedicalExample.initializeSim()
MedicalExample.simConsHistory()
| apache-2.0 |
ioam/holoviews | holoviews/element/annotation.py | 1 | 13192 | from numbers import Number
import numpy as np
import param
from ..core.util import datetime_types, basestring
from ..core import Dimension, Element2D, Element
from ..core.data import Dataset
class Annotation(Element2D):
"""
An Annotation is a special type of element that is designed to be
overlaid on top of any arbitrary 2D element. Annotations have
neither key nor value dimensions allowing them to be overlaid over
any type of data.
Note that one or more Annotations *can* be displayed without being
overlaid on top of any other data. In such instances (by default)
they will be displayed using the unit axis limits (0.0-1.0 in both
directions) unless an explicit 'extents' parameter is
supplied. The extents of the bottom Annotation in the Overlay are
used when multiple Annotations are displayed together.
"""
kdims = param.List(default=[Dimension('x'), Dimension('y')],
bounds=(2, 2))
group = param.String(default='Annotation', constant=True)
_auxiliary_component = True
def __init__(self, data, **params):
super(Annotation, self).__init__(data, **params)
def __len__(self):
return 1
def __getitem__(self, key):
if key in self.dimensions():
return self.dimension_values(key)
if not isinstance(key, tuple) or len(key) == 1:
key = (key, slice(None))
elif len(key) == 0:
return self.clone()
if not all(isinstance(k, slice) for k in key):
raise KeyError("%s only support slice indexing" %
self.__class__.__name__)
xkey, ykey = tuple(key[:len(self.kdims)])
xstart, xstop = xkey.start, xkey.stop
ystart, ystop = ykey.start, ykey.stop
return self.clone(self.data, extents=(xstart, ystart, xstop, ystop))
def dimension_values(self, dimension, expanded=True, flat=True):
"""Return the values along the requested dimension.
Args:
dimension: The dimension to return values for
expanded (bool, optional): Whether to expand values
flat (bool, optional): Whether to flatten array
Returns:
NumPy array of values along the requested dimension
"""
index = self.get_dimension_index(dimension)
if index == 0:
return np.array([self.data if np.isscalar(self.data) else self.data[index]])
elif index == 1:
return [] if np.isscalar(self.data) else np.array([self.data[1]])
else:
return super(Annotation, self).dimension_values(dimension)
# Note: This version of clone is identical in path.BaseShape
# Consider implementing a mix-in class if it is needed again.
def clone(self, *args, **overrides):
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
# Apply name mangling for __ attribute
pos_args = getattr(self, '_' + type(self).__name__ + '__pos_params', [])
settings = {k: v for k, v in dict(self.get_param_values(), **overrides).items()
if k not in pos_args[:len(args)]}
if 'id' not in settings:
settings['id'] = self.id
return self.__class__(*args, **settings)
class VLine(Annotation):
"""Vertical line annotation at the given position."""
group = param.String(default='VLine', constant=True)
x = param.ClassSelector(default=0, class_=(Number, ) + datetime_types, doc="""
The x-position of the VLine, which may be numeric or a timestamp.""")
__pos_params = ['x']
def __init__(self, x, **params):
super(VLine, self).__init__(x, x=x, **params)
def dimension_values(self, dimension, expanded=True, flat=True):
"""Return the values along the requested dimension.
Args:
dimension: The dimension to return values for
expanded (bool, optional): Whether to expand values
flat (bool, optional): Whether to flatten array
Returns:
NumPy array of values along the requested dimension
"""
index = self.get_dimension_index(dimension)
if index == 0:
return np.array([self.data])
elif index == 1:
return np.array([])
else:
return super(VLine, self).dimension_values(dimension)
class HLine(Annotation):
"""Horizontal line annotation at the given position."""
group = param.String(default='HLine', constant=True)
y = param.ClassSelector(default=0, class_=(Number, ) + datetime_types, doc="""
The y-position of the HLine, which may be numeric or a timestamp.""")
__pos_params = ['y']
def __init__(self, y, **params):
super(HLine, self).__init__(y, y=y, **params)
def dimension_values(self, dimension, expanded=True, flat=True):
"""Return the values along the requested dimension.
Args:
dimension: The dimension to return values for
expanded (bool, optional): Whether to expand values
flat (bool, optional): Whether to flatten array
Returns:
NumPy array of values along the requested dimension
"""
index = self.get_dimension_index(dimension)
if index == 0:
return np.array([])
elif index == 1:
return np.array([self.data])
else:
return super(HLine, self).dimension_values(dimension)
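# Hedged usage sketch (editor's addition, not part of the original module).
# Assumes the usual `import holoviews as hv` alias and a plotting backend
# (matplotlib or bokeh) already loaded; names below are illustrative only.
#   import numpy as np
#   import holoviews as hv
#   xs = np.linspace(0, 10, 200)
#   curve = hv.Curve((xs, np.sin(xs)))
#   annotated = curve * hv.VLine(5.0) * hv.HLine(0.0)
#   # VLine/HLine carry no data of their own, so the overlay keeps the
#   # extents of the underlying Curve, as the Annotation docstring describes.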
class Spline(Annotation):
"""
Draw a spline using the given handle coordinates and handle
codes. The constructor accepts a tuple in format (coords, codes).
Follows format of matplotlib spline definitions as used in
matplotlib.path.Path with the following codes:
Path.STOP : 0
Path.MOVETO : 1
Path.LINETO : 2
Path.CURVE3 : 3
Path.CURVE4 : 4
Path.CLOSEPOLY : 79
"""
group = param.String(default='Spline', constant=True)
def __init__(self, spline_points, **params):
super(Spline, self).__init__(spline_points, **params)
def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides):
"""Clones the object, overriding data and parameters.
Args:
data: New data replacing the existing data
shared_data (bool, optional): Whether to use existing data
new_type (optional): Type to cast object to
*args: Additional arguments to pass to constructor
**overrides: New keyword arguments to pass to constructor
Returns:
Cloned Spline
"""
return Element2D.clone(self, data, shared_data, new_type,
*args, **overrides)
def dimension_values(self, dimension, expanded=True, flat=True):
"""Return the values along the requested dimension.
Args:
dimension: The dimension to return values for
expanded (bool, optional): Whether to expand values
flat (bool, optional): Whether to flatten array
Returns:
NumPy array of values along the requested dimension
"""
index = self.get_dimension_index(dimension)
if index in [0, 1]:
return np.array([point[index] for point in self.data[0]])
else:
return super(Spline, self).dimension_values(dimension)
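# Hedged sketch (editor's addition): Spline takes a (coords, codes) tuple
# following the matplotlib Path codes listed in the class docstring, e.g.
#   points = [(-0.3, -0.3), (0.0, 0.6), (0.25, -0.3), (0.5, 0.5)]
#   codes = [1, 4, 4, 4]   # MOVETO followed by one CURVE4 (cubic Bezier) segment
#   spline = Spline((points, codes))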
class Arrow(Annotation):
"""
Draw an arrow to the given xy position with optional text at
distance 'points' away. The direction of the arrow may be
specified as well as the arrow head style.
"""
x = param.ClassSelector(default=0, class_=(Number, ) + datetime_types, doc="""
The x-position of the arrow, which may be numeric or a timestamp.""")
y = param.ClassSelector(default=0, class_=(Number, ) + datetime_types, doc="""
The y-position of the arrow, which may be numeric or a timestamp.""")
text = param.String(default='', doc="Text associated with the arrow.")
direction = param.ObjectSelector(default='<',
objects=['<', '^', '>', 'v'], doc="""
The cardinal direction in which the arrow is pointing. Accepted
arrow directions are '<', '^', '>' and 'v'.""")
arrowstyle = param.ObjectSelector(default='->',
objects=['-', '->', '-[', '-|>', '<->', '<|-|>'],
doc="""
The arrowstyle used to draw the arrow. Accepted arrow styles are
'-', '->', '-[', '-|>', '<->' and '<|-|>'""")
points = param.Number(default=40, doc="Font size of arrow text (if any).")
group = param.String(default='Arrow', constant=True)
__pos_params = ['x', 'y', 'text', 'direction', 'points', 'arrowstyle']
def __init__(self, x, y, text='', direction='<',
points=40, arrowstyle='->', **params):
info = (x, y, text, direction, points, arrowstyle)
super(Arrow, self).__init__(info, x=x, y=y,
text=text, direction=direction,
points=points, arrowstyle=arrowstyle,
**params)
def __setstate__(self, d):
"""
Add compatibility for unpickling old Arrow types with different
.data format.
"""
super(Arrow, self).__setstate__(d)
if len(self.data) == 5:
direction, text, (x, y), points, arrowstyle = self.data
self.data = (x, y, text, direction, points, arrowstyle)
def dimension_values(self, dimension, expanded=True, flat=True):
"""Return the values along the requested dimension.
Args:
dimension: The dimension to return values for
expanded (bool, optional): Whether to expand values
flat (bool, optional): Whether to flatten array
Returns:
NumPy array of values along the requested dimension
"""
index = self.get_dimension_index(dimension)
if index == 0:
return np.array([self.x])
elif index == 1:
return np.array([self.y])
else:
return super(Arrow, self).dimension_values(dimension)
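# Hedged usage sketch (editor's addition; values are illustrative only):
#   arrow = Arrow(5.0, 0.9, text='peak', direction='v', arrowstyle='->')
#   labelled = curve * arrow   # overlay on any 2D element, e.g. a Curve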
class Text(Annotation):
"""
Draw a text annotation at the specified position with custom
fontsize, alignment and rotation.
"""
x = param.ClassSelector(default=0, class_=(Number, basestring) + datetime_types, doc="""
The x-position of the text, which may be numeric or a timestamp.""")
y = param.ClassSelector(default=0, class_=(Number, basestring) + datetime_types, doc="""
The y-position of the text, which may be numeric or a timestamp.""")
text = param.String(default='', doc="The text to be displayed.")
fontsize = param.Number(default=12, doc="Font size of the text.")
rotation = param.Number(default=0, doc="Text rotation angle in degrees.")
halign = param.ObjectSelector(default='center',
objects=['left', 'right', 'center'], doc="""
The horizontal alignment position of the displayed text. Allowed values
are 'left', 'right' and 'center'.""")
valign = param.ObjectSelector(default='center',
objects=['top', 'bottom', 'center'], doc="""
The vertical alignment position of the displayed text. Allowed values
are 'center', 'top' and 'bottom'.""")
group = param.String(default='Text', constant=True)
__pos_params = ['x', 'y', 'text', 'fontsize', 'halign', 'valign', 'rotation']
def __init__(self, x, y, text, fontsize=12,
halign='center', valign='center', rotation=0, **params):
info = (x, y, text, fontsize, halign, valign, rotation)
super(Text, self).__init__(info, x=x, y=y, text=text,
fontsize=fontsize, rotation=rotation,
halign=halign, valign=valign, **params)
class Div(Element):
"""
The Div element represents a div DOM node in an HTML document defined
as a string containing valid HTML.
"""
group = param.String(default='Div', constant=True)
def __init__(self, data, **params):
if not isinstance(data, basestring):
raise ValueError("Div element html data must be a string "
"type, found %s type." % type(data).__name__)
super(Div, self).__init__(data, **params)
class Labels(Dataset, Element2D):
"""
Labels represents a collection of text labels associated with 2D
coordinates. Unlike the Text annotation, Labels is a Dataset type
which allows drawing vectorized labels from tabular or gridded
data.
"""
kdims = param.List(default=[Dimension('x'), Dimension('y')],
bounds=(2, 2), constant=True, doc="""
The label of the x- and y-dimension of the Labels element in form
of a string or dimension object.""")
group = param.String(default='Labels', constant=True)
vdims = param.List([Dimension('Label')], bounds=(1, None), doc="""
Defines the value dimension corresponding to the label text.""")
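# Hedged sketch (editor's addition): Labels is a Dataset, so it is built from
# tabular data (two key dimensions plus a 'Label' value dimension), e.g.
#   labels = Labels([(0, 0, 'origin'), (1, 1, 'corner')],
#                   kdims=['x', 'y'], vdims=['Label'])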
| bsd-3-clause |
vkscool/nupic | examples/opf/tools/MirrorImageViz/mirrorImageViz.py | 8 | 7224 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Author: Surabhi Gupta
import sys
import numpy as np
import matplotlib.pylab as pyl
def analyzeOverlaps(activeCoincsFile, encodingsFile, dataset):
'''Mirror Image Visualization: Shows the encoding space juxtaposed against the
coincidence space. The encoding space is the bottom-up sensory encoding and
the coincidence space depicts the corresponding activation of coincidences in
the SP. Hence, the mirror image visualization is a visual depiction of the
mapping of SP cells to the input representations.
Note:
* The files spBUOut and sensorBUOut are assumed to be in the output format
used for LPF experiment outputs.
* BU outputs for some sample datasets are provided. Specify the name of the
dataset as an option while running this script.
'''
lines = activeCoincsFile.readlines()
inputs = encodingsFile.readlines()
w = len(inputs[0].split(' '))-1
patterns = set([])
encodings = set([])
coincs = [] #The set of all coincidences that have won at least once
reUsedCoincs = []
firstLine = inputs[0].split(' ')
size = int(firstLine.pop(0))
spOutput = np.zeros((len(lines),40))
inputBits = np.zeros((len(lines),w))
print 'Total n:', size
print 'Total number of records in the file:', len(lines), '\n'
print 'w:', w
count = 0
for x in xrange(len(lines)):
inputSpace = [] #Encoded representation for each input
spBUout = [int(z) for z in lines[x].split(' ')]
spBUout.pop(0) #The first element of each row of spBUOut is the size of the SP
temp = set(spBUout)
spOutput[x]=spBUout
input = [int(z) for z in inputs[x].split(' ')]
input.pop(0) #The first element of each row of sensorBUout is the size of the encoding space
tempInput = set(input)
inputBits[x]=input
#Creating the encoding space
for m in xrange(size):
if m in tempInput:
inputSpace.append(m)
else:
inputSpace.append('|') #A non-active bit
repeatedBits = tempInput.intersection(encodings) #Storing the bits that have been previously active
reUsed = temp.intersection(patterns) #Checking if any of the active cells have been previously active
#Dividing the coincidences into two difference categories.
if len(reUsed)==0:
coincs.append((count,temp,repeatedBits,inputSpace, tempInput)) #Pattern no, active cells, repeated bits, encoding (full), encoding (summary)
else:
reUsedCoincs.append((count,temp,repeatedBits,inputSpace, tempInput))
patterns=patterns.union(temp) #Adding the active cells to the set of coincs that have been active at least once
encodings = encodings.union(tempInput)
count +=1
overlap = {}
overlapVal = 0
seen = []
seen = (printOverlaps(coincs, coincs, seen))
print len(seen), 'sets of 40 cells'
seen = printOverlaps(reUsedCoincs, coincs, seen)
Summ=[]
for z in coincs:
c=0
for y in reUsedCoincs:
c += len(z[1].intersection(y[1]))
Summ.append(c)
print 'Sum: ', Summ
for m in xrange(3):
displayLimit = min(51, len(spOutput[m*200:]))
if displayLimit>0:
drawFile(dataset, np.zeros([len(inputBits[:(m+1)*displayLimit]),len(inputBits[:(m+1)*displayLimit])]), inputBits[:(m+1)*displayLimit], spOutput[:(m+1)*displayLimit], w, m+1)
else:
print 'No more records to display'
pyl.show()
def drawFile(dataset, matrix, patterns, cells, w, fnum):
'''The similarity of two patterns in the bit-encoding space is displayed alongside
their similarity in the sp-coinc space.'''
score=0
count = 0
assert len(patterns)==len(cells)
for p in xrange(len(patterns)-1):
matrix[p+1:,p] = [len(set(patterns[p]).intersection(set(q)))*100/w for q in patterns[p+1:]]
matrix[p,p+1:] = [len(set(cells[p]).intersection(set(r)))*5/2 for r in cells[p+1:]]
score += sum(abs(np.array(matrix[p+1:,p])-np.array(matrix[p,p+1:])))
count += len(matrix[p+1:,p])
print 'Score', score/count
fig = pyl.figure(figsize = (10,10), num = fnum)
pyl.matshow(matrix, fignum = fnum)
pyl.colorbar()
pyl.title('Coincidence Space', verticalalignment='top', fontsize=12)
pyl.xlabel('The Mirror Image Visualization for '+dataset, fontsize=17)
pyl.ylabel('Encoding space', fontsize=12)
def printOverlaps(comparedTo, coincs, seen):
""" Compare the results and return True if success, False if failure
Parameters:
--------------------------------------------------------------------
coincs: Which cells are we comparing?
comparedTo: The set of 40 cells being compared to (they have no overlap with seen)
seen: Which of the cells we are comparing to have already been encountered.
This helps glue together the unique and reused coincs
"""
inputOverlap = 0
cellOverlap = 0
for y in comparedTo:
closestInputs = []
closestCells = []
if len(seen)>0:
inputOverlap = max([len(seen[m][1].intersection(y[4])) for m in xrange(len(seen))])
cellOverlap = max([len(seen[m][0].intersection(y[1])) for m in xrange(len(seen))])
for m in xrange( len(seen) ):
if len(seen[m][1].intersection(y[4]))==inputOverlap:
closestInputs.append(seen[m][2])
if len(seen[m][0].intersection(y[1]))==cellOverlap:
closestCells.append(seen[m][2])
seen.append((y[1], y[4], y[0]))
print 'Pattern',y[0]+1,':',' '.join(str(len(z[1].intersection(y[1]))).rjust(2) for z in coincs),'input overlap:', inputOverlap, ';', len(closestInputs), 'closest encodings:',','.join(str(m+1) for m in closestInputs).ljust(15), \
'cell overlap:', cellOverlap, ';', len(closestCells), 'closest set(s):',','.join(str(m+1) for m in closestCells)
return seen
if __name__=='__main__':
if len(sys.argv)<2: # No dataset specified
print ('Input files required. Read documentation for details.')
else:
dataset = sys.argv[1]
activeCoincsPath = dataset+'/'+dataset+'_spBUOut.txt'
encodingsPath = dataset+'/'+dataset+'_sensorBUOut.txt'
activeCoincsFile=open(activeCoincsPath, 'r')
encodingsFile=open(encodingsPath, 'r')
analyzeOverlaps(activeCoincsFile, encodingsFile, dataset)
| gpl-3.0 |
atantet/transferCZ | tau/get_tau_anom_white.py | 1 | 11484 | import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from matplotlib import cm, colors
from mpl_toolkits.basemap import Basemap, addcyclic
from scipy.io import FortranFile
initDir = '../data/init/'
nlat = 31
nlon = 30
year0 = 1961
yearf = 1994
gridName = "%dx%d" % (nlat, nlon)
periodName = "%d%d" % (year0, yearf)
postfix = '%s_%s' % (gridName, periodName)
dstPostfix = ''
noiseType = 'White'
nEOF = 1
#nEOF = 3
seed = 0
sstFile = "ersst.%s.nc" % postfix
psFile = "pac.%s.nc" % postfix
T0 = 30.
rhoAir = 1.2
CD = 1.2e-3 # Large & Pond 1981 for w < 11 m/s
L = 1.5e7
c0 = 2.
rho = 1024
H = 200.
toStress = rhoAir * CD
toND = L / (c0**2 * rho * H) # = F0 / tau0
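# Editor's note (hedged): the two factors above implement the scaling
#   tau = rhoAir * CD * Wu             (pseudo wind stress -> stress, N/m^2)
#   F   = L * tau / (c0**2 * rho * H)  (stress -> adimensional forcing, F0 = L*tau0/(c0^2*rho*H))
# so the residual stress is later adimensionalized as Wu * toStress * toND.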
# time
T = 600 # duration in years
nBlocks = 1
dt = 0.060 # adimensional time-step
adim2Sec = L / c0 # Conversion from adimensional to seconds
sec2Years = 1. / (60*60*24*365)
adim2Years = adim2Sec * sec2Years
print 'Getting stochastic wind stress for a seed of ', seed
# Read cartesian coordinates
x = np.loadtxt('%s/lon_%s.txt' % (initDir, gridName))
y = np.loadtxt('%s/lat_%s.txt' % (initDir, gridName))
(X2, Y2) = np.meshgrid(x, y)
# Read sst
dset = Dataset(sstFile, "r")
sst = dset.variables["sst"][:]
lat = dset.variables["lat"][:]
lon = dset.variables["lon"][:]
(LON, LAT) = np.meshgrid(lon, lat)
ntObs = sst.shape[0]
dset.close()
# Map definition
llcrnrlon = lon.min()
llcrnrlat = lat.min()
urcrnrlon = lon.max()
urcrnrlat = lat.max()
nlev = 10
map = Basemap(projection='merc', llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat,
urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat, resolution='c')
(x, y) = map(LON, LAT)
# Read zonal pseudo wind-stress
dset = Dataset(psFile, "r")
Wu = dset.variables["Wu"][:]
dset.close()
# Get mask
N = nlat * nlon
sst = sst.reshape(ntObs, N)
Wu = Wu.reshape(ntObs, N)
mask = np.any(sst.mask, 0) | np.any(Wu.mask, 0)
sstMasked = np.array(sst[:, ~mask])
WuMasked = np.array(Wu[:, ~mask])
lonlMasked = LON.flatten()[~mask]
latlMasked = LAT.flatten()[~mask]
nValid = N - mask.sum()
# Remove the mean and the seasonal cycle to get anomalies
print 'Getting anomalies...'
sstMean = sstMasked.mean(0)
WuMean = WuMasked.mean(0)
# Get anomalies with respect to the mean
sstMaskedAnom = sstMasked - np.tile(np.expand_dims(sstMean, 0), (ntObs, 1))
WuMaskedAnom = WuMasked - np.tile(np.expand_dims(WuMean, 0), (ntObs, 1))
# Get anomalies with respect to the seasonal cycle
ssta = np.copy(sstMaskedAnom,)
Wua = np.copy(WuMaskedAnom)
for k in np.arange(12):
ssta[k::12] -= np.tile(np.expand_dims(sstMaskedAnom[k::12].mean(0), 0),
(sstMaskedAnom[k::12].shape[0], 1))
Wua[k::12] -= np.tile(np.expand_dims(WuMaskedAnom[k::12].mean(0), 0),
(WuMaskedAnom[k::12].shape[0], 1))
# Regressions
print 'Getting wind stress residuals...'
WuResDim = np.copy(Wua)
for ij in np.arange(nValid):
X = np.matrix(ssta[:, ij]).T
Y = np.matrix(Wua[:, ij]).T
A = (X.T * X)**(-1) * (X.T * Y)
# Remove the part of the windstress that can
# be explained by the linear model with SST
WuResDim[:, ij] = np.squeeze(np.array(Y - X * A))
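# Editor's note (hedged): the loop above is a per-gridpoint ordinary
# least-squares fit of wind stress on SST, A = (X'X)^{-1} X'Y; the residual
# Y - X*A keeps only the part of the stress not linearly explained by SST.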
# Adimensionalize ! F0=L tau0/(co^2 rho H)
print 'Get adimensional residual wind stress...'
WuRes = WuResDim * toStress * toND
# Plot std of WuRes
fig = plt.figure()
field = np.ma.masked_all((nlat*nlon,))
field[~mask] = WuRes.std(0)
field = field.reshape(nlat, nlon)
vmax = np.max(field)
vmin = 0.
levels = np.linspace(vmin, vmax, nlev)
cs = map.contourf(x, y, field, levels, cmap=cm.hot_r)
plt.title('Std of residual wind-stress')
map.drawcoastlines()
# draw parallels and meridians.
map.drawparallels(np.arange(0, 81.,10.))
map.drawmeridians(np.arange(-180.,181.,30.))
plt.colorbar(orientation='horizontal')
fig.savefig('tauResStd%s.png' % dstPostfix, bbox_inches='tight')
# EOF Decomposition
print 'EOF decomposition...'
# Get covariance matrix
sim = np.cov(WuRes, rowvar=False)
# Eigenvector decomposition
(w, v) = np.linalg.eigh(sim)
# Get principal component
pc = np.dot(WuRes, v)
isort = np.argsort(w)
w = w[isort][::-1]
v = v[:, isort][:, ::-1]
pc = pc[:, isort][:, ::-1]
wn = w / w.sum()
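# Editor's note (hedged): np.linalg.eigh returns eigenvalues in ascending
# order, so the argsort + [::-1] above reorders the EOFs (and their principal
# components) by descending explained variance; wn is the variance fraction.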
# Plot first EOFs
print 'First %d EOFs explained variance: %2d%%' \
% (nEOF, (wn[:nEOF] * 100).astype(int))
print 'First %d EOFs cumulated explained variance: %2d%%' \
% (nEOF, (wn[:nEOF].cumsum() * 100).astype(int))
for k in np.arange(nEOF):
fig = plt.figure()
eof = np.ma.masked_all((nlat*nlon,))
eof[~mask] = v[:, k]
eof = eof.reshape(nlat, nlon)
vmax = np.max(np.abs(eof))
vmin = -vmax
levels = np.linspace(vmin, vmax, nlev)
cs = map.contourf(x, y, eof, levels, cmap=cm.RdBu_r)
plt.title('EOF #' + str(k) + " explaining %2d%% of variance" \
% (wn[k] * 100,))
map.drawcoastlines()
# draw parallels and meridians.
map.drawparallels(np.arange(0, 81.,10.))
map.drawmeridians(np.arange(-180.,181.,30.))
plt.colorbar(orientation='horizontal')
fig.savefig('tau_eof%d%s.png' % (k, dstPostfix), bbox_inches='tight')
# Get Periodograms
sampPeriod = 1.
window = np.hamming(ntObs)
# Get nearest larger power of 2
if np.log2(ntObs) != int(np.log2(ntObs)):
nfft = 2**(int(np.log2(ntObs)) + 1)
else:
nfft = ntObs
# Get frequencies and shift zero frequency to center
freq = np.fft.fftfreq(nfft, d=sampPeriod)
freq = np.fft.fftshift(freq)
freqYear = freq * 12
nt = int(T / (dt * adim2Years))
windowrn = np.hamming(nt)
if np.log2(nt) != int(np.log2(nt)):
nfftrn = 2**(int(np.log2(nt)) + 1)
else:
nfftrn = nt
# Get frequencies and shift zero frequency to center
#freqrn = np.fft.fftfreq(nfftrn, d=sampPeriod)
freqrn = np.fft.fftfreq(nfftrn, d=sampPeriod)
freqrn = np.fft.fftshift(freqrn)
freqrnYear = freqrn * 12
nRAVG = 5
nRAVGrn = int(nRAVG * nfftrn * 1. / nfft * 0.1)
# Get NINO4
print 'Getting NINO4 index...'
nino4slat = -5.
nino4nlat = 5.
nino4wlon = 160.
nino4elon = 210.
nino4 = WuRes[:,(lonlMasked >= nino4wlon)
& (lonlMasked <= nino4elon)
& (latlMasked >= nino4slat)
& (latlMasked <= nino4nlat)].mean(1) / toND
# Get periodogram of zonal wind stress averaged over nino4
ts = nino4# / nt
# Apply window
tsWindowed = ts * window
# Fourier transform and shift zero frequency to center
fts = np.fft.fft(tsWindowed, nfft, 0)
fts = np.fft.fftshift(fts)
# Get periodogram
perio = np.abs(fts / nt)#**2
# Apply running average
perioRAVG = perio.copy()
for k in np.arange(nRAVG/2, nfft-nRAVG/2):
perioRAVG[k] = perio[k-nRAVG/2:k+nRAVG/2 + 1].mean() / nRAVG
# Plot
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(freqYear, np.log10(perioRAVG))
# ax.set_xscale('log')
# ax.set_yscale('log')
ax.set_xlim(0, 4)
#ax.set_ylim(0, vmax)
plt.title('Periodogram of residual wind stress averaged over Nino4')
fig.savefig('nino4_perio%s.png' % dstPostfix, bbox_inches='tight')
# Get white noise parameters (std)
print 'Getting white-noise parameters of principal components...'
rn = np.empty((1, nEOF))
for k in np.arange(nEOF):
rn[0, k] = pc[:, k].std()
# # Plot FFTs
# print 'Plotting periodograms...'
# for k in np.arange(nEOF):
# ts = pc[:, k]
# # FFT
# # Apply window
# tsWindowed = ts * window
# # Fourier transform and shift zero frequency to center
# fts = np.fft.fft(tsWindowed, nfft, 0)
# fts = np.fft.fftshift(fts)
# # Get periodogram
# perio = np.abs(fts / nt)**2
# # Apply running average
# perioRAVG = perio.copy()
# for i in np.arange(nRAVG/2, nfft-nRAVG/2):
# perioRAVG[i] = perio[i-nRAVG/2:i+nRAVG/2 + 1].mean() / nRAVG
# tsrn = pcwn[:, k]
# # FFT
# # Apply window
# tsrnWindowed = tsrn * windowrn
# # Fourier transform and shift zero frequency to center
# ftsrn = np.fft.fft(tsrnWindowed, nfftrn, 0)
# ftsrn = np.fft.fftshift(ftsrn)
# # Get periodogram
# periorn = np.abs(ftsrn / nt)**2
# # Apply running average
# periornRAVG = periorn.copy()
# for i in np.arange(nRAVGrn/2, nfftrn-nRAVGrn/2):
# periornRAVG[i] = periorn[i-nRAVGrn/2:i+nRAVGrn/2 + 1].mean() / nRAVGrn
# Pwn = 1. / nt**2
# # Plot
# fig = plt.figure()
# ax = fig.add_subplot(1,1,1)
# ax.plot(freqYear, np.log10(perioRAVG), '-k', linewidth=2)
# ax.plot(freqrnYear, np.log10(periornRAVG), ':k', linewidth=2)
# # ax.plot(freqYear, np.log10(Pwn), '--k')
# # ax.set_xscale('log')
# # ax.set_yscale('log')
# ax.set_xlim(0, 4)
# #ax.set_ylim(0, vmax)
# plt.title('Periodogram of principal component %d' % k)
# plt.legend(('Periodogram of pc %d' % k,
# 'Periodogram of white-noise fitted to pc %d' % k),
# # 'Theoretical periodogram of white-noise fitted to pc %d' % k),
# loc='lower left')
# fig.savefig('perio_pc%d%s.png' % (k, dstPostfix),
# bbox_inches='tight')
ntBlock = int(nt / nBlocks)
ftau = []
fdtaudx = []
fdtaudy = []
# Use a centered order-1 finite-difference scheme
X3 = np.tile(np.expand_dims(X2, 0), (ntBlock, 1, 1))
dX3CentBlocker = X3[:, :, 2:] - X3[:, :, :-2]
dX3Left = X3[:, :, 1] - X3[:, :, 0]
dX3Right = X3[:, :, -1] - X3[:, :, -2]
Y3 = np.tile(np.expand_dims(Y2, 0), (ntBlock, 1, 1))
dY3CentBlocker = Y3[:, 2:] - Y3[:, :-2]
dY3Left = Y3[:, 1] - Y3[:, 0]
dY3Right = Y3[:, -1] - Y3[:, -2]
# Set the seed
np.random.seed(seed)
for k in np.arange(nEOF):
ftau.append(FortranFile('%s/tau%s_%deofs_seed%d%s.bin' \
% (initDir, noiseType, k+1, seed, dstPostfix),
'w'))
fdtaudx.append(FortranFile('%s/dtaudx%s_%deofs_seed%d%s.bin' \
% (initDir, noiseType, k+1,
seed, dstPostfix), 'w'))
fdtaudy.append(FortranFile('%s/dtaudy%s_%deofs_seed%d%s.bin' % \
(initDir, noiseType, k+1,
seed, dstPostfix), 'w'))
for block in np.arange(nBlocks):
print 'Saving block ', (block+1), ' of ', nBlocks
pcwn = np.empty((ntBlock, nEOF))
# Write-noise time series and its derivative w.r.t x and y
# Write tau with variations
tauNoNorm = np.zeros((ntBlock, nlat, nlon))
for k in np.arange(nEOF):
pcwn[:, k] = np.random.normal(0, rn[0, k], size=ntBlock)
eof = np.zeros((nlat*nlon,))
eof[~mask] = v[:, k]
eof = eof.reshape(nlat, nlon)
tauNoNorm += np.tile(np.expand_dims(np.expand_dims(pcwn[:, k], 1), 2),
(1, nlat, nlon)) \
* np.tile(np.expand_dims(eof, 0),
(ntBlock, 1, 1))
# Normalize
tau = tauNoNorm / np.tile(tauNoNorm.std(0).mean(), (ntBlock, 1, 1))
dtaudx = np.empty(tau.shape)
dtaudx[:, :, 1:-1] = (tau[:, :, 2:] - tau[:, :, :-2]) / dX3CentBlocker
dtaudx[:, :, 0] = (tau[:, :, 1] - tau[:, :, 0]) / dX3Left
dtaudx[:, :, -1] = (tau[:, :, -1] - tau[:, :, -2]) / dX3Right
dtaudy = np.empty(tau.shape)
dtaudy[:, 1:-1] = (tau[:, 2:] - tau[:, :-2]) / dY3CentBlocker
dtaudy[:, 0] = (tau[:, 1] - tau[:, 0]) / dY3Left
dtaudy[:, -1] = (tau[:, -1] - tau[:, -2]) / dY3Right
print 'Writing tau with %d eofs...' % (k+1,)
for t in np.arange(ntBlock):
ftau[k].write_record(tau[t])
fdtaudx[k].write_record(dtaudx[t])
fdtaudy[k].write_record(dtaudy[t])
for k in np.arange(nEOF):
ftau[k].close()
fdtaudx[k].close()
fdtaudy[k].close()
| gpl-2.0 |
holmes/intellij-community | python/helpers/pydev/pydev_ipython/inputhook.py | 52 | 18411 | # coding: utf-8
"""
Inputhook management for GUI event loop integration.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import select
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Constants for identifying the GUI toolkits.
GUI_WX = 'wx'
GUI_QT = 'qt'
GUI_QT4 = 'qt4'
GUI_GTK = 'gtk'
GUI_TK = 'tk'
GUI_OSX = 'osx'
GUI_GLUT = 'glut'
GUI_PYGLET = 'pyglet'
GUI_GTK3 = 'gtk3'
GUI_NONE = 'none' # i.e. disable
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def ignore_CTRL_C():
"""Ignore CTRL+C (not implemented)."""
pass
def allow_CTRL_C():
"""Take CTRL+C into account (not implemented)."""
pass
#-----------------------------------------------------------------------------
# Main InputHookManager class
#-----------------------------------------------------------------------------
class InputHookManager(object):
"""Manage PyOS_InputHook for different GUI toolkits.
This class installs various hooks under ``PyOS_InputHook`` to handle
GUI event loop integration.
"""
def __init__(self):
self._return_control_callback = None
self._apps = {}
self._reset()
self.pyplot_imported = False
def _reset(self):
self._callback_pyfunctype = None
self._callback = None
self._current_gui = None
def set_return_control_callback(self, return_control_callback):
self._return_control_callback = return_control_callback
def get_return_control_callback(self):
return self._return_control_callback
def return_control(self):
return self._return_control_callback()
def get_inputhook(self):
return self._callback
def set_inputhook(self, callback):
"""Set inputhook to callback."""
# We don't (in the context of PyDev console) actually set PyOS_InputHook, but rather
# while waiting for input on xmlrpc we run this code
self._callback = callback
def clear_inputhook(self, app=None):
"""Clear input hook.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
called with a similar interface as all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main IPython
app like :meth:`enable_gui`."""
self._reset()
def clear_app_refs(self, gui=None):
"""Clear IPython's internal reference to an application instance.
Whenever we create an app for a user on qt4 or wx, we hold a
reference to the app. This is needed because in some cases bad things
can happen if a user doesn't hold a reference themselves. This
method is provided to clear the references we are holding.
Parameters
----------
gui : None or str
If None, clear all app references. If ('wx', 'qt4') clear
the app for that toolkit. References are not held for gtk or tk
as those toolkits don't have the notion of an app.
"""
if gui is None:
self._apps = {}
elif gui in self._apps:
del self._apps[gui]
def enable_wx(self, app=None):
"""Enable event loop integration with wxPython.
Parameters
----------
app : WX Application, optional.
Running application to use. If not given, we probe WX for an
existing application object, and create a new one if none is found.
Notes
-----
This method sets the ``PyOS_InputHook`` for wxPython, which allows
wxPython to integrate with terminal-based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`wx.App` as
follows::
import wx
app = wx.App(redirect=False, clearSigInt=False)
"""
import wx
from distutils.version import LooseVersion as V
wx_version = V(wx.__version__).version
if wx_version < [2, 8]:
raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__)
from pydev_ipython.inputhookwx import inputhook_wx
self.set_inputhook(inputhook_wx)
self._current_gui = GUI_WX
if app is None:
app = wx.GetApp()
if app is None:
app = wx.App(redirect=False, clearSigInt=False)
app._in_event_loop = True
self._apps[GUI_WX] = app
return app
def disable_wx(self):
"""Disable event loop integration with wxPython.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_WX in self._apps:
self._apps[GUI_WX]._in_event_loop = False
self.clear_inputhook()
def enable_qt4(self, app=None):
"""Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
This method sets the PyOS_InputHook for PyQt4, which allows
PyQt4 to integrate with terminal-based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
"""
from pydev_ipython.inputhookqt4 import create_inputhook_qt4
app, inputhook_qt4 = create_inputhook_qt4(self, app)
self.set_inputhook(inputhook_qt4)
self._current_gui = GUI_QT4
app._in_event_loop = True
self._apps[GUI_QT4] = app
return app
def disable_qt4(self):
"""Disable event loop integration with PyQt4.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_QT4 in self._apps:
self._apps[GUI_QT4]._in_event_loop = False
self.clear_inputhook()
def enable_gtk(self, app=None):
"""Enable event loop integration with PyGTK.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the PyOS_InputHook for PyGTK, which allows
PyGTK to integrate with terminal-based applications like
IPython.
"""
from pydev_ipython.inputhookgtk import create_inputhook_gtk
self.set_inputhook(create_inputhook_gtk(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_tk(self, app=None):
"""Enable event loop integration with Tk.
Parameters
----------
app : toplevel :class:`Tkinter.Tk` widget, optional.
Running toplevel widget to use. If not given, we probe Tk for an
existing one, and create a new one if none is found.
Notes
-----
If you have already created a :class:`Tkinter.Tk` object, the only
thing done by this method is to register with the
:class:`InputHookManager`, since creating that object automatically
sets ``PyOS_InputHook``.
"""
self._current_gui = GUI_TK
if app is None:
try:
import Tkinter as _TK
except:
# Python 3
import tkinter as _TK
app = _TK.Tk()
app.withdraw()
self._apps[GUI_TK] = app
from pydev_ipython.inputhooktk import create_inputhook_tk
self.set_inputhook(create_inputhook_tk(app))
return app
def disable_tk(self):
"""Disable event loop integration with Tkinter.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_glut(self, app=None):
""" Enable event loop integration with GLUT.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the PyOS_InputHook for GLUT, which allows GLUT to
integrate with terminal-based applications like IPython. Due to GLUT
limitations, it is currently not possible to start the event loop
without first creating a window. You should thus not create another
window but use instead the created one. See 'gui-glut.py' in the
docs/examples/lib directory.
The default screen mode is set to:
glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH
"""
import OpenGL.GLUT as glut
from pydev_ipython.inputhookglut import glut_display_mode, \
glut_close, glut_display, \
glut_idle, inputhook_glut
if GUI_GLUT not in self._apps:
glut.glutInit(sys.argv)
glut.glutInitDisplayMode(glut_display_mode)
# This is specific to freeglut
if bool(glut.glutSetOption):
glut.glutSetOption(glut.GLUT_ACTION_ON_WINDOW_CLOSE,
glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS)
glut.glutCreateWindow(sys.argv[0])
glut.glutReshapeWindow(1, 1)
glut.glutHideWindow()
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
else:
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
self.set_inputhook(inputhook_glut)
self._current_gui = GUI_GLUT
self._apps[GUI_GLUT] = True
def disable_glut(self):
"""Disable event loop integration with glut.
This sets PyOS_InputHook to NULL, sets the display function to a
dummy one, and sets the timer to a dummy timer that will be triggered
very far in the future.
"""
import OpenGL.GLUT as glut
from glut_support import glutMainLoopEvent # @UnresolvedImport
glut.glutHideWindow() # This is an event to be processed below
glutMainLoopEvent()
self.clear_inputhook()
def enable_pyglet(self, app=None):
"""Enable event loop integration with pyglet.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the ``PyOS_InputHook`` for pyglet, which allows
pyglet to integrate with terminal-based applications like
IPython.
"""
from pydev_ipython.inputhookpyglet import inputhook_pyglet
self.set_inputhook(inputhook_pyglet)
self._current_gui = GUI_PYGLET
return app
def disable_pyglet(self):
"""Disable event loop integration with pyglet.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_gtk3(self, app=None):
"""Enable event loop integration with Gtk3 (gir bindings).
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This method sets the PyOS_InputHook for Gtk3, which allows
Gtk3 to integrate with terminal-based applications like
IPython.
"""
from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3
self.set_inputhook(create_inputhook_gtk3(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk3(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_mac(self, app=None):
""" Enable event loop integration with MacOSX.
We call the function pyplot.pause, which updates and displays the
active figure during the pause. It's not MacOSX-specific, but it lets
us avoid input hooks in the native MacOSX backend.
Also, we shouldn't import pyplot until the user does, because the
backend can only be chosen before pyplot is imported for the first
time.
"""
def inputhook_mac(app=None):
if self.pyplot_imported:
pyplot = sys.modules['matplotlib.pyplot']
try:
pyplot.pause(0.01)
except:
pass
else:
if 'matplotlib.pyplot' in sys.modules:
self.pyplot_imported = True
self.set_inputhook(inputhook_mac)
self._current_gui = GUI_OSX
def disable_mac(self):
self.clear_inputhook()
def current_gui(self):
"""Return a string indicating the currently active GUI or None."""
return self._current_gui
inputhook_manager = InputHookManager()
enable_wx = inputhook_manager.enable_wx
disable_wx = inputhook_manager.disable_wx
enable_qt4 = inputhook_manager.enable_qt4
disable_qt4 = inputhook_manager.disable_qt4
enable_gtk = inputhook_manager.enable_gtk
disable_gtk = inputhook_manager.disable_gtk
enable_tk = inputhook_manager.enable_tk
disable_tk = inputhook_manager.disable_tk
enable_glut = inputhook_manager.enable_glut
disable_glut = inputhook_manager.disable_glut
enable_pyglet = inputhook_manager.enable_pyglet
disable_pyglet = inputhook_manager.disable_pyglet
enable_gtk3 = inputhook_manager.enable_gtk3
disable_gtk3 = inputhook_manager.disable_gtk3
enable_mac = inputhook_manager.enable_mac
disable_mac = inputhook_manager.disable_mac
clear_inputhook = inputhook_manager.clear_inputhook
set_inputhook = inputhook_manager.set_inputhook
current_gui = inputhook_manager.current_gui
clear_app_refs = inputhook_manager.clear_app_refs
# We maintain this as stdin_ready so that the individual inputhooks
# can diverge as little as possible from their IPython sources
stdin_ready = inputhook_manager.return_control
set_return_control_callback = inputhook_manager.set_return_control_callback
get_return_control_callback = inputhook_manager.get_return_control_callback
get_inputhook = inputhook_manager.get_inputhook
# Convenience function to switch amongst them
def enable_gui(gui=None, app=None):
"""Switch amongst GUI input hooks by name.
This is just a utility wrapper around the methods of the InputHookManager
object.
Parameters
----------
gui : optional, string or None
If None (or 'none'), clears input hook, otherwise it must be one
of the recognized GUI names (see ``GUI_*`` constants in module).
app : optional, existing application object.
For toolkits that have the concept of a global app, you can supply an
existing one. If not given, the toolkit will be probed for one, and if
none is found, a new one will be created. Note that GTK does not have
this concept, and passing an app if ``gui=="GTK"`` will raise an error.
Returns
-------
The output of the underlying gui switch routine, typically the actual
PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
one.
"""
if get_return_control_callback() is None:
raise ValueError("A return_control_callback must be supplied as a reference before a gui can be enabled")
guis = {GUI_NONE: clear_inputhook,
GUI_OSX: enable_mac,
GUI_TK: enable_tk,
GUI_GTK: enable_gtk,
GUI_WX: enable_wx,
GUI_QT: enable_qt4, # qt3 not supported
GUI_QT4: enable_qt4,
GUI_GLUT: enable_glut,
GUI_PYGLET: enable_pyglet,
GUI_GTK3: enable_gtk3,
}
try:
gui_hook = guis[gui]
except KeyError:
if gui is None or gui == '':
gui_hook = clear_inputhook
else:
e = "Invalid GUI request %r, valid ones are:%s" % (gui, guis.keys())
raise ValueError(e)
return gui_hook(app)
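# Hedged usage sketch (editor's addition, not part of the original module):
# a front end first registers a callback telling the hook when to return
# control, then enables a toolkit by name, e.g.
#   set_return_control_callback(lambda: True)   # placeholder callback
#   enable_gui('qt4')                           # or 'wx', 'tk', 'none', ...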
__all__ = [
"GUI_WX",
"GUI_QT",
"GUI_QT4",
"GUI_GTK",
"GUI_TK",
"GUI_OSX",
"GUI_GLUT",
"GUI_PYGLET",
"GUI_GTK3",
"GUI_NONE",
"ignore_CTRL_C",
"allow_CTRL_C",
"InputHookManager",
"inputhook_manager",
"enable_wx",
"disable_wx",
"enable_qt4",
"disable_qt4",
"enable_gtk",
"disable_gtk",
"enable_tk",
"disable_tk",
"enable_glut",
"disable_glut",
"enable_pyglet",
"disable_pyglet",
"enable_gtk3",
"disable_gtk3",
"enable_mac",
"disable_mac",
"clear_inputhook",
"set_inputhook",
"current_gui",
"clear_app_refs",
"stdin_ready",
"set_return_control_callback",
"get_return_control_callback",
"get_inputhook",
"enable_gui"]
| apache-2.0 |
h2educ/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
harshaneelhg/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
| bsd-3-clause |
Ziqi-Li/bknqgis | pandas/pandas/io/msgpack/__init__.py | 26 | 1233 | # coding: utf-8
from collections import namedtuple
from pandas.io.msgpack.exceptions import * # noqa
from pandas.io.msgpack._version import version # noqa
class ExtType(namedtuple('ExtType', 'code data')):
"""ExtType represents ext type in msgpack."""
def __new__(cls, code, data):
if not isinstance(code, int):
raise TypeError("code must be int")
if not isinstance(data, bytes):
raise TypeError("data must be bytes")
if not 0 <= code <= 127:
raise ValueError("code must be 0~127")
return super(ExtType, cls).__new__(cls, code, data)
import os # noqa
from pandas.io.msgpack._packer import Packer # noqa
from pandas.io.msgpack._unpacker import unpack, unpackb, Unpacker # noqa
def pack(o, stream, **kwargs):
"""
Pack object `o` and write it to `stream`
See :class:`Packer` for options.
"""
packer = Packer(**kwargs)
stream.write(packer.pack(o))
def packb(o, **kwargs):
"""
Pack object `o` and return packed bytes
See :class:`Packer` for options.
"""
return Packer(**kwargs).pack(o)
# alias for compatibility to simplejson/marshal/pickle.
load = unpack
loads = unpackb
dump = pack
dumps = packb
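# Hedged usage sketch (editor's addition), using only the helpers above:
#   import io
#   buf = io.BytesIO()
#   pack([1, 2, 3], buf)              # stream form
#   payload = packb([1, 2, 3])        # bytes form
#   assert unpackb(payload) == [1, 2, 3]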
| gpl-2.0 |
mcanthony/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_fltkagg.py | 69 | 20839 | """
A backend for FLTK
Copyright: Gregory Lielens, Free Field Technologies SA and
John D. Hunter 2004
This code is released under the matplotlib license
"""
from __future__ import division
import os, sys, math
import fltk as Fltk
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib import rcParams, verbose
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import \
RendererBase, GraphicsContextBase, FigureManagerBase, FigureCanvasBase,\
NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import thread,time
Fl_running=thread.allocate_lock()
def Fltk_run_interactive():
global Fl_running
if Fl_running.acquire(0):
while True:
Fltk.Fl.check()
time.sleep(0.005)
else:
print "fl loop already running"
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
cursord= {
cursors.HAND: Fltk.FL_CURSOR_HAND,
cursors.POINTER: Fltk.FL_CURSOR_ARROW,
cursors.SELECT_REGION: Fltk.FL_CURSOR_CROSS,
cursors.MOVE: Fltk.FL_CURSOR_MOVE
}
special_key={
Fltk.FL_Shift_R:'shift',
Fltk.FL_Shift_L:'shift',
Fltk.FL_Control_R:'control',
Fltk.FL_Control_L:'control',
Fltk.FL_Control_R:'control',
Fltk.FL_Control_L:'control',
65515:'win',
65516:'win',
}
def error_msg_fltk(msg, parent=None):
Fltk.fl_message(msg)
def draw_if_interactive():
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def ishow():
"""
Show all the figures and enter the fltk mainloop in another thread
This allows you to keep working in the interactive python session.
Warning: does not work under Windows.
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
if show._needmain:
thread.start_new_thread(Fltk_run_interactive,())
show._needmain = False
def show():
"""
Show all the figures and enter the fltk mainloop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.show()
# mainloop; if an fltk program already exists there is no need to call that
# threaded (and interactive) version
if show._needmain:
Fltk.Fl.run()
show._needmain = False
show._needmain = True
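# Hedged usage sketch (editor's addition): with this backend selected, a
# script builds figures through pylab/pyplot as usual and ends with show():
#   from pylab import plot, show
#   plot([1, 2, 3])
#   show()    # enters the FLTK mainloop; ishow() does the same in a thread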
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
window = Fltk.Fl_Double_Window(10,10,30,30)
canvas = FigureCanvasFltkAgg(figure)
window.end()
window.show()
window.make_current()
figManager = FigureManagerFltkAgg(canvas, num, window)
if matplotlib.is_interactive():
figManager.show()
return figManager
class FltkCanvas(Fltk.Fl_Widget):
def __init__(self,x,y,w,h,l,source):
Fltk.Fl_Widget.__init__(self, 0, 0, w, h, "canvas")
self._source=source
self._oldsize=(None,None)
self._draw_overlay = False
self._button = None
self._key = None
def draw(self):
newsize=(self.w(),self.h())
if(self._oldsize !=newsize):
self._oldsize =newsize
self._source.resize(newsize)
self._source.draw()
t1,t2,w,h = self._source.figure.bbox.bounds
Fltk.fl_draw_image(self._source.buffer_rgba(0,0),0,0,int(w),int(h),4,0)
self.redraw()
def blit(self,bbox=None):
if bbox is None:
t1,t2,w,h = self._source.figure.bbox.bounds
else:
t1o,t2o,wo,ho = self._source.figure.bbox.bounds
t1,t2,w,h = bbox.bounds
x,y=int(t1),int(t2)
Fltk.fl_draw_image(self._source.buffer_rgba(x,y),x,y,int(w),int(h),4,int(wo)*4)
#self.redraw()
def handle(self, event):
x=Fltk.Fl.event_x()
y=Fltk.Fl.event_y()
yf=self._source.figure.bbox.height() - y
if event == Fltk.FL_FOCUS or event == Fltk.FL_UNFOCUS:
return 1
elif event == Fltk.FL_KEYDOWN:
ikey= Fltk.Fl.event_key()
if(ikey<=255):
self._key=chr(ikey)
else:
try:
self._key=special_key[ikey]
except:
self._key=None
FigureCanvasBase.key_press_event(self._source, self._key)
return 1
elif event == Fltk.FL_KEYUP:
FigureCanvasBase.key_release_event(self._source, self._key)
self._key=None
elif event == Fltk.FL_PUSH:
self.window().make_current()
if Fltk.Fl.event_button1():
self._button = 1
elif Fltk.Fl.event_button2():
self._button = 2
elif Fltk.Fl.event_button3():
self._button = 3
else:
self._button = None
if self._draw_overlay:
self._oldx=x
self._oldy=y
if Fltk.Fl.event_clicks():
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
else:
FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
return 1
elif event == Fltk.FL_ENTER:
self.take_focus()
return 1
elif event == Fltk.FL_LEAVE:
return 1
elif event == Fltk.FL_MOVE:
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_DRAG:
self.window().make_current()
if self._draw_overlay:
self._dx=Fltk.Fl.event_x()-self._oldx
self._dy=Fltk.Fl.event_y()-self._oldy
Fltk.fl_overlay_rect(self._oldx,self._oldy,self._dx,self._dy)
FigureCanvasBase.motion_notify_event(self._source, x, yf)
return 1
elif event == Fltk.FL_RELEASE:
self.window().make_current()
if self._draw_overlay:
Fltk.fl_overlay_clear()
FigureCanvasBase.button_release_event(self._source, x, yf, self._button)
self._button = None
return 1
return 0
class FigureCanvasFltkAgg(FigureCanvasAgg):
def __init__(self, figure):
FigureCanvasAgg.__init__(self,figure)
t1,t2,w,h = self.figure.bbox.bounds
w, h = int(w), int(h)
self.canvas=FltkCanvas(0, 0, w, h, "canvas",self)
#self.draw()
def resize(self,size):
w, h = size
# compute desired figure size in inches
dpival = self.figure.dpi.get()
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch,hinch)
def draw(self):
FigureCanvasAgg.draw(self)
self.canvas.redraw()
def blit(self,bbox):
self.canvas.blit(bbox)
show = draw
def widget(self):
return self.canvas
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
def destroy_figure(ptr,figman):
figman.window.hide()
Gcf.destroy(figman._num)
class FigureManagerFltkAgg(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The fltk.Toolbar
window : The fltk.Window
"""
def __init__(self, canvas, num, window):
FigureManagerBase.__init__(self, canvas, num)
#Fltk container window
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self.window = window
self.window.size(w,h+30)
self.window_title="Figure %d" % num
self.window.label(self.window_title)
self.window.size_range(350,200)
self.window.callback(destroy_figure,self)
self.canvas = canvas
self._num = num
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbar( canvas, self )
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2FltkAgg( canvas, self )
else:
self.toolbar = None
self.window.add_resizable(canvas.widget())
if self.toolbar:
self.window.add(self.toolbar.widget())
self.toolbar.update()
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def resize(self, event):
width, height = event.width, event.height
self.toolbar.configure(width=width) # , height=height)
def show(self):
_focus = windowing.FocusManager()
self.canvas.draw()
self.window.redraw()
def set_window_title(self, title):
self.window_title=title
self.window.label(title)
class AxisMenu:
def __init__(self, toolbar):
self.toolbar=toolbar
self._naxes = toolbar.naxes
self._mbutton = Fltk.Fl_Menu_Button(0,0,50,10,"Axes")
self._mbutton.add("Select All",0,select_all,self,0)
self._mbutton.add("Invert All",0,invert_all,self,Fltk.FL_MENU_DIVIDER)
self._axis_txt=[]
self._axis_var=[]
for i in range(self._naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
def adjust(self, naxes):
if self._naxes < naxes:
for i in range(self._naxes, naxes):
self._axis_txt.append("Axis %d" % (i+1))
self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
for i in range(self._naxes, naxes):
self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
self._axis_var[i].set()
elif self._naxes > naxes:
for i in range(self._naxes-1, naxes-1, -1):
self._mbutton.remove(i+2)
if(naxes):
self._axis_var=self._axis_var[:naxes-1]
self._axis_txt=self._axis_txt[:naxes-1]
else:
self._axis_var=[]
self._axis_txt=[]
self._naxes = naxes
set_active(0,self)
def widget(self):
return self._mbutton
def get_indices(self):
a = [i for i in range(len(self._axis_var)) if self._axis_var[i].value()]
return a
def set_active(ptr,amenu):
amenu.toolbar.set_active(amenu.get_indices())
def invert_all(ptr,amenu):
for a in amenu._axis_var:
if not a.value(): a.set()
set_active(ptr,amenu)
def select_all(ptr,amenu):
for a in amenu._axis_var:
a.set()
set_active(ptr,amenu)
class FLTKButton:
def __init__(self, text, file, command,argument,type="classic"):
file = os.path.join(rcParams['datapath'], 'images', file)
self.im = Fltk.Fl_PNM_Image(file)
size=26
if type=="repeat":
self.b = Fltk.Fl_Repeat_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="classic":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="light":
self.b = Fltk.Fl_Light_Button(0,0,size+20,10)
self.b.box(Fltk.FL_THIN_UP_BOX)
elif type=="pushed":
self.b = Fltk.Fl_Button(0,0,size,10)
self.b.box(Fltk.FL_UP_BOX)
self.b.down_box(Fltk.FL_DOWN_BOX)
self.b.type(Fltk.FL_TOGGLE_BUTTON)
self.tooltiptext=text+" "
self.b.tooltip(self.tooltiptext)
self.b.callback(command,argument)
self.b.image(self.im)
self.b.deimage(self.im)
self.type=type
def widget(self):
return self.b
class NavigationToolbar:
"""
    Public attributes
canvas - the FigureCanvas (FigureCanvasFltkAgg = customised fltk.Widget)
"""
def __init__(self, canvas, figman):
#xmin, xmax = canvas.figure.bbox.intervalx().get_bounds()
#height, width = 50, xmax-xmin
self.canvas = canvas
self.figman = figman
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bLeft = FLTKButton(
text="Left", file="stock_left.ppm",
command=pan,argument=(self,1,'x'),type="repeat")
self.bRight = FLTKButton(
text="Right", file="stock_right.ppm",
command=pan,argument=(self,-1,'x'),type="repeat")
self.bZoomInX = FLTKButton(
text="ZoomInX",file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'x'),type="repeat")
self.bZoomOutX = FLTKButton(
text="ZoomOutX", file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'x'),type="repeat")
self.bUp = FLTKButton(
text="Up", file="stock_up.ppm",
command=pan,argument=(self,1,'y'),type="repeat")
self.bDown = FLTKButton(
text="Down", file="stock_down.ppm",
command=pan,argument=(self,-1,'y'),type="repeat")
self.bZoomInY = FLTKButton(
text="ZoomInY", file="stock_zoom-in.ppm",
command=zoom,argument=(self,1,'y'),type="repeat")
self.bZoomOutY = FLTKButton(
text="ZoomOutY",file="stock_zoom-out.ppm",
command=zoom, argument=(self,-1,'y'),type="repeat")
self.bSave = FLTKButton(
text="Save", file="stock_save_as.ppm",
command=save_figure, argument=self)
self._group.end()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
def pan(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.panx(direction)
else:
a.pany(direction)
base.figman.show()
def zoom(ptr, arg):
base,direction,axe=arg
for a in base._active:
if(axe=='x'):
a.zoomx(direction)
else:
a.zoomy(direction)
base.figman.show()
def save_figure(ptr,base):
filetypes = base.canvas.get_supported_filetypes()
default_filetype = base.canvas.get_default_filetype()
sorted_filetypes = filetypes.items()
sorted_filetypes.sort()
selected_filter = 0
filters = []
for i, (ext, name) in enumerate(sorted_filetypes):
filter = '%s (*.%s)' % (name, ext)
filters.append(filter)
if ext == default_filetype:
selected_filter = i
filters = '\t'.join(filters)
file_chooser=base._fc
file_chooser.filter(filters)
file_chooser.filter_value(selected_filter)
file_chooser.show()
while file_chooser.visible() :
Fltk.Fl.wait()
fname=None
if(file_chooser.count() and file_chooser.value(0) != None):
fname=""
(status,fname)=Fltk.fl_filename_absolute(fname, 1024, file_chooser.value(0))
if fname is None: # Cancel
return
#start from last directory
lastDir = os.path.dirname(fname)
file_chooser.directory(lastDir)
format = sorted_filetypes[file_chooser.filter_value()][0]
try:
base.canvas.print_figure(fname, format=format)
except IOError, msg:
err = '\n'.join(map(str, msg))
msg = 'Failed to save %s: Error msg was\n\n%s' % (
fname, err)
error_msg_fltk(msg)
class NavigationToolbar2FltkAgg(NavigationToolbar2):
"""
    Public attributes
canvas - the FigureCanvas
figman - the Figure manager
"""
def __init__(self, canvas, figman):
self.canvas = canvas
self.figman = figman
NavigationToolbar2.__init__(self, canvas)
self.pan_selected=False
self.zoom_selected=False
def set_cursor(self, cursor):
Fltk.fl_cursor(cursord[cursor],Fltk.FL_BLACK,Fltk.FL_WHITE)
def dynamic_update(self):
self.canvas.draw()
def pan(self,*args):
self.pan_selected=not self.pan_selected
self.zoom_selected = False
self.canvas.canvas._draw_overlay= False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.pan(self,args)
def zoom(self,*args):
self.zoom_selected=not self.zoom_selected
self.canvas.canvas._draw_overlay=self.zoom_selected
self.pan_selected = False
if self.pan_selected:
self.bPan.widget().value(1)
else:
self.bPan.widget().value(0)
if self.zoom_selected:
self.bZoom.widget().value(1)
else:
self.bZoom.widget().value(0)
NavigationToolbar2.zoom(self,args)
def configure_subplots(self,*args):
window = Fltk.Fl_Double_Window(100,100,480,240)
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasFltkAgg(toolfig)
window.end()
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
window.show()
canvas.show()
def _init_toolbar(self):
Fltk.Fl_File_Icon.load_system_icons()
self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
self._fc.hide()
t1,t2,w,h = self.canvas.figure.bbox.bounds
w, h = int(w), int(h)
self._group = Fltk.Fl_Pack(0,h+2,1000,26)
self._group.type(Fltk.FL_HORIZONTAL)
self._axes=self.canvas.figure.axes
self.naxes = len(self._axes)
self.omenu = AxisMenu( toolbar=self)
self.bHome = FLTKButton(
text="Home", file="home.ppm",
command=self.home,argument=self)
self.bBack = FLTKButton(
text="Back", file="back.ppm",
command=self.back,argument=self)
self.bForward = FLTKButton(
text="Forward", file="forward.ppm",
command=self.forward,argument=self)
self.bPan = FLTKButton(
text="Pan/Zoom",file="move.ppm",
command=self.pan,argument=self,type="pushed")
self.bZoom = FLTKButton(
text="Zoom to rectangle",file="zoom_to_rect.ppm",
command=self.zoom,argument=self,type="pushed")
self.bsubplot = FLTKButton( text="Configure Subplots", file="subplots.ppm",
command = self.configure_subplots,argument=self,type="pushed")
self.bSave = FLTKButton(
text="Save", file="filesave.ppm",
command=save_figure, argument=self)
self._group.end()
self.message = Fltk.Fl_Output(0,0,w,8)
self._group.add_resizable(self.message)
self.update()
def widget(self):
return self._group
def close(self):
Gcf.destroy(self.figman._num)
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def update(self):
self._axes = self.canvas.figure.axes
naxes = len(self._axes)
self.omenu.adjust(naxes)
NavigationToolbar2.update(self)
def set_message(self, s):
self.message.value(s)
FigureManager = FigureManagerFltkAgg
| agpl-3.0 |
ninotoshi/tensorflow | tensorflow/contrib/learn/python/learn/io/__init__.py | 5 | 1709 | """Tools to allow different io formats."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.io.dask_io import extract_dask_data
from tensorflow.contrib.learn.python.learn.io.dask_io import extract_dask_labels
from tensorflow.contrib.learn.python.learn.io.dask_io import HAS_DASK
from tensorflow.contrib.learn.python.learn.io.graph_io import read_batch_examples
from tensorflow.contrib.learn.python.learn.io.graph_io import read_batch_features
from tensorflow.contrib.learn.python.learn.io.graph_io import read_batch_record_features
from tensorflow.contrib.learn.python.learn.io.pandas_io import extract_pandas_data
from tensorflow.contrib.learn.python.learn.io.pandas_io import extract_pandas_labels
from tensorflow.contrib.learn.python.learn.io.pandas_io import extract_pandas_matrix
from tensorflow.contrib.learn.python.learn.io.pandas_io import HAS_PANDAS
# pylint: disable=g-import-not-at-top
if HAS_PANDAS:
from tensorflow.contrib.learn.python.learn.io.pandas_io import pd
| apache-2.0 |
borg-project/borg | borg/regression.py | 1 | 4929 | """@author: Bryan Silverthorn <[email protected]>"""
import numpy
import sklearn.svm
import sklearn.pipeline
import sklearn.linear_model
import sklearn.decomposition
import sklearn.kernel_approximation
import sklearn.preprocessing
import borg
logger = borg.get_logger(__name__, default_level = "INFO")
class MultiClassifier(object):
def __init__(self, model_class, **model_kwargs):
self._model_class = model_class
self._model_kwargs = model_kwargs
def fit(self, X, Y):
(N, D) = Y.shape
(_, self._F) = X.shape
logger.info("fitting %i models to %i examples", D, N)
self._models = [None] * D
for d in xrange(D):
if d % 250 == 0:
logger.info("fit %i models so far", d)
if numpy.any(Y[:, d] > 0):
model = self._model_class(**self._model_kwargs)
model.fit(X, Y[:, d], class_weight = {0: 1.0, 1: 10.0})
else:
model = None
self._models[d] = model
return self
def predict_log_proba(self, X):
(M, F) = X.shape
D = len(self._models)
z = numpy.empty((M, D))
for (d, model) in enumerate(self._models):
if model is None:
z[:, d] = 0.0
else:
# TODO use predict_log_proba when it stops tossing warnings
z[:, d] = numpy.log(model.predict_proba(X)[:, 1] + 1e-64)
return z
def get_feature_weights(self):
coefs_list = []
for model in self._models:
if model is None:
coefs_list.append([0.0] * self._F)
else:
assert model.coef_.shape == (1, self._F)
coefs_list.append(model.coef_[0])
coefs = numpy.array(coefs_list)
weights = numpy.mean(numpy.abs(coefs), axis = 0)
return weights
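# Usage sketch (illustrative, not part of the original module): MultiClassifier
# fits one independent binary model per output column of Y, e.g.
#
#     clf = MultiClassifier(sklearn.linear_model.LogisticRegression)
#     clf.fit(X, Y)                       # X: (N, F) features, Y: (N, D) 0/1 labels
#     log_p = clf.predict_log_proba(X)    # (N, D) array of log P(y_d = 1 | x)
#
# where X and Y are placeholder arrays.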
def mapify_model_survivals(model):
"""Compute per-instance MAP survival functions."""
(P, S, D) = model.log_masses.shape
(_, F) = model.features.shape
unique_names = numpy.unique(model.names)
(N,) = unique_names.shape
masks = numpy.empty((N, P), bool)
map_survivals = numpy.empty((N, S, D))
features = numpy.empty((N, F))
logger.info("computing MAP RTDs over %i samples", P)
for (n, name) in enumerate(unique_names):
masks[n] = mask = model.names == name
features[n] = model.features[mask][0]
log_survivals = model.log_survival[mask]
log_weights = model.log_weights[mask]
log_weights -= numpy.logaddexp.reduce(log_weights)
map_survivals[n, :, :] = numpy.logaddexp.reduce(log_survivals + log_weights[:, None, None])
return (unique_names, masks, features, numpy.exp(map_survivals))
class NearestRTDRegression(object):
"""Predict nearest RTDs."""
def __init__(self, model):
self._model = model
(names, self._masks, features, survivals) = mapify_model_survivals(model)
(N, _) = features.shape
logger.info("computing %i^2 == %i inter-RTD distances", N, N * N)
distances = borg.bregman.survival_distances_all(survivals)
nearest = numpy.zeros((N, N), dtype = numpy.intc)
nearest_count = min(32, N / 4)
for n in xrange(N):
nearest[n, numpy.argsort(distances[n])[:nearest_count]] = 1
logger.info("fitting classifier to nearest RTDs")
classifier = MultiClassifier(sklearn.linear_model.LogisticRegression)
#classifier = MultiClassifier(sklearn.svm.SVC, scale_C = True, probability = True)
#classifier = MultiClassifier(sklearn.linear_model.LogisticRegression, penalty = "l1", C = 1e-1)
#classifier = MultiClassifier(sklearn.linear_model.LogisticRegression, penalty = "l2", C = 1e-2)
self._regression = \
sklearn.pipeline.Pipeline([
#("pca", sklearn.decomposition.PCA(whiten = True)),
#("kernel", sklearn.kernel_approximation.RBFSampler(n_components = 1000)),
("scaler", sklearn.preprocessing.Scaler()),
("classifier", classifier),
]) \
.fit(features, nearest)
def predict(self, tasks, features):
"""Predict RTD probabilities."""
features = numpy.asarray(features)
(P,) = self._model.log_weights.shape
(N, _) = self._masks.shape
(M, F) = features.shape
predictions = self._regression.predict_log_proba(features)
weights = numpy.empty((M, P))
weights[:, :] = self._model.log_weights[None, :]
for n in xrange(N):
weights[:, self._masks[n]] += predictions[:, n, None]
weights = numpy.exp(weights)
weights += 1e-64
weights /= numpy.sum(weights, axis = -1)[..., None]
return weights
@property
def classifier(self):
(_, classifier) = self._regression.steps[-1]
return classifier
| mit |
fcchou/CS229-project | learning/final_plot.py | 1 | 6374 | import sys
sys.path.append('../')
from jos_learn.features import FeatureExtract
import numpy as np
import matplotlib.pyplot as plt
import sklearn.cross_validation as cv
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC, SVC
from sklearn.metrics import accuracy_score, f1_score
from sklearn.metrics import precision_score, recall_score, precision_recall_curve, auc
from sklearn.grid_search import GridSearchCV
from sklearn.feature_extraction.text import TfidfTransformer
import cPickle as pickle
from sklearn.decomposition import TruncatedSVD, PCA, KernelPCA
from sklearn.feature_selection import SelectKBest, chi2
# Setup the features
extractor = FeatureExtract()
labels = extractor.labels
works = extractor.works
labels[labels == -1] = 0
feature1 = extractor.feature_cp[0]
feature_shell = []
length = [2, 3]
for l in length:
folder = '../shell/length%d_no_mirror' % l
dict_list = []
for work in works:
data = pickle.load(open('%s/%s.pkl' % (folder, work), 'rb'))
dict_list.append(data)
feature, names = extractor._vectorize(dict_list)
feature_shell.append(feature)
feature_shell = np.hstack(feature_shell)
normalizer = TfidfTransformer()
feature1 = normalizer.fit_transform(feature1).toarray()
feature2 = normalizer.fit_transform(feature_shell).toarray()
SVD1 = TruncatedSVD(n_components=300)
SVD2 = TruncatedSVD(n_components=200)
feature1 = SVD1.fit_transform(feature1)
feature2 = SVD2.fit_transform(feature2)
feature = np.hstack((feature1, feature2))
feature_unsec = feature[labels == 2]
feature = feature[labels != 2]
labels = labels[labels != 2]
#def validate(clf, feature, k):
# sfk = cv.StratifiedShuffleSplit(labels, 100)
# scores = []
# for train, test in sfk:
# score = []
# train_set = feature[train]
# test_set = feature[test]
# clf.fit(train_set, labels[train])
# pred = clf.predict(test_set)
# score.append(accuracy_score(labels[test], pred))
# score.append(precision_score(labels[test], pred))
# score.append(recall_score(labels[test], pred))
# score.append(f1_score(labels[test], pred))
# scores.append(score)
# avg = np.average(scores, axis=0)
# return avg
##print validate(SVC(kernel='poly', degree=2, C=1000000), feature, 500)
#print validate(SVC(C=10000, gamma=0.75), feature, 500)
#print validate(LinearSVC(C=100), feature, 500)
#print validate(LogisticRegression(C=100), feature, 500)
sfk = cv.StratifiedShuffleSplit(labels, 500)
clf1 = SVC(C=10000, gamma=0.75, probability=True)
all_prob = []
all_label = []
for train, test in sfk:
clf1.fit(feature[train], labels[train])
prob = clf1.predict_proba(feature[test])[:, 1]
all_prob.append(prob)
all_label.append(labels[test])
all_label = np.hstack(all_label)
all_prob = np.hstack(all_prob)
precision, recall, thresholds = precision_recall_curve(all_label, all_prob)
area = auc(recall, precision)
F1 = (precision * recall) / (precision + recall) * 2
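# Note: precision_recall_curve returns one more precision/recall entry than
# thresholds, hence the [:-1] slicing below; F1 above is the usual
# 2 * precision * recall / (precision + recall).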
np.savetxt('pr_svm.txt',np.column_stack((precision[:-1], recall[:-1], thresholds)), fmt='%4f')
#plt.figure(figsize=(3, 2.25))
#plt.plot(recall, precision)
#plt.xlabel('Recall')
#plt.ylabel('Precision')
#plt.title('Precision-Recall, SVM, AUC=%0.2f' % area)
#plt.xlim(0, 1)
#plt.ylim(0, 1.05)
#plt.tight_layout()
plt.figure(figsize=(3, 2.25))
plt.plot(thresholds, precision[:-1], label='Precision')
plt.plot(thresholds, recall[:-1], label='Recall')
plt.plot(thresholds, F1[:-1], label='F1')
plt.title('SVM')
plt.xlabel('Threshold')
plt.legend(loc=3, fontsize=8)
plt.tight_layout()
plt.savefig('prob_pr_svm.png', dpi=600)
plt.savefig('prob_pr_svm.pdf')
sfk = cv.StratifiedShuffleSplit(labels, 500)
clf = LogisticRegression(C=100)
all_prob = []
all_label = []
for train, test in sfk:
clf.fit(feature[train], labels[train])
prob = clf.predict_proba(feature[test])[:, 1]
all_prob.append(prob)
all_label.append(labels[test])
all_label = np.hstack(all_label)
all_prob = np.hstack(all_prob)
precision, recall, thresholds = precision_recall_curve(all_label, all_prob)
area = auc(recall, precision)
F1 = (precision * recall) / (precision + recall) * 2
#
#plt.figure(figsize=(3, 2.25))
#plt.plot(recall, precision)
#plt.xlabel('Recall')
#plt.ylabel('Precision')
#plt.title('Precision-Recall, Logistic, AUC=%0.2f' % area)
#plt.xlim(0, 1)
#plt.ylim(0, 1.05)
#plt.tight_layout()
#plt.savefig('pr_logi.png')
#plt.savefig('pr_logi.pdf')
plt.figure(figsize=(3, 2.25))
plt.plot(thresholds, precision[:-1], label='Precision')
plt.plot(thresholds, recall[:-1], label='Recall')
plt.plot(thresholds, F1[:-1], label='F1')
plt.xlabel('Threshold')
plt.title('Logistic')
plt.legend(loc=3, fontsize=8)
plt.tight_layout()
plt.savefig('prob_pr_logi.png', dpi=600)
plt.savefig('prob_pr_logi.pdf')
# Plot the learning curve
#sfk = cv.StratifiedKFold(labels, 10)
#sel_idx = []
#data_size = []
#for train, test in sfk:
# if not sel_idx:
# sel_idx.append(test)
# else:
# new_idx = np.append(sel_idx[-1], test)
# sel_idx.append(new_idx)
# data_size.append(sel_idx[-1].shape[0])
#
#
#n = 3
#sel_idx = sel_idx[n:]
#data_size = data_size[n:]
#
#
#def get_learning_curve(clf, feature):
# test_scores = []
# train_scores = []
# for idx in sel_idx:
# feature_curr = feature[idx]
# labels_curr = labels[idx]
# # test error
# sfk = cv.StratifiedShuffleSplit(labels_curr, 500)
# score = cv.cross_val_score(
# clf, feature_curr, labels_curr, 'f1', sfk, n_jobs=8)
# test_scores.append(np.average(score))
# # training error
# clf.fit(feature_curr, labels_curr)
# pred = clf.predict(feature_curr)
# train_scores.append(f1_score(labels_curr, pred))
# return test_scores, train_scores
#
#fig = plt.figure(figsize=(6, 2.25))
#
#
#def plot(clf, feature, idx, title):
# fig.add_subplot(1, 2, idx)
# test_scores, train_scores = get_learning_curve(clf, feature)
# plt.plot(data_size, test_scores, 'r-o')
# plt.plot(data_size, train_scores, 'k-o')
# plt.xlabel('Dataset size')
# plt.ylabel('F1')
# plt.title(title)
# plt.tight_layout()
# plt.ylim(0.8, 1.05)
#
#clf1 = LogisticRegression(C=100)
#clf2 = SVC(C=10000, gamma=0.75)
#plot(clf1, feature, 1, 'SVM')
#plot(clf2, feature, 2, 'Logistic')
#plt.show()
| apache-2.0 |
LSSTDESC/LSSTDarkMatter | satellites/dwarf_resolution.py | 1 | 1619 | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from ugali.analysis.isochrone import factory as isochrone_factory
from ugali.utils.projector import dist2mod, mod2dist
mag_lim = 26.5
rad_to_arcsec = 206265.
age = 12.
z = 0.0001 # feh = -2.1
dist = np.array([50, 100, 500, 1000, 5000, 10000]) # kpc
m_star = 10**np.arange(3, 7.5, 0.5) # Msun
r50 = np.logspace(0, 3, 50) # pc
M, R = np.meshgrid(m_star, r50)
for d in dist:
print 'd = %.2f' % d
m_plt = []
r_plt = []
# get isochrone
mu = dist2mod(d)
iso = isochrone_factory('Padova', age=age, distance_modulus=mu, z=z)
for m in m_star:
print 'm = %.2e' % m
# calculate number of stars above mag lim
g, r = iso.simulate(m)
g = g[g < mag_lim]
r = r[r < mag_lim]
nstars = len(g) / 2 # use g for now
# calculate stellar density
area = 2 * np.pi * (r50/(d*1000) * rad_to_arcsec)**2
        angular_density = nstars / area  # stars per square arcsecond
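        # A dwarf is counted as resolvable when its stars do not blend
        # (surface density below 1 star per square arcsecond) and at least
        # 20 stars lie above the magnitude limit.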
r_det = r50[(angular_density < 1.0) & (nstars > 20)]
r_plt.extend(r_det)
m_plt.extend([m] * len(r_det))
print 'ndwarfs = %i' % len(r_det)
plt.figure()
plt.scatter(M, R, facecolors='none', edgecolor='darkslateblue', s=50)
plt.scatter(m_plt, r_plt, color='darkslateblue', edgecolor='none', s=50)
plt.xlabel('L (Lsun)')
plt.ylabel('r50 (pc)')
plt.xscale('log')
plt.yscale('log')
plt.xlim(1e2, 1e8)
plt.ylim(r50.min() - 100, r50.max() + 100)
plt.title('Distance = %.2d kpc' % d)
plt.savefig('resolvable_d_%.2f.png' %d)
| mit |
wathen/PhD | MHD/FEniCS/ShiftCurlCurl/CppGradient/PETScTest1.py | 1 | 12565 | import os, inspect
from dolfin import *
import numpy
from scipy.sparse import coo_matrix, block_diag, hstack, tril, spdiags, csr_matrix
import ExactSol
from scipy2Trilinos import scipy_csr_matrix2CrsMatrix
from PyTrilinos import Epetra, ML, AztecOO, Teuchos
import MatrixOperations as MO
import matplotlib.pylab as plt
import CheckPetsc4py as CP
import petsc4py
import sys
import HiptmairPrecond
import HiptmairApply
petsc4py.init(sys.argv)
from petsc4py import PETSc
path = os.path.abspath(os.path.join(inspect.getfile(inspect.currentframe()), ".."))
gradient_code = open(os.path.join(path, 'DiscreteGradient.cpp'), 'r').read()
compiled_gradient_module = compile_extension_module(code=gradient_code)
m = 8
errL2b =numpy.zeros((m-1,1))
errCurlb =numpy.zeros((m-1,1))
l2border = numpy.zeros((m-1,1))
Curlborder =numpy.zeros((m-1,1))
ItsSave = numpy.zeros((m-1,1))
DimSave = numpy.zeros((m-1,1))
TimeSave = numpy.zeros((m-1,1))
NN = numpy.zeros((m-1,1))
Curlgrad = numpy.zeros((m-1,1))
Massgrad = numpy.zeros((m-1,1))
Laplgrad = numpy.zeros((m-1,1))
dim = 3
for xx in xrange(1,m):
NN[xx-1] = xx+0
nn = int(2**(NN[xx-1][0]))
omega = 1
if dim == 2:
# mesh = UnitSquareMesh(int(nn),int(nn))
mesh = RectangleMesh(0.0, 0.0, 1.0, 1.5, int(nn), int(nn), 'left')
u0, p0, CurlCurl, gradPres, CurlMass = ExactSol.M2D(2,Show="yes", Mass = omega)
else:
mesh = UnitCubeMesh(int(nn),int(nn),int(nn))
u0, p0, CurlCurl, gradPres, CurlMass = ExactSol.M3D(1,Show="yes", Mass = omega)
order = 1
parameters['reorder_dofs_serial'] = False
Magnetic = FunctionSpace(mesh, "N1curl", order)
Lagrange = FunctionSpace(mesh, "CG", order)
VLagrange = VectorFunctionSpace(mesh, "CG", order)
parameters['reorder_dofs_serial'] = False
W = Magnetic*Lagrange
DimSave[xx-1] = Magnetic.dim()
print Magnetic.dim()
parameters['linear_algebra_backend'] = 'uBLAS'
column = numpy.zeros(2*mesh.num_edges(), order="C") #, dtype="intc")
row = numpy.zeros(2*mesh.num_edges(), order="C") #, dtype="intc")
data = numpy.zeros(2*mesh.num_edges(), order="C") #, dtype="intc")
# Mapping = dof_to_vertex_map(Lagrange)
# c = compiled_gradient_module.Gradient(Magnetic, Mapping.astype("intc"),column,row,data)
dataX = numpy.zeros(2*mesh.num_edges(), order="C")
dataY = numpy.zeros(2*mesh.num_edges(), order="C")
dataZ = numpy.zeros(2*mesh.num_edges(), order="C")
tic()
c = compiled_gradient_module.ProlongationGradsecond(mesh, dataX,dataY,dataZ, data, row, column)
print "C++ time:", toc()
C = coo_matrix((data,(row,column)), shape=(Magnetic.dim(),Lagrange.dim())).tocsr()
# AA = Matrix()
# AA.set(data.astype("float_"),row.astype("intc"),column.astype("intc"))
# print AA.sparray().todense()
Px = coo_matrix((dataX,(row,column)), shape=(Magnetic.dim(),Lagrange.dim())).tocsr()
Py = coo_matrix((dataY,(row,column)), shape=(Magnetic.dim(),Lagrange.dim())).tocsr()
Pz = coo_matrix((dataZ,(row,column)), shape=(Magnetic.dim(),Lagrange.dim())).tocsr()
Px.eliminate_zeros()
Py.eliminate_zeros()
Pz.eliminate_zeros()
if Magnetic.dim() == 8001:
VertexDoF = numpy.sin(numpy.linspace(0.0, 2*numpy.pi, num=mesh.num_vertices()))
EdgeDoFX = Px*VertexDoF
EdgeDoFY = Py*VertexDoF
EEX = Function(Magnetic)
EEY = Function(Magnetic)
VV = Function(Lagrange)
VV.vector()[:] = VertexDoF
EEX.vector()[:] = EdgeDoFX
EEY.vector()[:] = EdgeDoFY
# file = File("Plots/MagneticXdirection_"+str(Magnetic.dim())+".pvd")
# file << EEX
# file = File("Plots/MagneticYdirection_"+str(Magnetic.dim())+".pvd")
# file << EEY
# file = File("Plots/Nodal_"+str(Magnetic.dim())+".pvd")
# file << VV
        plot(EEX,title="Magnetic interpolation X-direction")
        plot(EEY,title="Magnetic interpolation Y-direction")
        plot(VV,title="Nodal representation")
def boundary(x, on_boundary):
return on_boundary
bc = DirichletBC(Magnetic,u0, boundary)
bcu = DirichletBC(Lagrange, Expression(("0.0")), boundary)
if dim == 3:
bcW = DirichletBC(W.sub(0),Expression(("1.0","1.0","1.0")), boundary)
else:
bcW = DirichletBC(W.sub(0),Expression(("1.0","1.0")), boundary)
bcuW = DirichletBC(W.sub(1), Expression(("1.0")), boundary)
(v) = TestFunction(Magnetic)
(u) = TrialFunction(Magnetic)
(p) = TrialFunction(Lagrange)
(q) = TestFunction(Lagrange)
(Vp) = TrialFunction(VLagrange)
(Vq) = TestFunction(VLagrange)
Wv,Wq=TestFunctions(W)
Wu,Wp=TrialFunctions(W)
NS_is = PETSc.IS().createGeneral(range(Magnetic.dim()))
M_is = PETSc.IS().createGeneral(range(Magnetic.dim()+Lagrange.dim(),W.dim()))
# AA = assemble(inner(grad(Wp),Wv)*dx+inner(grad(Wq),Wu)*dx)
tic()
AA = Function(W).vector()
AA[:] = 0
# AA.zero()
# plt.subplot(1, 3, 1)
# plt.spy(AA.sparray())
bcW.apply(AA)
# plt.subplot(1, 3, 2)
# plt.spy(AA.sparray())
# # plt.spy(AA.sparray())
# # # plt.figure()
# # # # AA.zero()
bcuW.apply(AA)
# plt.subplot(1, 3, 3)
# plt.spy(AA.sparray())
# plt.show()
# aa
diagAA = AA.array()
print toc()
# print diagAAdd
# diagAA = AA.diagonal()
BClagrange = numpy.abs(diagAA[Magnetic.dim():]) > 1e-3
BCmagnetic = numpy.abs(diagAA[:Magnetic.dim()]) > 1e-3
onelagrange = numpy.ones(Lagrange.dim())
onemagnetic = numpy.ones(Magnetic.dim())
onelagrange[BClagrange] = 0
onemagnetic[BCmagnetic] = 0
Diaglagrange = spdiags(onelagrange,0,Lagrange.dim(),Lagrange.dim())
Diagmagnetic = spdiags(onemagnetic,0,Magnetic.dim(),Magnetic.dim())
# plt.spy(C*Diag)
# plt.figure()
plt.spy(Diagmagnetic)
# plt.show()
tic()
C = Diagmagnetic*C*Diaglagrange
print "BC applied to gradient, time: ", toc()
# print C
# BC = ~numpy.array(BC)
# tic
# CC = C
# ii,jj = C[:,BC].nonzero()
# C[ii,jj] = 0
# print (CC-C).todense()
# # print C
# Curl = assemble(inner(curl(u),curl(v))*dx)
# Mass = assemble(inner(u,v)*dx)
# Grad = assemble(inner(grad(p),v)*dx)
# Laplacian = assemble(inner(grad(q),grad(p))*dx)
# # plt.spy(C)
# # plt.show()
# bc.apply(Curl)
# bc.apply(Mass)
# bcu.apply(Laplacian)
# print "========================================"
# Curlgrad[xx-1]=(Curl.sparray()*C).max()
# print " Curl-gradient test: ", Curlgrad[xx-1]
# Massgrad[xx-1]=(Mass.sparray()*C-Diagmagnetic*Grad.sparray()*Diaglagrange).max()
# print " Mass-gradient test: ", Massgrad[xx-1]
# Laplgrad[xx-1]=(Diaglagrange*Grad.sparray().transpose()*Diagmagnetic*C-Laplacian.sparray()).max()
# print " Lapl-gradient test: ", Laplgrad[xx-1]
# print "========================================"
# # # Grad = AA.
# Grad.zero()
# bcu.apply(Grad)
G = PETSc.Mat().createAIJ(size=C.shape,csr=(C.indptr, C.indices, C.data))
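    # G is the discrete gradient: it maps nodal (Lagrange) degrees of freedom
    # to edge (N1curl) degrees of freedom and, together with the nodal
    # prolongations P built below, supplies the auxiliary spaces used by the
    # Hiptmair-style preconditioner.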
a = inner(curl(u),curl(v))*dx + inner(u,v)*dx
L1 = inner(v, CurlMass)*dx
tic()
Acurl,b = assemble_system(a,L1,bc)
print "System assembled, time: ", toc()
tic()
A,b = CP.Assemble(Acurl,b)
x = b.duplicate()
print "PETSc system assembled, time: ", toc()
tic()
ScalarLaplacian = assemble(inner(grad(p),grad(q))*dx)
VectorLaplacian = assemble(inner(grad(p),grad(q))*dx+inner(p,q)*dx)
print "Hiptmair Laplacians assembled, time: ", toc()
# tic()
# Pxx = Px
# Pyy = Py
# ii,jj = Px[:,BC].nonzero()
# Px[ii,jj] = 0
# ii,jj = Px[:,BC].nonzero()
# Py[ii,jj] = 0
# print toc()
# print (Px).todense()
# print (Pxx).todense()
if dim == 2:
tic()
Px = Diagmagnetic*Px*Diaglagrange
Py = Diagmagnetic*Py*Diaglagrange
print "BC applied to Prolongation, time: ", toc()
# P = hstack([Px,Py]).tocsr()
bcVu = DirichletBC(VLagrange, Expression(("0.0","0.0")), boundary)
P = [PETSc.Mat().createAIJ(size=Px.shape,csr=(Px.indptr, Px.indices, Px.data)),PETSc.Mat().createAIJ(size=Py.shape,csr=(Py.indptr, Py.indices, Py.data))]
else:
tic()
Px = Diagmagnetic*Px*Diaglagrange
Py = Diagmagnetic*Py*Diaglagrange
Pz = Diagmagnetic*Pz*Diaglagrange
print "BC applied to Prolongation, time: ", toc()
bcVu = DirichletBC(VLagrange, Expression(("0.0","0.0","0.0")), boundary)
P = [PETSc.Mat().createAIJ(size=Px.shape,csr=(Px.indptr, Px.indices, Px.data)),PETSc.Mat().createAIJ(size=Py.shape,csr=(Py.indptr, Py.indices, Py.data)),PETSc.Mat().createAIJ(size=Pz.shape,csr=(Pz.indptr, Pz.indices, Pz.data))]
tic()
bcu.apply(VectorLaplacian)
bcu.apply(ScalarLaplacian)
print "Hiptmair Laplacians BC applied, time: ", toc()
# Pt = P.transpose().tocsr()
# Pt = PETSc.Mat().createAIJ(size=Pt.shape,csr=(Pt.indptr, Pt.indices, Pt.data))
# P = PETSc.Mat().createAIJ(size=P.shape,csr=(P.indptr, P.indices, P.data))
VectorLaplacian = PETSc.Mat().createAIJ(size=VectorLaplacian.sparray().shape,csr=(VectorLaplacian.sparray().indptr, VectorLaplacian.sparray().indices, VectorLaplacian.sparray().data))
ScalarLaplacian = PETSc.Mat().createAIJ(size=ScalarLaplacian.sparray().shape,csr=(ScalarLaplacian.sparray().indptr, ScalarLaplacian.sparray().indices, ScalarLaplacian.sparray().data))
# CurlCurl = assemble(inner(curl(u),curl(v))*dx+inner(u,v)*dx)
# # bc.apply(CurlCurl)
# CurlCurlPetsc = CP.Assemble(CurlCurl)
ksp = PETSc.KSP().create()
ksp.setTolerances(1e-6)
ksp.setType('cg')
ksp.setOperators(A,A)
pc = ksp.getPC()
pc.setType(PETSc.PC.Type.PYTHON)
# pc.setPythonContext(HiptmairPrecond.Direct(G, Gt, P, Pt, VectorLaplacian, ScalarLaplacian))
Lower = tril(Acurl.sparray()).tocsr()
Upper = Lower.transpose().tocsr()
Lower = PETSc.Mat().createAIJ(size=Lower.shape,csr=(Lower.indptr, Lower.indices, Lower.data))
Upper = PETSc.Mat().createAIJ(size=Upper.shape,csr=(Upper.indptr, Upper.indices, Upper.data))
pc.setPythonContext(HiptmairPrecond.GSvector(G, P, VectorLaplacian, ScalarLaplacian, Lower, Upper))
scale = b.norm()
b = b/scale
tic()
ksp.solve(b, x)
TimeSave[xx-1] = toc()
x = x*scale
print ksp.its
print TimeSave[xx-1]
ItsSave[xx-1] = ksp.its
xa = Function(Magnetic)
# x = b.duplicate()
# tic()
# # x, i = HiptmairApply.cg(A, b, x,VectorLaplacian ,ScalarLaplacian , P, G, Magnetic, bc, 1000)
# print toc()
# x = x*scale
xa.vector()[:] = x.array
ue = u0
pe = p0
# parameters["form_compiler"]["quadrature_degree"] = 15
Ve = FunctionSpace(mesh,"N1curl",2)
u = interpolate(ue,Ve)
    print "======================"
ErrorB = Function(Magnetic)
ErrorB = u-xa
errL2b[xx-1] = sqrt(abs(assemble(inner(ErrorB, ErrorB)*dx)))
errCurlb[xx-1] = sqrt(abs(assemble(inner(curl(ErrorB), curl(ErrorB))*dx)))
if xx == 1:
a = 1
else:
l2border[xx-1] = numpy.abs(numpy.log2(errL2b[xx-2]/errL2b[xx-1]))
Curlborder[xx-1] = numpy.abs(numpy.log2(errCurlb[xx-2]/errCurlb[xx-1]))
print errL2b[xx-1]
print errCurlb[xx-1]
print " \n\n\n\n"
import pandas as pd
# plot(xa)
# plot(u)
LatexTitlesB = ["l","B DoF","BB-L2","B-order","BB-Curl","Curl-order"]
LatexValuesB = numpy.concatenate((NN,DimSave,errL2b,l2border,errCurlb,Curlborder),axis=1)
LatexTableB= pd.DataFrame(LatexValuesB, columns = LatexTitlesB)
pd.set_option('precision',3)
LatexTableB = MO.PandasFormat(LatexTableB,'BB-Curl',"%2.4e")
LatexTableB = MO.PandasFormat(LatexTableB,'BB-L2',"%2.4e")
LatexTableB = MO.PandasFormat(LatexTableB,'Curl-order',"%2.2f")
LatexTableB = MO.PandasFormat(LatexTableB,'B-order',"%2.2f")
print LatexTableB#.to_latex()
print "\n\n\n"
ItsTitlesB = ["l","B DoF","Time","Iterations"]
ItsValuesB = numpy.concatenate((NN,DimSave,TimeSave,ItsSave),axis=1)
ItsTableB= pd.DataFrame(ItsValuesB, columns = ItsTitlesB)
pd.set_option('precision',5)
print ItsTableB.to_latex()
interactive()
GradTests = ["l","B DoF","$AC$","$MC-B^T$","$BC-L$"]
GradValues = numpy.concatenate((NN,DimSave,Curlgrad, Massgrad, Laplgrad),axis=1)
GradTab= pd.DataFrame(GradValues, columns = GradTests)
pd.set_option('precision',3)
GradTab = MO.PandasFormat(GradTab,'$AC$',"%2.4e")
GradTab = MO.PandasFormat(GradTab,'$MC-B^T$',"%2.4e")
GradTab = MO.PandasFormat(GradTab,'$BC-L$',"%2.4e")
print GradTab.to_latex()
# Curlgrad, Massgrad, Laplgrad
| mit |
wanggang3333/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <[email protected]>
# Olivier Grisel <[email protected]>
# Michael Becker <[email protected]>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
    SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
| bsd-3-clause |
StuartLittlefair/astropy | examples/io/plot_fits-image.py | 11 | 1898 | # -*- coding: utf-8 -*-
"""
=======================================
Read and plot an image from a FITS file
=======================================
This example opens an image stored in a FITS file and displays it to the screen.
This example uses `astropy.utils.data` to download the file, `astropy.io.fits` to open
the file, and `matplotlib.pyplot` to display the image.
*By: Lia R. Corrales, Adrian Price-Whelan, Kelle Cruz*
*License: BSD*
"""
##############################################################################
# Set up matplotlib and use a nicer set of plot parameters
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Download the example FITS files used by this example:
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits
image_file = get_pkg_data_filename('tutorials/FITS-images/HorseHead.fits')
##############################################################################
# Use `astropy.io.fits.info()` to display the structure of the file:
fits.info(image_file)
##############################################################################
# Generally the image information is located in the Primary HDU, also known
# as extension 0. Here, we use `astropy.io.fits.getdata()` to read the image
# data from this first extension using the keyword argument ``ext=0``:
image_data = fits.getdata(image_file, ext=0)
##############################################################################
# The data is now stored as a 2D numpy array. Print the dimensions using the
# shape attribute:
print(image_data.shape)
##############################################################################
# Display the image data:
plt.figure()
plt.imshow(image_data, cmap='gray')
plt.colorbar()
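##############################################################################
# A small usage note (illustrative): when running this script outside the
# sphinx-gallery documentation build, finish with ``plt.show()`` so the figure
# window is actually displayed:
#
#     plt.show()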
| bsd-3-clause |
emmjaykay/reddit-faqizer | faq.py | 1 | 6166 | #!/usr/bin/env python
import praw
import pickle
import sys
import requests
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import numpy as np
import nltk
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
from sklearn import decomposition
from gensim import corpora, models, similarities
import pprint
import argparse
corpus = []
def fetchFromFileAndSave(f, url):
comments = fetchFromUrl(url)
saveToFile(f, comments)
return comments
def saveToFile(f,comments):
pickle.dump( comments, open(f, "wb"))
def fetchFromFile(f):
comments = pickle.load(open(f, 'r'))
return comments
pass
def fetchFromUrl(url):
r = praw.Reddit(user_agent='example')
amy = None
try:
amy = r.get_submission(url)
except (requests.exceptions.MissingSchema):
print "%s is not a valid schema" % (url)
sys.exit()
except (requests.exceptions.ConnectionError):
print "%s couldn't be connected " % (url)
sys.exit()
except (requests.exceptions.InvalidURL):
print "Invalid URL"
sys.exit()
except (requests.exceptions.HTTPError):
print "Couldn't find %s" % (url)
sys.exit()
if amy is None:
print "Unknown error with URL fetching!"
sys.exit()
print "Starting to download all comments for this submission (takes time)"
amy.replace_more_comments(limit=None, threshold=0)
comments = []
for c in amy.comments:
if c.parent_id.find(amy.id) == -1:
continue
if type(c) == praw.objects.MoreComments:
continue
a = ' '.join(c.body.splitlines())
comments.append(a)
return comments
pass
def dbscan_(X):
sys.stdout.write("Preparing to create DBSCAN...")
sys.stdout.flush()
km = DBSCAN(eps=.05, min_samples=1)
sys.stdout.write("done!\n")
sys.stdout.write("Running fit()...")
sys.stdout.flush()
km.fit(X)
sys.stdout.write("done!\n")
res = {}
for i in range(len(corpus)):
label = km.labels_[i]
if res.has_key(label):
if corpus[i] not in res[label]:
res[label].append(corpus[i])
else:
res[label] = []
res[label].append(corpus[i])
for i in res.keys():
if len(res[i]) > 1:
pprint.pprint(res[i])
def gensim_(documents):
stoplist = set('for a of the and to in'.split())
texts = [[word for word in document.lower().split() if word not in stoplist]
for document in documents]
dictionary = corpora.Dictionary(texts)
print(dictionary.token2id)
def nmf_(X):
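    # Clip negatives, factor the matrix with NMF (10 components), and print
    # the highest-weighted vocabulary terms for each latent topic.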
sys.stdout.write("Preparing to run NMF ... ")
sys.stdout.flush()
X[X < 0] = 0.0
km = decomposition.NMF(n_components=10, random_state=1)
sys.stdout.write("done!\n")
sys.stdout.write("Running fit()...")
sys.stdout.flush()
km.fit(X)
sys.stdout.write("done!\n")
feature_names = vectorizer.get_feature_names()
for topic_idx, topic in enumerate(km.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
def nn_(X):
pass
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Download and find duplicate questions for a Reddit submission.')
    parser.add_argument('-n', metavar='m_ngram', type=str, nargs='?', help="Maximum n-gram number")
parser.add_argument('-u', metavar='url', type=str, nargs='?', help="URL to fetch comments from")
parser.add_argument('-f', metavar='f', type=str, nargs='?', help="Pickle File to fetch list of comments from")
parser.add_argument('-F', metavar='F', type=str, nargs='*', help="Use NMF")
parser.add_argument('-g', action='store_true', help="Use gensim")
args = parser.parse_args()
print args
m_ngram = 4
comments = []
sys.stdout.write("Preparing to collect stopwords...")
sys.stdout.flush()
stop = stopwords.words('english')
stop.append("Hi amy")
stop.append("when is")
stop.append("how does")
stop.append("&")
sys.stdout.write("done!\n")
sys.stdout.write("Preparing to collect data from source...")
sys.stdout.flush()
if args.f is not None and args.u is not None:
comments = fetchFromFileAndSave(args.f, args.u)
elif args.f is not None:
comments = fetchFromFile(args.f)
elif args.u is not None:
comments = fetchFromUrl(args.u)
if args.n is not None:
m_ngram = int(args.n)
sys.stdout.write("done!\n")
sys.stdout.write("Preparing to collect comments...")
sys.stdout.flush()
for c in comments:
a = ' '.join(c.splitlines())
tok = word_tokenize(a)
tok_stopped = []
stopped = []
for word in tok:
if word not in stop:
tok_stopped.append(word)
corpus.append(a)
sys.stdout.write("done!\n")
sys.stdout.write("Preparing to create TFIDF vector...")
sys.stdout.flush()
vectorizer = TfidfVectorizer(ngram_range=(2,m_ngram),
stop_words=stop,
max_df=0.95,
min_df=2,
max_features=1000)
tfidf = vectorizer.fit_transform(corpus)
tfidf = tfidf * tfidf.T
sys.stdout.write("done!\n")
sys.stdout.write("Preparing to scale date...")
sys.stdout.flush()
X = StandardScaler().fit_transform(tfidf.todense())
sys.stdout.write("done!\n")
if args.g is True:
gensim_(comments)
elif args.F is None:
pca = decomposition.PCA()
pca.fit(X)
X = pca.transform(X)
dbscan_(X)
else:
nmf_(X)
| gpl-3.0 |
aabadie/scikit-learn | sklearn/utils/tests/test_metaestimators.py | 3 | 2293 | from nose.tools import assert_true, assert_false
from sklearn.utils.metaestimators import if_delegate_has_method
class Prefix(object):
def func(self):
pass
class MockMetaEstimator(object):
"""This is a mock meta estimator"""
a_prefix = Prefix()
@if_delegate_has_method(delegate="a_prefix")
def func(self):
"""This is a mock delegated function"""
pass
def test_delegated_docstring():
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.__dict__['func'].__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.func.__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator().func.__doc__))
class MetaEst(object):
"""A mock meta estimator"""
def __init__(self, sub_est, better_sub_est=None):
self.sub_est = sub_est
self.better_sub_est = better_sub_est
@if_delegate_has_method(delegate='sub_est')
def predict(self):
pass
class MetaEstTestTuple(MetaEst):
"""A mock meta estimator to test passing a tuple of delegates"""
@if_delegate_has_method(delegate=('sub_est', 'better_sub_est'))
def predict(self):
pass
class MetaEstTestList(MetaEst):
"""A mock meta estimator to test passing a list of delegates"""
@if_delegate_has_method(delegate=['sub_est', 'better_sub_est'])
def predict(self):
pass
class HasPredict(object):
"""A mock sub-estimator with predict method"""
def predict(self):
pass
class HasNoPredict(object):
"""A mock sub-estimator with no predict method"""
pass
def test_if_delegate_has_method():
assert_true(hasattr(MetaEst(HasPredict()), 'predict'))
assert_false(hasattr(MetaEst(HasNoPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestTuple(HasNoPredict(), HasNoPredict()), 'predict'))
assert_true(
hasattr(MetaEstTestTuple(HasPredict(), HasNoPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestTuple(HasNoPredict(), HasPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestList(HasNoPredict(), HasPredict()), 'predict'))
assert_true(
hasattr(MetaEstTestList(HasPredict(), HasPredict()), 'predict'))
| bsd-3-clause |
nesterione/scikit-learn | sklearn/mixture/gmm.py | 128 | 31069 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <[email protected]>
# Fabian Pedregosa <[email protected]>
# Bertrand Thirion <[email protected]>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
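# Illustrative shape sketch (not part of the original module): with 10 samples
# in 2 dimensions and 3 diagonal components,
#
#     X = np.random.randn(10, 2)
#     means = np.zeros((3, 2))
#     covars = np.ones((3, 2))
#     log_multivariate_normal_density(X, means, covars, 'diag').shape  # (10, 3)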
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
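# Illustrative sketch (not part of the original module): five draws from a 2-D
# spherical Gaussian come back with shape (n_features, n_samples) == (2, 5):
#
#     sample_gaussian(np.zeros(2), 1.0, 'spherical', n_samples=5).shape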
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``covariance_type``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: due to the final maximization step in the EM algorithm,
the prediction may not be 100% accurate when the number of iterations is low.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
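# A minimal illustrative sketch of the component-selection step used by
# GMM.sample() above: a component is picked for every draw by inverting the
# cumulative distribution of the mixture weights.  The helper name, weights
# and seed below are the editor's own and are not part of scikit-learn.
def _example_component_selection(weights=(0.2, 0.5, 0.3), n_samples=10, seed=0):
    """Return one component index per sample, drawn with the given weights."""
    import numpy as np
    weights = np.asarray(weights)
    rng = np.random.RandomState(seed)
    weight_cdf = np.cumsum(weights)          # e.g. [0.2, 0.7, 1.0]
    rand = rng.rand(n_samples)               # uniform draws in [0, 1)
    # searchsorted returns the first cdf entry not smaller than rand, so
    # component k is selected with probability weights[k]
    return weight_cdf.searchsorted(rand)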
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
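# A minimal illustrative check (editor's own helper, not part of scikit-learn):
# the vectorized expression in _log_multivariate_normal_density_diag is
# algebraically the same as the familiar per-sample formula
# -0.5 * (D*log(2*pi) + sum(log(var)) + sum((x - mu)**2 / var)).
def _example_check_diag_log_density(n_samples=4, n_dim=3, n_components=2, seed=0):
    """Compare the vectorized diagonal log-density with a direct computation."""
    import numpy as np
    rng = np.random.RandomState(seed)
    X = rng.randn(n_samples, n_dim)
    means = rng.randn(n_components, n_dim)
    covars = rng.rand(n_components, n_dim) + 0.5   # positive diagonal variances
    fast = _log_multivariate_normal_density_diag(X, means, covars)
    slow = np.empty((n_samples, n_components))
    for c in range(n_components):
        diff2 = (X - means[c]) ** 2
        slow[:, c] = -0.5 * (n_dim * np.log(2 * np.pi)
                             + np.log(covars[c]).sum()
                             + (diff2 / covars[c]).sum(axis=1))
    assert np.allclose(fast, slow)
    return fast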
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations; we need to reinitialize this component
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
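# A minimal illustrative sketch (editor's own helper, not part of scikit-learn)
# of the computation performed per component above:
# log N(x; mu, Sigma) = -0.5 * (d*log(2*pi) + log|Sigma| + (x-mu)' Sigma^-1 (x-mu)),
# evaluated through a Cholesky factor Sigma = L L'.
def _example_log_density_via_cholesky(n_samples=5, n_dim=3, seed=0):
    """Log-density under one full-covariance Gaussian, spelled out step by step."""
    import numpy as np
    from scipy import linalg
    rng = np.random.RandomState(seed)
    X = rng.randn(n_samples, n_dim)
    mu = rng.randn(n_dim)
    A = rng.randn(n_dim, n_dim)
    cv = np.dot(A, A.T) + n_dim * np.eye(n_dim)          # symmetric positive-definite
    cv_chol = linalg.cholesky(cv, lower=True)            # Sigma = L L'
    cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))  # log|Sigma|
    # solve L z = (x - mu)' so that ||z||^2 = (x - mu)' Sigma^-1 (x - mu)
    cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
    log_prob = -0.5 * (np.sum(cv_sol ** 2, axis=1)
                       + n_dim * np.log(2 * np.pi) + cv_log_det)
    assert np.allclose(
        log_prob,
        _log_multivariate_normal_density_full(X, mu[np.newaxis], cv[np.newaxis])[:, 0])
    return log_prob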
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
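# A small illustrative sketch (editor's own helper, not part of scikit-learn)
# of the shapes produced by distribute_covar_matrix_to_match_covariance_type
# for each covariance type, starting from one (n_features, n_features) template.
def _example_covar_shapes(n_features=3, n_components=2):
    """Return a dict mapping covariance_type to the distributed array shape."""
    import numpy as np
    tied_cv = np.eye(n_features)
    shapes = {}
    for covariance_type in ('spherical', 'tied', 'diag', 'full'):
        cv = distribute_covar_matrix_to_match_covariance_type(
            tied_cv, covariance_type, n_components)
        shapes[covariance_type] = np.asarray(cv).shape
    # expected: spherical (2, 3), tied (3, 3), diag (2, 3), full (2, 3, 3)
    return shapes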
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
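# A minimal illustrative check (editor's own helper, not part of scikit-learn):
# _covar_mstep_diag relies on the identity
# E_w[(x - mu)**2] = E_w[x**2] - 2*mu*E_w[x] + mu**2,
# evaluated with responsibility-weighted averages.  The sketch verifies it in 1-D.
def _example_weighted_variance_identity(seed=0):
    """Verify the expansion used by the diagonal covariance M-step."""
    import numpy as np
    rng = np.random.RandomState(seed)
    x = rng.randn(50)
    w = rng.rand(50)
    w /= w.sum()                      # responsibility-like weights
    mu = np.dot(w, x)                 # weighted mean, as in the means update
    direct = np.dot(w, (x - mu) ** 2)
    expanded = np.dot(w, x ** 2) - 2 * mu * np.dot(w, x) + mu ** 2
    assert np.allclose(direct, expanded)
    return direct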
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
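# A small worked sketch (editor's own helper and numbers, not part of
# scikit-learn) of the information criteria computed by GMM.bic / GMM.aic above:
#     BIC = -2 * sum(log-likelihood) + p * log(n_samples)
#     AIC = -2 * sum(log-likelihood) + 2 * p
# where p is the free-parameter count returned by GMM._n_parameters().
def _example_information_criteria(n_components=3, n_features=2,
                                  total_log_likelihood=-250.0, n_samples=100):
    """Return (n_free_parameters, bic, aic) for a diagonal-covariance GMM."""
    import numpy as np
    cov_params = n_components * n_features          # 'diag': 3 * 2 = 6
    mean_params = n_components * n_features         # 3 * 2 = 6
    weight_params = n_components - 1                # weights sum to one: 2
    p = cov_params + mean_params + weight_params    # 14 free parameters
    bic = -2 * total_log_likelihood + p * np.log(n_samples)
    aic = -2 * total_log_likelihood + 2 * p
    return p, bic, aic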
| bsd-3-clause |
q1ang/scikit-learn | benchmarks/bench_plot_omp_lars.py | 266 | 4447 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but has a fat, infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
n_informative = n_features / 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
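# Each timing above follows the same pattern: force a garbage collection,
# record time(), run the estimator, report the delta.  A small reusable sketch
# of that pattern; the helper name and signature are the editor's own and are
# not part of this benchmark.
def _example_timeit(func, *args, **kwargs):
    """Time a single call of ``func`` after collecting garbage first."""
    import gc
    from time import time
    gc.collect()
    tstart = time()
    func(*args, **kwargs)
    return time() - tstart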
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import pylab as pl
fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
for i, (label, timings) in enumerate(sorted(results.iteritems())):
ax = fig.add_subplot(1, 2, i)
vmax = max(1 - timings.min(), -1 + timings.max())
pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + map(str, samples_range))
ax.set_yticklabels([''] + map(str, features_range))
pl.xlabel('n_samples')
pl.ylabel('n_features')
pl.title(label)
pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = pl.axes([0.1, 0.08, 0.8, 0.06])
pl.colorbar(cax=ax, orientation='horizontal')
pl.show()
| bsd-3-clause |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/matplotlib/delaunay/interpolate.py | 21 | 7262 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from matplotlib._delaunay import compute_planes, linear_interpolate_grid
from matplotlib._delaunay import nn_interpolate_grid
from matplotlib._delaunay import nn_interpolate_unstructured
__all__ = ['LinearInterpolator', 'NNInterpolator']
def slice2gridspec(key):
"""Convert a 2-tuple of slices to start,stop,steps for x and y.
key -- (slice(ystart,ystop,ystep), slice(xstart, xstop, xstep))
For now, the only accepted step values are imaginary integers (interpreted
in the same way numpy.mgrid, etc. do).
"""
if ((len(key) != 2) or
(not isinstance(key[0], slice)) or
(not isinstance(key[1], slice))):
raise ValueError("only 2-D slices, please")
x0 = key[1].start
x1 = key[1].stop
xstep = key[1].step
if not isinstance(xstep, complex) or int(xstep.real) != xstep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
xstep = int(xstep.imag)
y0 = key[0].start
y1 = key[0].stop
ystep = key[0].step
if not isinstance(ystep, complex) or int(ystep.real) != ystep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
ystep = int(ystep.imag)
return x0, x1, xstep, y0, y1, ystep
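# A minimal illustrative sketch (editor's own helper, not part of matplotlib):
# the imaginary step accepted by slice2gridspec is the numpy.mgrid convention,
# where ``5j`` means "5 evenly spaced points including both endpoints", not a
# step size.
def _example_imaginary_step_convention():
    """Show the numpy.mgrid meaning of a complex step and the parsed gridspec."""
    import numpy as np
    yy, xx = np.mgrid[0:1:3j, 0:2:5j]      # 3 rows in y, 5 columns in x
    assert yy.shape == xx.shape == (3, 5)
    x0, x1, xstep, y0, y1, ystep = slice2gridspec(
        (slice(0, 1, 3j), slice(0, 2, 5j)))
    assert (x0, x1, xstep, y0, y1, ystep) == (0, 2, 5, 0, 1, 3)
    return xx, yy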
class LinearInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
using the planes defined by the three function values at each corner of
the triangles.
LinearInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
At the moment, only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Attributes:
planes -- (ntriangles, 3) array of floats specifying the plane for each
triangle.
Linear Interpolation
--------------------
Given the Delaunay triangulation (or indeed *any* complete triangulation)
we can interpolate values inside the convex hull by locating the enclosing
triangle of the interpolation point and returning the value at that point
of the plane defined by the three node values.
f = planes[tri,0]*x + planes[tri,1]*y + planes[tri,2]
The interpolated function is C0 continuous across the convex hull of the
input points. It is C1 continuous across the convex hull except for the
nodes and the edges of the triangulation.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
self.planes = compute_planes(triangulation.x, triangulation.y, self.z,
triangulation.triangle_nodes)
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = linear_interpolate_grid(
x0, x1, xstep, y0, y1, ystep, self.default_value,
self.planes, self.triangulation.x, self.triangulation.y,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return grid
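# A minimal illustrative sketch (editor's own helper and numbers, not part of
# matplotlib): inside each triangle, LinearInterpolator evaluates the plane
# f = a*x + b*y + c fixed by the three corner values.  Here the plane for a
# single triangle is solved for directly and evaluated at one point.
def _example_plane_through_triangle(point=(0.25, 0.25)):
    """Interpolate linearly inside a single triangle by fitting its plane."""
    import numpy as np
    corners = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])   # (x, y) nodes
    values = np.array([1.0, 3.0, 5.0])                          # z at each node
    # a*x + b*y + c = z at every corner -> a 3x3 linear system
    A = np.column_stack([corners, np.ones(3)])
    a, b, c = np.linalg.solve(A, values)
    x, y = point
    return a * x + b * y + c        # 2*0.25 + 4*0.25 + 1 = 2.5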
class NNInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
the natural neighbors method.
NNInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
At the moment, only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Natural Neighbors Interpolation
-------------------------------
One feature of the Delaunay triangulation is that for each triangle, its
circumcircle contains no other point (although in degenerate cases, like
squares, other points may be *on* the circumcircle). One can also
construct what is called the Voronoi diagram from a Delaunay triangulation
by connecting the circumcenters of the triangles to those of their
neighbors to form a tessellation of irregular polygons covering the plane
and containing only one node from the triangulation. Each point in one
node's Voronoi polygon is closer to that node than any other node.
To compute the Natural Neighbors interpolant, we consider adding the
interpolation point to the triangulation. We define the natural neighbors
of this point as the set of nodes participating in Delaunay triangles
whose circumcircles contain the point. To restore the Delaunay-ness of the
triangulation, one would only have to alter those triangles and Voronoi
polygons. The new Voronoi diagram would have a polygon around the
inserted point. This polygon would "steal" area from the original Voronoi
polygons. For each node i in the natural neighbors set, we compute the
area stolen from its original Voronoi polygon, stolen[i]. We define the
natural neighbors coordinates
phi[i] = stolen[i] / sum(stolen,axis=0)
We then use these phi[i] to weight the corresponding function values from
the input data z to compute the interpolated value.
The interpolated surface is C1-continuous except at the nodes themselves
across the convex hull of the input points. One can find the set of points
that a given node will affect by computing the union of the areas covered
by the circumcircles of each Delaunay triangle that node participates in.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = nn_interpolate_grid(
x0, x1, xstep, y0, y1, ystep, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return grid
def __call__(self, intx, inty):
intz = nn_interpolate_unstructured(intx, inty, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return intz
| gpl-3.0 |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/tests/io/test_packers.py | 3 | 31546 | import pytest
from warnings import catch_warnings
import os
import datetime
import numpy as np
import sys
from distutils.version import LooseVersion
from pandas import compat
from pandas.compat import u, PY3
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, period_range, Index, Categorical)
from pandas.errors import PerformanceWarning
from pandas.io.packers import to_msgpack, read_msgpack
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_categorical_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
patch)
from pandas.tests.test_panel import assert_panel_equal
import pandas
from pandas import Timestamp, NaT
from pandas._libs.tslib import iNaT
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
@pytest.fixture(scope='module')
def current_packers_data():
# our current version packers data
from pandas.tests.io.generate_legacy_storage_files import (
create_msgpack_data)
return create_msgpack_data()
@pytest.fixture(scope='module')
def all_packers_data():
# our all of our current version packers data
from pandas.tests.io.generate_legacy_storage_files import (
create_data)
return create_data()
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
elif isinstance(a, Categorical):
# Temp,
# Categorical.categories is changed from str to bytes in PY3
# maybe the same as GH 13591
if PY3 and b.categories.inferred_type == 'string':
pass
else:
tm.assert_categorical_equal(a, b)
elif a is NaT:
assert b is NaT
elif isinstance(a, Timestamp):
assert a == b
assert a.freq == b.freq
else:
assert(a == b)
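# A minimal illustrative sketch (editor's own helper, not part of the pandas
# test suite) of the round trip every test below exercises: pack an object
# with to_msgpack, read it back with read_msgpack, and compare.  This assumes
# a pandas version that still ships the msgpack IO used by this file (it was
# removed in pandas 1.0).
def _example_msgpack_roundtrip():
    """Round-trip a small DataFrame through the msgpack serializer."""
    df = DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})
    packed = to_msgpack(None, df)        # passing None returns the packed bytes
    result = read_msgpack(packed)
    assert_frame_equal(result, df)
    return result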
class TestPackers(object):
def setup_method(self, method):
self.path = '__%s__.msg' % tm.rands(10)
def teardown_method(self, method):
pass
def encode_decode(self, x, compress=None, **kwargs):
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10, 2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(compat.BytesIO(s))
tm.assert_frame_equal(result, df)
s = to_msgpack(None, df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
fh = open(p, 'wb')
fh.write(s)
fh.close()
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_iterator_with_string_io(self):
dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)]
s = to_msgpack(None, *dfs)
for i, result in enumerate(read_msgpack(s, iterator=True)):
tm.assert_frame_equal(result, dfs[i])
def test_invalid_arg(self):
# GH10369
class A(object):
def __init__(self):
self.read = 0
pytest.raises(ValueError, read_msgpack, path_or_buf=None)
pytest.raises(ValueError, read_msgpack, path_or_buf={})
pytest.raises(ValueError, read_msgpack, path_or_buf=A())
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, 'complex128'):
pytest.skip('numpy cant handle complex128')
x = [np.float32(np.random.rand()) for i in range(5)] + \
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
assert np.allclose(x, x_rec)
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_complex(self):
x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
x_rec = self.encode_decode(x)
tm.assert_dict_equal(x, x_rec)
for key in x:
tm.assert_class_equal(x[key], x_rec[key], obj="complex value")
def test_dict_numpy_float(self):
x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_numpy_complex(self):
x = {'foo': np.complex128(1.0 + 1.0j),
'bar': np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
tm.assert_dict_equal(x, x_rec)
for key in x:
tm.assert_class_equal(x[key], x_rec[key], obj="numpy complex128")
def test_numpy_array_float(self):
# run multiple times
for n in range(10):
x = np.random.rand(10)
for dtype in ['float32', 'float64']:
x = x.astype(dtype)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
assert (all(map(lambda x, y: x == y, x, x_rec)) and
x.dtype == x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
class TestBasic(TestPackers):
def test_timestamp(self):
for i in [Timestamp(
'20130101'), Timestamp('20130101', tz='US/Eastern'),
Timestamp('201301010501')]:
i_rec = self.encode_decode(i)
assert i == i_rec
def test_nat(self):
nat_rec = self.encode_decode(NaT)
assert NaT is nat_rec
def test_datetimes(self):
# fails under 2.6/win32 (np.datetime64 seems broken)
if LooseVersion(sys.version) < '2.7':
pytest.skip('2.6 with np.datetime64 is broken')
for i in [datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 1, 5, 1),
datetime.date(2013, 1, 1),
np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
i_rec = self.encode_decode(i)
assert i == i_rec
def test_timedeltas(self):
for i in [datetime.timedelta(days=1),
datetime.timedelta(days=1, seconds=10),
np.timedelta64(1000000)]:
i_rec = self.encode_decode(i)
assert i == i_rec
class TestIndex(TestPackers):
def setup_method(self, method):
super(TestIndex, self).setup_method(method)
self.d = {
'string': tm.makeStringIndex(100),
'date': tm.makeDateIndex(100),
'int': tm.makeIntIndex(100),
'rng': tm.makeRangeIndex(100),
'float': tm.makeFloatIndex(100),
'empty': Index([]),
'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
'period': Index(period_range('2012-1-1', freq='M', periods=3)),
'date2': Index(date_range('2013-01-1', periods=10)),
'bdate': Index(bdate_range('2013-01-02', periods=10)),
'cat': tm.makeCategoricalIndex(100)
}
self.mi = {
'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),
('foo', 'two'),
('qux', 'one'), ('qux', 'two')],
names=['first', 'second']),
}
def test_basic_index(self):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
# datetime with no freq (GH5506)
i = Index([Timestamp('20130101'), Timestamp('20130103')])
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
# datetime with timezone
i = Index([Timestamp('20130101 9:00:00'), Timestamp(
'20130103 11:00:00')]).tz_localize('US/Eastern')
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
def test_multi_index(self):
for s, i in self.mi.items():
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
def test_unicode(self):
i = tm.makeUnicodeIndex(100)
i_rec = self.encode_decode(i)
tm.assert_index_equal(i, i_rec)
def categorical_index(self):
# GH15487
df = DataFrame(np.random.randn(10, 2))
df = df.astype({0: 'category'}).set_index(0)
result = self.encode_decode(df)
tm.assert_frame_equal(result, df)
class TestSeries(TestPackers):
def setup_method(self, method):
super(TestSeries, self).setup_method(method)
self.d = {}
s = tm.makeStringSeries()
s.name = 'string'
self.d['string'] = s
s = tm.makeObjectSeries()
s.name = 'object'
self.d['object'] = s
s = Series(iNaT, dtype='M8[ns]', index=range(5))
self.d['date'] = s
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 2 +
[Timestamp('20130603', tz='CET')] * 3,
'G': [Timestamp('20130102', tz='US/Eastern')] * 5,
'H': Categorical([1, 2, 3, 4, 5]),
'I': Categorical([1, 2, 3, 4, 5], ordered=True),
}
self.d['float'] = Series(data['A'])
self.d['int'] = Series(data['B'])
self.d['mixed'] = Series(data['E'])
self.d['dt_tz_mixed'] = Series(data['F'])
self.d['dt_tz'] = Series(data['G'])
self.d['cat_ordered'] = Series(data['H'])
self.d['cat_unordered'] = Series(data['I'])
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_series_equal(i, i_rec)
class TestCategorical(TestPackers):
def setup_method(self, method):
super(TestCategorical, self).setup_method(method)
self.d = {}
self.d['plain_str'] = Categorical(['a', 'b', 'c', 'd', 'e'])
self.d['plain_str_ordered'] = Categorical(['a', 'b', 'c', 'd', 'e'],
ordered=True)
self.d['plain_int'] = Categorical([5, 6, 7, 8])
self.d['plain_int_ordered'] = Categorical([5, 6, 7, 8], ordered=True)
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_categorical_equal(i, i_rec)
class TestNDFrame(TestPackers):
def setup_method(self, method):
super(TestNDFrame, self).setup_method(method)
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 5,
'G': [Timestamp('20130603', tz='CET')] * 5,
'H': Categorical(['a', 'b', 'c', 'd', 'e']),
'I': Categorical(['a', 'b', 'c', 'd', 'e'], ordered=True),
}
self.frame = {
'float': DataFrame(dict(A=data['A'], B=Series(data['A']) + 1)),
'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)),
'mixed': DataFrame(data)}
with catch_warnings(record=True):
self.panel = {
'float': Panel(dict(ItemA=self.frame['float'],
ItemB=self.frame['float'] + 1))}
def test_basic_frame(self):
for s, i in self.frame.items():
i_rec = self.encode_decode(i)
assert_frame_equal(i, i_rec)
def test_basic_panel(self):
with catch_warnings(record=True):
for s, i in self.panel.items():
i_rec = self.encode_decode(i)
assert_panel_equal(i, i_rec)
def test_multi(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
l = tuple([self.frame['float'], self.frame['float'].A,
self.frame['float'].B, None])
l_rec = self.encode_decode(l)
check_arbitrary(l, l_rec)
# this is an oddity in that packed lists will be returned as tuples
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
l_rec = self.encode_decode(l)
assert isinstance(l_rec, tuple)
check_arbitrary(l, l_rec)
def test_iterator(self):
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
with ensure_clean(self.path) as path:
to_msgpack(path, *l)
for i, packed in enumerate(read_msgpack(path, iterator=True)):
check_arbitrary(packed, l[i])
def tests_datetimeindex_freq_issue(self):
# GH 5947
# inferring freq on the datetimeindex
df = DataFrame([1, 2, 3], index=date_range('1/1/2013', '1/3/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
df = DataFrame([1, 2], index=date_range('1/1/2013', '1/2/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)
def test_dataframe_duplicate_column_names(self):
# GH 9618
expected_1 = DataFrame(columns=['a', 'a'])
expected_2 = DataFrame(columns=[1] * 100)
expected_2.loc[0] = np.random.randn(100)
expected_3 = DataFrame(columns=[1, 1])
expected_3.loc[0] = ['abc', np.nan]
result_1 = self.encode_decode(expected_1)
result_2 = self.encode_decode(expected_2)
result_3 = self.encode_decode(expected_3)
assert_frame_equal(result_1, expected_1)
assert_frame_equal(result_2, expected_2)
assert_frame_equal(result_3, expected_3)
class TestSparse(TestPackers):
def _check_roundtrip(self, obj, comparator, **kwargs):
# currently these are not implemented
# i_rec = self.encode_decode(obj)
# comparator(obj, i_rec, **kwargs)
pytest.raises(NotImplementedError, self.encode_decode, obj)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.loc[3:5, 1:3] = np.nan
s.loc[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
class TestCompression(TestPackers):
"""See https://github.com/pandas-dev/pandas/pull/9783
"""
def setup_method(self, method):
try:
from sqlalchemy import create_engine
self._create_sql_engine = create_engine
except ImportError:
self._SQLALCHEMY_INSTALLED = False
else:
self._SQLALCHEMY_INSTALLED = True
super(TestCompression, self).setup_method(method)
data = {
'A': np.arange(1000, dtype=np.float64),
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
}
self.frame = {
'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])),
'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])),
'mixed': DataFrame(data),
}
def test_plain(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
def _test_compression(self, compress):
i_rec = self.encode_decode(self.frame, compress=compress)
for k in self.frame.keys():
value = i_rec[k]
expected = self.frame[k]
assert_frame_equal(value, expected)
# make sure that we can write to the new frames
for block in value._data.blocks:
assert block.values.flags.writeable
def test_compression_zlib(self):
if not _ZLIB_INSTALLED:
pytest.skip('no zlib')
self._test_compression('zlib')
def test_compression_blosc(self):
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
self._test_compression('blosc')
def _test_compression_warns_when_decompress_caches(self, compress):
not_garbage = []
control = [] # copied data
compress_module = globals()[compress]
real_decompress = compress_module.decompress
def decompress(ob):
"""mock decompress function that delegates to the real
decompress but caches the result and a copy of the result.
"""
res = real_decompress(ob)
not_garbage.append(res) # hold a reference to this bytes object
control.append(bytearray(res)) # copy the data here to check later
return res
# types mapped to values to add in place.
rhs = {
np.dtype('float64'): 1.0,
np.dtype('int32'): 1,
np.dtype('object'): 'a',
np.dtype('datetime64[ns]'): np.timedelta64(1, 'ns'),
np.dtype('timedelta64[ns]'): np.timedelta64(1, 'ns'),
}
with patch(compress_module, 'decompress', decompress), \
tm.assert_produces_warning(PerformanceWarning) as ws:
i_rec = self.encode_decode(self.frame, compress=compress)
for k in self.frame.keys():
value = i_rec[k]
expected = self.frame[k]
assert_frame_equal(value, expected)
# make sure that we can write to the new frames even though
# we needed to copy the data
for block in value._data.blocks:
assert block.values.flags.writeable
# mutate the data in some way
block.values[0] += rhs[block.dtype]
for w in ws:
# check the messages from our warnings
assert str(w.message) == ('copying data after decompressing; '
'this may mean that decompress is '
'caching its result')
for buf, control_buf in zip(not_garbage, control):
# make sure none of our mutations above affected the
# original buffers
assert buf == control_buf
def test_compression_warns_when_decompress_caches_zlib(self):
if not _ZLIB_INSTALLED:
pytest.skip('no zlib')
self._test_compression_warns_when_decompress_caches('zlib')
def test_compression_warns_when_decompress_caches_blosc(self):
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
self._test_compression_warns_when_decompress_caches('blosc')
def _test_small_strings_no_warn(self, compress):
empty = np.array([], dtype='uint8')
with tm.assert_produces_warning(None):
empty_unpacked = self.encode_decode(empty, compress=compress)
tm.assert_numpy_array_equal(empty_unpacked, empty)
assert empty_unpacked.flags.writeable
char = np.array([ord(b'a')], dtype='uint8')
with tm.assert_produces_warning(None):
char_unpacked = self.encode_decode(char, compress=compress)
tm.assert_numpy_array_equal(char_unpacked, char)
assert char_unpacked.flags.writeable
# if this test fails I am sorry because the interpreter is now in a
# bad state where b'a' points to 98 == ord(b'b').
char_unpacked[0] = ord(b'b')
# we compare the ord of bytes b'a' with unicode u'a' because they should
# always be the same (unless we were able to mutate the shared
# character singleton, in which case ord(b'a') == ord(b'b')).
assert ord(b'a') == ord(u'a')
tm.assert_numpy_array_equal(
char_unpacked,
np.array([ord(b'b')], dtype='uint8'),
)
def test_small_strings_no_warn_zlib(self):
if not _ZLIB_INSTALLED:
pytest.skip('no zlib')
self._test_small_strings_no_warn('zlib')
def test_small_strings_no_warn_blosc(self):
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
self._test_small_strings_no_warn('blosc')
def test_readonly_axis_blosc(self):
# GH11880
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
df1 = DataFrame({'A': list('abcd')})
df2 = DataFrame(df1, index=[1., 2., 3., 4.])
assert 1 in self.encode_decode(df1['A'], compress='blosc')
assert 1. in self.encode_decode(df2['A'], compress='blosc')
def test_readonly_axis_zlib(self):
# GH11880
df1 = DataFrame({'A': list('abcd')})
df2 = DataFrame(df1, index=[1., 2., 3., 4.])
assert 1 in self.encode_decode(df1['A'], compress='zlib')
assert 1. in self.encode_decode(df2['A'], compress='zlib')
def test_readonly_axis_blosc_to_sql(self):
# GH11880
if not _BLOSC_INSTALLED:
pytest.skip('no blosc')
if not self._SQLALCHEMY_INSTALLED:
pytest.skip('no sqlalchemy')
expected = DataFrame({'A': list('abcd')})
df = self.encode_decode(expected, compress='blosc')
eng = self._create_sql_engine("sqlite:///:memory:")
df.to_sql('test', eng, if_exists='append')
result = pandas.read_sql_table('test', eng, index_col='index')
result.index.names = [None]
assert_frame_equal(expected, result)
def test_readonly_axis_zlib_to_sql(self):
# GH11880
if not _ZLIB_INSTALLED:
pytest.skip('no zlib')
if not self._SQLALCHEMY_INSTALLED:
pytest.skip('no sqlalchemy')
expected = DataFrame({'A': list('abcd')})
df = self.encode_decode(expected, compress='zlib')
eng = self._create_sql_engine("sqlite:///:memory:")
df.to_sql('test', eng, if_exists='append')
result = pandas.read_sql_table('test', eng, index_col='index')
result.index.names = [None]
assert_frame_equal(expected, result)
class TestEncoding(TestPackers):
def setup_method(self, method):
super(TestEncoding, self).setup_method(method)
data = {
'A': [compat.u('\u2019')] * 1000,
'B': np.arange(1000, dtype=np.int32),
'C': list(100 * 'abcdefghij'),
'D': date_range(datetime.datetime(2015, 4, 1), periods=1000),
'E': [datetime.timedelta(days=x) for x in range(1000)],
'G': [400] * 1000
}
self.frame = {
'float': DataFrame(dict((k, data[k]) for k in ['A', 'A'])),
'int': DataFrame(dict((k, data[k]) for k in ['B', 'B'])),
'mixed': DataFrame(data),
}
self.utf_encodings = ['utf8', 'utf16', 'utf32']
def test_utf(self):
# GH10581
for encoding in self.utf_encodings:
for frame in compat.itervalues(self.frame):
result = self.encode_decode(frame, encoding=encoding)
assert_frame_equal(result, frame)
def test_default_encoding(self):
for frame in compat.itervalues(self.frame):
result = frame.to_msgpack()
expected = frame.to_msgpack(encoding='utf8')
assert result == expected
result = self.encode_decode(frame)
assert_frame_equal(result, frame)
def legacy_packers_versions():
# yield the packers versions
path = tm.get_data_path('legacy_msgpack')
for v in os.listdir(path):
p = os.path.join(path, v)
if os.path.isdir(p):
yield v
class TestMsgpack(object):
"""
How to add msgpack tests:
1. Install pandas version intended to output the msgpack.
2. Execute "generate_legacy_storage_files.py" to create the msgpack.
$ python generate_legacy_storage_files.py <output_dir> msgpack
3. Move the created msgpack file to "data/legacy_msgpack/<version>" directory.
"""
minimum_structure = {'series': ['float', 'int', 'mixed',
'ts', 'mi', 'dup'],
'frame': ['float', 'int', 'mixed', 'mi'],
'panel': ['float'],
'index': ['int', 'date', 'period'],
'mi': ['reg2']}
def check_min_structure(self, data, version):
for typ, v in self.minimum_structure.items():
assert typ in data, '"{0}" not found in unpacked data'.format(typ)
for kind in v:
msg = '"{0}" not found in data["{1}"]'.format(kind, typ)
assert kind in data[typ], msg
def compare(self, current_data, all_data, vf, version):
# GH12277 encoding default used to be latin-1, now utf-8
if LooseVersion(version) < '0.18.0':
data = read_msgpack(vf, encoding='latin-1')
else:
data = read_msgpack(vf)
self.check_min_structure(data, version)
for typ, dv in data.items():
assert typ in all_data, ('unpacked data contains '
'extra key "{0}"'
.format(typ))
for dt, result in dv.items():
assert dt in current_data[typ], ('data["{0}"] contains extra '
'key "{1}"'.format(typ, dt))
try:
expected = current_data[typ][dt]
except KeyError:
continue
# use a specific comparator
# if available
comp_method = "compare_{typ}_{dt}".format(typ=typ, dt=dt)
comparator = getattr(self, comp_method, None)
if comparator is not None:
comparator(result, expected, typ, version)
else:
check_arbitrary(result, expected)
return data
def compare_series_dt_tz(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('version', legacy_packers_versions())
def test_msgpacks_legacy(self, current_packers_data, all_packers_data,
version):
pth = tm.get_data_path('legacy_msgpack/{0}'.format(version))
n = 0
for f in os.listdir(pth):
# GH12142 0.17 files packed in P2 can't be read in P3
if (compat.PY3 and version.startswith('0.17.') and
f.split('.')[-4][-1] == '2'):
continue
vf = os.path.join(pth, f)
try:
with catch_warnings(record=True):
self.compare(current_packers_data, all_packers_data,
vf, version)
except ImportError:
# blosc not installed
continue
n += 1
assert n > 0, 'Msgpack files are not tested'
| mit |
cactusbin/nyt | matplotlib/lib/matplotlib/projections/polar.py | 4 | 26829 | from __future__ import print_function
import math
import warnings
import numpy as np
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.axes import Axes
import matplotlib.axis as maxis
from matplotlib import cbook
from matplotlib import docstring
from matplotlib.patches import Circle
from matplotlib.path import Path
from matplotlib.ticker import Formatter, Locator, FormatStrFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper, \
ScaledTranslation, blended_transform_factory, BboxTransformToMaxOnly
import matplotlib.spines as mspines
class PolarTransform(Transform):
"""
The base polar transform. This handles projecting *theta* and
*r* into Cartesian coordinate space *x* and *y*, but does not
perform the ultimate affine transformation into the correct
position.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform_non_affine(self, tr):
xy = np.empty(tr.shape, np.float_)
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
t = tr[:, 0:1]
r = tr[:, 1:2]
x = xy[:, 0:1]
y = xy[:, 1:2]
t *= theta_direction
t += theta_offset
r = r - rmin
mask = r < 0
x[:] = np.where(mask, np.nan, r * np.cos(t))
y[:] = np.where(mask, np.nan, r * np.sin(t))
return xy
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def transform_path_non_affine(self, path):
vertices = path.vertices
if len(vertices) == 2 and vertices[0, 0] == vertices[1, 0]:
return Path(self.transform(vertices), path.codes)
ipath = path.interpolated(path._interpolation_steps)
return Path(self.transform(ipath.vertices), ipath.codes)
transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__
def inverted(self):
return PolarAxes.InvertedPolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
class PolarAffine(Affine2DBase):
"""
The affine part of the polar projection. Scales the output so
that maximum radius rests on the edge of the axes circle.
"""
def __init__(self, scale_transform, limits):
"""
*limits* is the view limit of the data. The only part of
its bounds that is used is ymax (for the radius maximum).
The theta range is always fixed to (0, 2pi).
"""
Affine2DBase.__init__(self)
self._scale_transform = scale_transform
self._limits = limits
self.set_children(scale_transform, limits)
self._mtx = None
def get_matrix(self):
if self._invalid:
limits_scaled = self._limits.transformed(self._scale_transform)
yscale = limits_scaled.ymax - limits_scaled.ymin
affine = Affine2D() \
.scale(0.5 / yscale) \
.translate(0.5, 0.5)
self._mtx = affine.get_matrix()
self._inverted = None
self._invalid = 0
return self._mtx
get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__
def __getstate__(self):
return {}
class InvertedPolarTransform(Transform):
"""
The inverse of the polar transform, mapping Cartesian
coordinate space *x* and *y* back to *theta* and *r*.
"""
input_dims = 2
output_dims = 2
is_separable = False
def __init__(self, axis=None, use_rmin=True):
Transform.__init__(self)
self._axis = axis
self._use_rmin = use_rmin
def transform_non_affine(self, xy):
if self._axis is not None:
if self._use_rmin:
rmin = self._axis.viewLim.ymin
else:
rmin = 0
theta_offset = self._axis.get_theta_offset()
theta_direction = self._axis.get_theta_direction()
else:
rmin = 0
theta_offset = 0
theta_direction = 1
x = xy[:, 0:1]
y = xy[:, 1:]
r = np.sqrt(x*x + y*y)
theta = np.arccos(x / r)
theta = np.where(y < 0, 2 * np.pi - theta, theta)
theta -= theta_offset
theta *= theta_direction
r += rmin
return np.concatenate((theta, r), 1)
transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__
def inverted(self):
return PolarAxes.PolarTransform(self._axis, self._use_rmin)
inverted.__doc__ = Transform.inverted.__doc__
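# A minimal illustrative sketch (editor's own helper, not part of matplotlib):
# PolarTransform and InvertedPolarTransform above implement the plain
# polar <-> Cartesian change of coordinates.  The round trip is checked here
# with NumPy only, assuming theta offset 0, direction +1 and rmin 0.
def _example_polar_roundtrip(seed=0):
    """Map (theta, r) to (x, y) and back, and check that nothing is lost."""
    import numpy as np
    rng = np.random.RandomState(seed)
    theta = rng.uniform(0, 2 * np.pi, 100)
    r = rng.uniform(0.1, 5.0, 100)
    x, y = r * np.cos(theta), r * np.sin(theta)
    r_back = np.sqrt(x * x + y * y)
    theta_back = np.arctan2(y, x) % (2 * np.pi)   # wrap back into [0, 2*pi)
    assert np.allclose(r_back, r) and np.allclose(theta_back, theta)
    return theta_back, r_back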
class ThetaFormatter(Formatter):
"""
Used to format the *theta* tick labels. Converts the native
unit of radians into degrees and adds a degree symbol.
"""
def __call__(self, x, pos=None):
# \u00b0 : degree symbol
if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
return r"$%0.0f^\circ$" % ((x / np.pi) * 180.0)
else:
# we use unicode, rather than mathtext with \circ, so
# that it will work correctly with any arbitrary font
# (assuming it has a degree sign), whereas $5\circ$
# will only work correctly with one of the supported
# math fonts (Computer Modern and STIX)
return u"%0.0f\u00b0" % ((x / np.pi) * 180.0)
class RadialLocator(Locator):
"""
Used to locate radius ticks.
Ensures that all ticks are strictly positive. For all other
tasks, it delegates to the base
:class:`~matplotlib.ticker.Locator` (which may be different
    depending on the scale of the *r*-axis).
"""
def __init__(self, base):
self.base = base
def __call__(self):
ticks = self.base()
return [x for x in ticks if x > 0]
def autoscale(self):
return self.base.autoscale()
def pan(self, numsteps):
return self.base.pan(numsteps)
def zoom(self, direction):
return self.base.zoom(direction)
def refresh(self):
return self.base.refresh()
def view_limits(self, vmin, vmax):
vmin, vmax = self.base.view_limits(vmin, vmax)
return 0, vmax
class PolarAxes(Axes):
"""
A polar graph projection, where the input dimensions are *theta*, *r*.
Theta starts pointing east and goes anti-clockwise.
"""
name = 'polar'
def __init__(self, *args, **kwargs):
"""
Create a new Polar Axes for a polar plot.
The following optional kwargs are supported:
- *resolution*: The number of points of interpolation between
each pair of data points. Set to 1 to disable
interpolation.
"""
self.resolution = kwargs.pop('resolution', 1)
self._default_theta_offset = kwargs.pop('theta_offset', 0)
self._default_theta_direction = kwargs.pop('theta_direction', 1)
if self.resolution not in (None, 1):
warnings.warn(
"""The resolution kwarg to Polar plots is now ignored.
If you need to interpolate data points, consider running
cbook.simple_linear_interpolation on the data before passing to matplotlib.""")
Axes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla()
__init__.__doc__ = Axes.__init__.__doc__
def cla(self):
Axes.cla(self)
self.title.set_y(1.05)
self.xaxis.set_major_formatter(self.ThetaFormatter())
self.xaxis.isDefault_majfmt = True
angles = np.arange(0.0, 360.0, 45.0)
self.set_thetagrids(angles)
self.yaxis.set_major_locator(self.RadialLocator(self.yaxis.get_major_locator()))
self.grid(rcParams['polaraxes.grid'])
self.xaxis.set_ticks_position('none')
self.yaxis.set_ticks_position('none')
self.yaxis.set_tick_params(label1On=True)
# Why do we need to turn on yaxis tick labels, but
# xaxis tick labels are already on?
self.set_theta_offset(self._default_theta_offset)
self.set_theta_direction(self._default_theta_direction)
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
        # Calling polar_axes.xaxis.cla() or polar_axes.yaxis.cla()
# results in weird artifacts. Therefore we disable this for
# now.
# self.spines['polar'].register_axis(self.yaxis)
self._update_transScale()
def _set_lim_and_transforms(self):
self.transAxes = BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = TransformWrapper(IdentityTransform())
# A (possibly non-linear) projection on the (already scaled)
# data. This one is aware of rmin
self.transProjection = self.PolarTransform(self)
# This one is not aware of rmin
self.transPureProjection = self.PolarTransform(self, use_rmin=False)
# An affine transformation on the data, generally to limit the
# range of the axes
self.transProjectionAffine = self.PolarAffine(self.transScale, self.viewLim)
# The complete data transformation stack -- from data all the
# way to display coordinates
self.transData = self.transScale + self.transProjection + \
(self.transProjectionAffine + self.transAxes)
# This is the transform for theta-axis ticks. It is
# equivalent to transData, except it always puts r == 1.0 at
# the edge of the axis circle.
self._xaxis_transform = (
self.transPureProjection +
self.PolarAffine(IdentityTransform(), Bbox.unit()) +
self.transAxes)
# The theta labels are moved from radius == 0.0 to radius == 1.1
self._theta_label1_position = Affine2D().translate(0.0, 1.1)
self._xaxis_text1_transform = (
self._theta_label1_position +
self._xaxis_transform)
self._theta_label2_position = Affine2D().translate(0.0, 1.0 / 1.1)
self._xaxis_text2_transform = (
self._theta_label2_position +
self._xaxis_transform)
        # This is the transform for r-axis ticks.  It scales the theta
        # axis so that gridlines that would run from 0.0 to 1.0 now run
        # from 0.0 to 2pi.
self._yaxis_transform = (
Affine2D().scale(np.pi * 2.0, 1.0) +
self.transData)
# The r-axis labels are put at an angle and padded in the r-direction
self._r_label_position = ScaledTranslation(
22.5, 0.0, Affine2D())
self._yaxis_text_transform = (
self._r_label_position +
Affine2D().scale(1.0 / 360.0, 1.0) +
self._yaxis_transform
)
def get_xaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
return self._xaxis_text1_transform, 'center', 'center'
def get_xaxis_text2_transform(self, pad):
return self._xaxis_text2_transform, 'center', 'center'
def get_yaxis_transform(self,which='grid'):
assert which in ['tick1','tick2','grid']
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'bottom', 'left'
elif angle < 180.:
return self._yaxis_text_transform, 'bottom', 'right'
elif angle < 270.:
return self._yaxis_text_transform, 'top', 'right'
else:
return self._yaxis_text_transform, 'top', 'left'
def get_yaxis_text2_transform(self, pad):
angle = self._r_label_position.to_values()[4]
if angle < 90.:
return self._yaxis_text_transform, 'top', 'right'
elif angle < 180.:
return self._yaxis_text_transform, 'top', 'left'
elif angle < 270.:
return self._yaxis_text_transform, 'bottom', 'left'
else:
return self._yaxis_text_transform, 'bottom', 'right'
def _gen_axes_patch(self):
return Circle((0.5, 0.5), 0.5)
def _gen_axes_spines(self):
return {'polar':mspines.Spine.circular_spine(self,
(0.5, 0.5), 0.5)}
def set_rmax(self, rmax):
self.viewLim.y1 = rmax
def get_rmax(self):
return self.viewLim.ymax
def set_rmin(self, rmin):
self.viewLim.y0 = rmin
def get_rmin(self):
return self.viewLim.ymin
def set_theta_offset(self, offset):
"""
Set the offset for the location of 0 in radians.
"""
self._theta_offset = offset
def get_theta_offset(self):
"""
Get the offset for the location of 0 in radians.
"""
return self._theta_offset
def set_theta_zero_location(self, loc):
"""
Sets the location of theta's zero. (Calls set_theta_offset
with the correct value in radians under the hood.)
May be one of "N", "NW", "W", "SW", "S", "SE", "E", or "NE".
"""
mapping = {
'N': np.pi * 0.5,
'NW': np.pi * 0.75,
'W': np.pi,
'SW': np.pi * 1.25,
'S': np.pi * 1.5,
'SE': np.pi * 1.75,
'E': 0,
'NE': np.pi * 0.25 }
return self.set_theta_offset(mapping[loc])
def set_theta_direction(self, direction):
"""
Set the direction in which theta increases.
clockwise, -1:
Theta increases in the clockwise direction
counterclockwise, anticlockwise, 1:
Theta increases in the counterclockwise direction
"""
if direction in ('clockwise',):
self._direction = -1
elif direction in ('counterclockwise', 'anticlockwise'):
self._direction = 1
elif direction in (1, -1):
self._direction = direction
else:
raise ValueError("direction must be 1, -1, clockwise or counterclockwise")
def get_theta_direction(self):
"""
Get the direction in which theta increases.
-1:
Theta increases in the clockwise direction
1:
Theta increases in the counterclockwise direction
"""
return self._direction
def set_rlim(self, *args, **kwargs):
if 'rmin' in kwargs:
kwargs['ymin'] = kwargs.pop('rmin')
if 'rmax' in kwargs:
kwargs['ymax'] = kwargs.pop('rmax')
return self.set_ylim(*args, **kwargs)
def set_yscale(self, *args, **kwargs):
Axes.set_yscale(self, *args, **kwargs)
self.yaxis.set_major_locator(
self.RadialLocator(self.yaxis.get_major_locator()))
def set_rscale(self, *args, **kwargs):
return Axes.set_yscale(self, *args, **kwargs)
def set_rticks(self, *args, **kwargs):
return Axes.set_yticks(self, *args, **kwargs)
@docstring.dedent_interpd
def set_thetagrids(self, angles, labels=None, frac=None, fmt=None,
**kwargs):
"""
Set the angles at which to place the theta grids (these
gridlines are equal along the theta dimension). *angles* is in
degrees.
*labels*, if not None, is a ``len(angles)`` list of strings of
the labels to use at each angle.
If *labels* is None, the labels will be ``fmt %% angle``
*frac* is the fraction of the polar axes radius at which to
place the label (1 is the edge). e.g., 1.05 is outside the axes
and 0.95 is inside the axes.
Return value is a list of tuples (*line*, *label*), where
*line* is :class:`~matplotlib.lines.Line2D` instances and the
*label* is :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
# Make sure we take into account unitized data
angles = self.convert_yunits(angles)
angles = np.asarray(angles, np.float_)
self.set_xticks(angles * (np.pi / 180.0))
if labels is not None:
self.set_xticklabels(labels)
elif fmt is not None:
self.xaxis.set_major_formatter(FormatStrFormatter(fmt))
if frac is not None:
self._theta_label1_position.clear().translate(0.0, frac)
self._theta_label2_position.clear().translate(0.0, 1.0 / frac)
for t in self.xaxis.get_ticklabels():
t.update(kwargs)
return self.xaxis.get_ticklines(), self.xaxis.get_ticklabels()
@docstring.dedent_interpd
def set_rgrids(self, radii, labels=None, angle=None, fmt=None,
**kwargs):
"""
Set the radial locations and labels of the *r* grids.
The labels will appear at radial distances *radii* at the
given *angle* in degrees.
*labels*, if not None, is a ``len(radii)`` list of strings of the
labels to use at each radius.
If *labels* is None, the built-in formatter will be used.
Return value is a list of tuples (*line*, *label*), where
*line* is :class:`~matplotlib.lines.Line2D` instances and the
*label* is :class:`~matplotlib.text.Text` instances.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats
"""
# Make sure we take into account unitized data
radii = self.convert_xunits(radii)
radii = np.asarray(radii)
rmin = radii.min()
if rmin <= 0:
raise ValueError('radial grids must be strictly positive')
self.set_yticks(radii)
if labels is not None:
self.set_yticklabels(labels)
elif fmt is not None:
self.yaxis.set_major_formatter(FormatStrFormatter(fmt))
if angle is None:
angle = self._r_label_position.to_values()[4]
self._r_label_position._t = (angle, 0.0)
self._r_label_position.invalidate()
for t in self.yaxis.get_ticklabels():
t.update(kwargs)
return self.yaxis.get_gridlines(), self.yaxis.get_ticklabels()
def set_xscale(self, scale, *args, **kwargs):
if scale != 'linear':
raise NotImplementedError("You can not set the xscale on a polar plot.")
def set_xlim(self, *args, **kargs):
# The xlim is fixed, no matter what you do
self.viewLim.intervalx = (0.0, np.pi * 2.0)
def format_coord(self, theta, r):
"""
Return a format string formatting the coordinate using Unicode
characters.
"""
theta /= math.pi
# \u03b8: lower-case theta
# \u03c0: lower-case pi
# \u00b0: degree symbol
return u'\u03b8=%0.3f\u03c0 (%0.3f\u00b0), r=%0.3f' % (theta, theta * 180.0, r)
def get_data_ratio(self):
'''
Return the aspect ratio of the data itself. For a polar plot,
this should always be 1.0
'''
return 1.0
### Interactive panning
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box button functionality.
Polar axes do not support zoom boxes.
"""
return False
def can_pan(self) :
"""
Return *True* if this axes supports the pan/zoom button functionality.
For polar axes, this is slightly misleading. Both panning and
zooming are performed by the same button. Panning is performed
in azimuth while zooming is done along the radial.
"""
return True
def start_pan(self, x, y, button):
angle = np.deg2rad(self._r_label_position.to_values()[4])
mode = ''
if button == 1:
epsilon = np.pi / 45.0
t, r = self.transData.inverted().transform_point((x, y))
if t >= angle - epsilon and t <= angle + epsilon:
mode = 'drag_r_labels'
elif button == 3:
mode = 'zoom'
self._pan_start = cbook.Bunch(
rmax = self.get_rmax(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
r_label_angle = self._r_label_position.to_values()[4],
x = x,
y = y,
mode = mode
)
def end_pan(self):
del self._pan_start
def drag_pan(self, button, key, x, y):
p = self._pan_start
if p.mode == 'drag_r_labels':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
# Deal with theta
dt0 = t - startt
dt1 = startt - t
if abs(dt1) < abs(dt0):
                dt = abs(dt1) * np.sign(dt0) * -1.0
else:
dt = dt0 * -1.0
dt = (dt / np.pi) * 180.0
self._r_label_position._t = (p.r_label_angle - dt, 0.0)
self._r_label_position.invalidate()
trans, vert1, horiz1 = self.get_yaxis_text1_transform(0.0)
trans, vert2, horiz2 = self.get_yaxis_text2_transform(0.0)
for t in self.yaxis.majorTicks + self.yaxis.minorTicks:
t.label1.set_va(vert1)
t.label1.set_ha(horiz1)
t.label2.set_va(vert2)
t.label2.set_ha(horiz2)
elif p.mode == 'zoom':
startt, startr = p.trans_inverse.transform_point((p.x, p.y))
t, r = p.trans_inverse.transform_point((x, y))
dr = r - startr
# Deal with r
scale = r / startr
self.set_rmax(p.rmax / scale)
# To keep things self-contained, we can put aliases to the Polar classes
# defined above. This isn't strictly necessary, but it makes some of the
# code more readable (and provides a backwards compatible Polar API)
PolarAxes.PolarTransform = PolarTransform
PolarAxes.PolarAffine = PolarAffine
PolarAxes.InvertedPolarTransform = InvertedPolarTransform
PolarAxes.ThetaFormatter = ThetaFormatter
PolarAxes.RadialLocator = RadialLocator
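# --- Illustrative sketch (an addition, not part of the original module) -----
# A minimal, self-contained check of the mapping described in the
# PolarTransform docstring: (theta, r) is mapped to
# (x, y) = (r cos(theta), r sin(theta)) after applying the theta offset and
# direction, while the final placement is left to PolarAffine.  The names
# below are hypothetical and only used for this demo.
if __name__ == '__main__':
    import numpy as _np
    theta = _np.array([0.0, 0.5 * _np.pi, _np.pi])
    r = _np.array([1.0, 2.0, 3.0])
    theta_direction, theta_offset, rmin = 1, 0.0, 0.0
    t = theta * theta_direction + theta_offset
    rr = r - rmin
    x = _np.where(rr < 0, _np.nan, rr * _np.cos(t))
    y = _np.where(rr < 0, _np.nan, rr * _np.sin(t))
    print(_np.column_stack((x, y)))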
# These are a couple of aborted attempts to project a polar plot using
# cubic bezier curves.
# def transform_path(self, path):
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# vertices = self.transform(vertices)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# kappa = 0.5
# p0 = vertices[0:-1]
# p1 = vertices[1: ]
# x0 = p0[:, 0:1]
# y0 = p0[:, 1: ]
# b0 = ((y0 - x0) - y0) / ((x0 + y0) - x0)
# a0 = y0 - b0*x0
# x1 = p1[:, 0:1]
# y1 = p1[:, 1: ]
# b1 = ((y1 - x1) - y1) / ((x1 + y1) - x1)
# a1 = y1 - b1*x1
# x = -(a0-a1) / (b0-b1)
# y = a0 + b0*x
# xk = (x - x0) * kappa + x0
# yk = (y - y0) * kappa + y0
# result[1::3, 0:1] = xk
# result[1::3, 1: ] = yk
# xk = (x - x1) * kappa + x1
# yk = (y - y1) * kappa + y1
# result[2::3, 0:1] = xk
# result[2::3, 1: ] = yk
# result[3::3] = p1
# print vertices[-2:]
# print result[-2:]
# return mpath.Path(result, codes)
# twopi = 2.0 * np.pi
# halfpi = 0.5 * np.pi
# vertices = path.vertices
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# maxtd = td.max()
# interpolate = np.ceil(maxtd / halfpi)
# print "interpolate", interpolate
# if interpolate > 1.0:
# vertices = self.interpolate(vertices, interpolate)
# result = np.zeros((len(vertices) * 3 - 2, 2), np.float_)
# codes = mpath.Path.CURVE4 * np.ones((len(vertices) * 3 - 2, ), mpath.Path.code_type)
# result[0] = vertices[0]
# codes[0] = mpath.Path.MOVETO
# kappa = 4.0 * ((np.sqrt(2.0) - 1.0) / 3.0)
# tkappa = np.arctan(kappa)
# hyp_kappa = np.sqrt(kappa*kappa + 1.0)
# t0 = vertices[0:-1, 0]
# t1 = vertices[1: , 0]
# r0 = vertices[0:-1, 1]
# r1 = vertices[1: , 1]
# td = np.where(t1 > t0, t1 - t0, twopi - (t0 - t1))
# td_scaled = td / (np.pi * 0.5)
# rd = r1 - r0
# r0kappa = r0 * kappa * td_scaled
# r1kappa = r1 * kappa * td_scaled
# ravg_kappa = ((r1 + r0) / 2.0) * kappa * td_scaled
# result[1::3, 0] = t0 + (tkappa * td_scaled)
# result[1::3, 1] = r0*hyp_kappa
# # result[1::3, 1] = r0 / np.cos(tkappa * td_scaled) # np.sqrt(r0*r0 + ravg_kappa*ravg_kappa)
# result[2::3, 0] = t1 - (tkappa * td_scaled)
# result[2::3, 1] = r1*hyp_kappa
# # result[2::3, 1] = r1 / np.cos(tkappa * td_scaled) # np.sqrt(r1*r1 + ravg_kappa*ravg_kappa)
# result[3::3, 0] = t1
# result[3::3, 1] = r1
# print vertices[:6], result[:6], t0[:6], t1[:6], td[:6], td_scaled[:6], tkappa
# result = self.transform(result)
# return mpath.Path(result, codes)
# transform_path_non_affine = transform_path
| unlicense |
yuriyfilonov/text2vec_old | src/validation.py | 1 | 16449 | import itertools
import numpy
import os
import random
import re
import scipy.spatial.distance as ssd
import scipy.stats
from scipy.cluster.hierarchy import dendrogram, linkage
import pandas
from matplotlib import colors
from matplotlib import pyplot as plt
import vectors
from libs import tsne
rubensteinGoodenoughData = None
def rubensteinGoodenough(wordIndexMap, embeddings):
global rubensteinGoodenoughData
if rubensteinGoodenoughData is None:
rubensteinGoodenoughData = []
rubensteinGoodenoughFilePath = 'res/RG/EN-RG-65.txt'
with open(rubensteinGoodenoughFilePath) as rgFile:
lines = rgFile.readlines()
for line in lines:
word0, word1, targetScore = tuple(line.strip().split('\t'))
targetScore = float(targetScore)
rubensteinGoodenoughData.append((word0, word1, targetScore))
scores = []
targetScores = []
for word0, word1, targetScore in rubensteinGoodenoughData:
if word0 in wordIndexMap and word1 in wordIndexMap:
targetScores.append(targetScore)
word0Index = wordIndexMap[word0]
word1Index = wordIndexMap[word1]
word0Embedding = embeddings[word0Index]
word1Embedding = embeddings[word1Index]
score = vectors.cosineSimilarity(word0Embedding, word1Embedding)
scores.append(score)
if len(scores) == 0:
return numpy.nan
pearson, pearsonDeviation = scipy.stats.pearsonr(scores, targetScores)
spearman, spearmanDeviation = scipy.stats.spearmanr(scores, targetScores)
rubensteinGoodenoughMetric = numpy.mean([pearson, spearman])
return rubensteinGoodenoughMetric
wordSimilarity353Data = None
def wordSimilarity353(wordIndexMap, embeddings):
global wordSimilarity353Data
if wordSimilarity353Data is None:
wordSimilarity353Data = []
wordSimilarity353FilePath = 'res/WordSimilarity-353/combined.csv'
data = pandas.read_csv(wordSimilarity353FilePath)
for word0, word1, score in zip(data['Word1'], data['Word2'], data['Score']):
wordSimilarity353Data.append((word0, word1, score))
scores = []
targetScores = []
for word0, word1, targetScore in wordSimilarity353Data:
if word0 in wordIndexMap and word1 in wordIndexMap:
targetScores.append(targetScore)
word0Index = wordIndexMap[word0]
word1Index = wordIndexMap[word1]
word0Embedding = embeddings[word0Index]
word1Embedding = embeddings[word1Index]
score = vectors.cosineSimilarity(word0Embedding, word1Embedding)
scores.append(score)
if len(scores) == 0:
return numpy.nan
pearson, pearsonDeviation = scipy.stats.pearsonr(scores, targetScores)
spearman, spearmanDeviation = scipy.stats.spearmanr(scores, targetScores)
metric = numpy.mean([pearson, spearman])
return metric
simLex999Data = None
def simLex999(wordIndexMap, embeddings):
global simLex999Data
if simLex999Data is None:
simLex999Data = []
simLex999FilePath = 'res/SimLex-999/SimLex-999.txt'
data = pandas.read_csv(simLex999FilePath, sep='\t')
for word0, word1, targetScore in zip(data['word1'], data['word2'], data['SimLex999']):
simLex999Data.append((word0, word1, targetScore))
targetScores = []
scores = []
for word0, word1, targetScore in simLex999Data:
if word0 in wordIndexMap and word1 in wordIndexMap:
targetScores.append(targetScore)
word0Index = wordIndexMap[word0]
word1Index = wordIndexMap[word1]
word0Embedding = embeddings[word0Index]
word1Embedding = embeddings[word1Index]
score = vectors.cosineSimilarity(word0Embedding, word1Embedding)
scores.append(score)
if len(scores) == 0:
return numpy.nan
pearson, pearsonDeviation = scipy.stats.pearsonr(scores, targetScores)
spearman, spearmanDeviation = scipy.stats.spearmanr(scores, targetScores)
simLex999Metric = numpy.mean([pearson, spearman])
return simLex999Metric
syntacticWordData = None
def syntacticWordRelations(wordIndexMap, embeddings, maxWords=10):
global syntacticWordData
if syntacticWordData is None:
syntacticWordData = []
syntWordRelFilePath = 'res/Syntactic-Word-Relations/questions-words.txt'
with open(syntWordRelFilePath, 'r') as swrFile:
lines = swrFile.readlines()
syntacticWordData = [tuple(line.lower().split(' ')) for line in lines if not line.startswith(':')]
syntacticWordData = [(word0.strip(), word1.strip(), word2.strip(), word3.strip()) for word0, word1, word2, word3 in syntacticWordData]
scores = []
for word0, word1, word2, word3 in syntacticWordData:
if word0 not in wordIndexMap or word1 not in wordIndexMap or word2 not in wordIndexMap or word3 not in wordIndexMap:
continue
word0Index = wordIndexMap[word0]
word1Index = wordIndexMap[word1]
word2Index = wordIndexMap[word2]
word3Index = wordIndexMap[word3]
word0Embedding = embeddings[word0Index]
word1Embedding = embeddings[word1Index]
word2Embedding = embeddings[word2Index]
word3Embedding = embeddings[word3Index]
similarity01 = vectors.cosineSimilarity(word0Embedding, word1Embedding)
similarity23 = vectors.cosineSimilarity(word2Embedding, word3Embedding)
score = 1
minSimilarityDelta = abs(similarity01 - similarity23)
for embedding in embeddings[:maxWords]:
similarity2N = vectors.cosineSimilarity(word2Embedding, embedding)
similarityDelta = abs(similarity01 - similarity2N)
score = not (similarityDelta < minSimilarityDelta)
if not score:
break
scores.append(score)
if len(scores) == 0:
return numpy.nan
syntacticWordRelationsMetric = float(sum(scores)) / len(scores)
return syntacticWordRelationsMetric
satQuestionsData = None
def satQuestions(wordIndexMap, embeddings):
global satQuestionsData
if satQuestionsData is None:
satQuestionsData = []
satQuestionsFilePath = 'res/SAT-Questions/SAT-package-V3.txt'
maxLineLength = 50
aCode = ord('a')
with open(satQuestionsFilePath) as satFile:
line = satFile.readline()
while line != '':
if len(line) < maxLineLength:
match = re.match('(?P<word0>[\w-]+)\s(?P<word1>[\w-]+)\s[nvar]:[nvar]', line)
if match:
stemWord0, stemWord1 = match.group('word0'), match.group('word1')
satQuestion = [stemWord0, stemWord1]
line = satFile.readline()
match = re.match('(?P<word0>[\w-]+)\s(?P<word1>[\w-]+)\s[nvar]:[nvar]', line)
while match:
choiceWord0, choiceWord1 = match.group('word0'), match.group('word1')
satQuestion.append(choiceWord0)
satQuestion.append(choiceWord1)
line = satFile.readline()
match = re.match('(?P<word0>[\w-]+)\s(?P<word1>[\w-]+)\s[nvar]:[nvar]', line)
correctChoiceIndex = ord(line.strip()) - aCode
satQuestion.append(correctChoiceIndex)
satQuestionsData.append(satQuestion)
line = satFile.readline()
scores = []
for satQuestion in satQuestionsData:
if any([word not in wordIndexMap for word in satQuestion[:-1]]):
continue
stemWord0, stemWord1 = satQuestion[:2]
stemWord0Index = wordIndexMap[stemWord0]
stemWord1Index = wordIndexMap[stemWord1]
stemWord0Embedding, stemWord1Embedding = embeddings[stemWord0Index], embeddings[stemWord1Index]
stemSimilarity = vectors.cosineSimilarity(stemWord0Embedding, stemWord1Embedding)
correctChoiceIndex = satQuestion[-1]
choiceSimilarityDeltas = []
choices = satQuestion[2:-1]
for i in xrange(0, len(choices), 2):
choiceWord0, choiceWord1 = choices[i], choices[i+1]
choiceWord0Index, choiceWord1Index = wordIndexMap[choiceWord0], wordIndexMap[choiceWord1]
choiceWord0Embedding, choiceWord1Embedding = embeddings[choiceWord0Index], embeddings[choiceWord1Index]
choiceSimilarity = vectors.cosineSimilarity(choiceWord0Embedding, choiceWord1Embedding)
choiceSimilarityDelta = abs(stemSimilarity - choiceSimilarity)
choiceSimilarityDeltas.append(choiceSimilarityDelta)
choiceIndex = numpy.argmin(choiceSimilarityDeltas)
scores.append(int(choiceIndex == correctChoiceIndex))
if len(scores) == 0:
return numpy.nan
metric = float(sum(scores)) / len(scores)
return metric
def validate(wordIndexMap, embeddings):
rg = rubensteinGoodenough(wordIndexMap, embeddings)
sim353 = wordSimilarity353(wordIndexMap, embeddings)
sl999 = simLex999(wordIndexMap, embeddings)
syntRel = syntacticWordRelations(wordIndexMap, embeddings)
sat = satQuestions(wordIndexMap, embeddings)
return rg, sim353, sl999, syntRel, sat
def dump(metricsPath, epoch, customMetrics):
metrics = {
'epoch': epoch
}
for name, value in customMetrics.items():
metrics[name] = value
metrics = [metrics]
if os.path.exists(metricsPath):
with open(metricsPath, 'a') as metricsFile:
metricsHistory = pandas.DataFrame.from_dict(metrics)
metricsHistory.to_csv(metricsFile, header=False)
else:
metricsHistory = pandas.DataFrame.from_dict(metrics)
metricsHistory.to_csv(metricsPath, header=True)
def compareMetrics(metricsHistoryPath, *metricNames):
metrics = pandas.DataFrame.from_csv(metricsHistoryPath)
iterations = range(0, len(metrics))
plt.grid()
metricScatters = []
colorNames = colors.cnames.keys()
for metricIndex, metricName in enumerate(metricNames):
metric = metrics[metricName]
random.shuffle(colorNames)
metricScatter = plt.scatter(iterations, metric, c=colorNames[metricIndex % len(colorNames)])
metricScatters.append(metricScatter)
metricsFileName = os.path.basename(metricsHistoryPath)
plt.title(metricsFileName)
plt.legend(metricScatters, metricNames, scatterpoints=1, loc='lower right', ncol=3, fontsize=8)
plt.show()
def compareHistories(metricName, *metricsHistoryPaths):
plt.grid()
metricScatters = []
metricsHistoryNames = []
colorNames = colors.cnames.keys()
for metricsHistoryIndex, metricsHistoryPath in enumerate(metricsHistoryPaths):
metrics = pandas.DataFrame.from_csv(metricsHistoryPath)
iterations = range(0, len(metrics))
metric = metrics[metricName]
random.shuffle(colorNames)
metricScatter = plt.scatter(iterations, metric, c=colorNames[metricsHistoryIndex % len(colorNames)])
metricScatters.append(metricScatter)
metricsHistoryName = os.path.basename(metricsHistoryPath)
metricsHistoryNames.append(metricsHistoryName)
plt.title(metricName)
plt.legend(metricScatters, metricsHistoryNames, scatterpoints=1, loc='lower right', ncol=3, fontsize=8)
plt.show()
def plotEmbeddings(fileIndexMap, embeddings):
embeddingsCount, embeddingSize = embeddings.shape
embeddings = numpy.asarray(embeddings, 'float64')
lowDimEmbeddings = tsne.tsne(embeddings, 2, embeddingSize, 20.0, 1000)
filePaths = fileIndexMap.keys()
fileNames = [os.path.basename(filePath).split('.')[0] for filePath in filePaths]
labels = set(fileNames)
labels = zip(labels, numpy.arange(0, len(labels)))
labels = [(label, index) for label, index in labels]
labels = dict(labels)
labels = [labels[fileName] for fileName in fileNames]
lowDimEmbeddingsX, lowDimEmbeddingsY = lowDimEmbeddings[:,0], lowDimEmbeddings[:,1]
figure, axis = plt.subplots()
axis.scatter(lowDimEmbeddingsX, lowDimEmbeddingsY, 20, labels)
for index, fileName in enumerate(fileNames):
axis.annotate(fileName, (lowDimEmbeddingsX[index],lowDimEmbeddingsY[index]))
plt.grid()
plt.show()
def mapEmbeddings2LowDim(indexMap, embeddingsList):
filePaths = indexMap.keys()
fileNames = [os.path.basename(filePath).split('.')[0] for filePath in filePaths]
labels = set(fileNames)
labels = zip(labels, numpy.arange(0, len(labels)))
labels = [(label, index) for label, index in labels]
labels = dict(labels)
labels = [labels[fileName] for fileName in fileNames]
figure, axises = plt.subplots(1, len(embeddingsList))
for embeddings, axis in zip(embeddingsList, axises):
embeddingsCount, embeddingSize = embeddings.shape
embeddings = numpy.asarray(embeddings, 'float64')
lowDimEmbeddings = tsne.tsne(embeddings, 2, embeddingSize, 20.0, 1000)
lowDimEmbeddingsX, lowDimEmbeddingsY = lowDimEmbeddings[:,0], lowDimEmbeddings[:,1]
axis.grid()
axis.scatter(lowDimEmbeddingsX, lowDimEmbeddingsY, 20, labels)
for index, fileName in enumerate(fileNames):
axis.annotate(fileName, (lowDimEmbeddingsX[index], lowDimEmbeddingsY[index]))
figureManager = plt.get_current_fig_manager()
figureManager.resize(*figureManager.window.maxsize())
plt.show()
def compareEmbeddings(indexMap, embeddingsList, comparator=None, annotate=False, axisLabels=True):
embeddingsCount = len(indexMap)
embeddingIndices = numpy.arange(0, embeddingsCount)
xy = [xy for xy in itertools.product(embeddingIndices, embeddingIndices)]
xx, yy = zip(*xy)
if comparator is None:
comparator = lambda a, b: vectors.cosineSimilarity(a, b) + 1 / vectors.euclideanDistance(a, b)
function = lambda xy: comparator(embeddingsList[xy[0]], embeddingsList[xy[1]]) if xy[0] != xy[1] else numpy.nan
comparisons = map(function, xy)
comparisons = numpy.reshape(comparisons, (embeddingsCount, embeddingsCount))
nanxx, nanyy = numpy.where(numpy.isnan(comparisons))
nanxy = zip(nanxx, nanyy)
leftx = lambda x: max(x, 0)
rightx = lambda x: min(x, comparisons.shape[0])
lefty = lambda y: max(y, 0)
righty = lambda y: min(y, comparisons.shape[1])
for x, y in nanxy:
neighbours = comparisons[leftx(x-1):rightx(x+2),lefty(y-1):righty(y+2)]
neighbours = neighbours[neighbours > 0]
comparisons[x,y] = numpy.mean(neighbours)
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2)
if axisLabels:
filePaths = indexMap.keys()
fileNames = [os.path.basename(filePath).split('.')[0] for filePath in filePaths]
indices = [indexMap[filePath] for filePath in filePaths]
plt.xticks(indices, fileNames, size='small', rotation='vertical')
plt.yticks(indices, fileNames, size='small')
plt.contourf(comparisons)
if annotate:
for x, y, c in zip(xx, yy, comparisons.flatten()):
c = '{0:.1f}'.format(c*100)
plt.annotate(c, (x, y))
plt.show()
def buildEmbeddingsTree(indexMap, embeddings, comparator=None):
embeddingsCount = len(embeddings)
embeddingIndices = numpy.arange(0, embeddingsCount)
xy = [xy for xy in itertools.product(embeddingIndices, embeddingIndices)]
comparator = lambda a, b: vectors.euclideanDistance(a, b) + 1 / (2 + 2*vectors.cosineSimilarity(a, b))
function = lambda xy: comparator(embeddings[xy[0]], embeddings[xy[1]]) if xy[0] != xy[1] else 0
comparisons = map(function, xy)
maxComparison = max(comparisons)
comparisons = numpy.reshape(comparisons, (embeddingsCount, embeddingsCount)) / maxComparison
comparisons = ssd.squareform(comparisons)
links = linkage(comparisons)
fig, ax = plt.subplots()
fig.subplots_adjust(right=0.8)
names = map(lambda nameIndexPair: nameIndexPair[0].split('/')[-1], indexMap.items())
names = sorted(names)
dendrogram(
links,
leaf_rotation=90.,
leaf_font_size=8.,
orientation='right',
labels=names,
show_contracted=True,
show_leaf_counts=True)
plt.show() | apache-2.0 |
hlin117/statsmodels | statsmodels/examples/ex_univar_kde.py | 34 | 5127 | """
This example tests the nonparametric estimator
for several popular univariate distributions with the different
bandwidth selection methods - CV-ML; CV-LS; Scott's rule of thumb.
Produces one plot for each of the following six distributions:
1) Beta
2) f
3) Pareto
4) Laplace
5) Weibull
6) Poisson
"""
from __future__ import print_function
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
KDEMultivariate = sm.nonparametric.KDEMultivariate
np.random.seed(123456)
# Beta distribution
# Parameters
a = 2
b = 5
nobs = 250
support = np.random.beta(a, b, size=nobs)
rv = stats.beta(a, b)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(1)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Beta Distributed " \
"Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# f distribution
df = 100
dn = 100
nobs = 250
support = np.random.f(dn, df, size=nobs)
rv = stats.f(df, dn)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(2)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of f Distributed " \
"Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Pareto distribution
a = 2
nobs = 150
support = np.random.pareto(a, size=nobs)
rv = stats.pareto(a)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(3)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Pareto " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Laplace Distribution
mu = 0
s = 1
nobs = 250
support = np.random.laplace(mu, s, size=nobs)
rv = stats.laplace(mu, s)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(4)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Laplace " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Weibull Distribution
a = 1
nobs = 250
support = np.random.weibull(a, size=nobs)
rv = stats.weibull_min(a)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='c', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='c', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='c', bw='cv_ml')
plt.figure(5)
plt.plot(support[ix], rv.pdf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Weibull " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
# Poisson Distribution
a = 2
nobs = 250
support = np.random.poisson(a, size=nobs)
rv = stats.poisson(a)
ix = np.argsort(support)
dens_normal = KDEMultivariate(data=[support], var_type='o', bw='normal_reference')
dens_cvls = KDEMultivariate(data=[support], var_type='o', bw='cv_ls')
dens_cvml = KDEMultivariate(data=[support], var_type='o', bw='cv_ml')
plt.figure(6)
plt.plot(support[ix], rv.pmf(support[ix]), label='Actual')
plt.plot(support[ix], dens_normal.pdf()[ix], label='Scott')
plt.plot(support[ix], dens_cvls.pdf()[ix], label='CV_LS')
plt.plot(support[ix], dens_cvml.pdf()[ix], label='CV_ML')
plt.title("Nonparametric Estimation of the Density of Poisson " \
"Distributed Random Variable")
plt.legend(('Actual', 'Scott', 'CV_LS', 'CV_ML'))
plt.show()
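# Optional addition (not in the original example): print the bandwidths
# selected by each method for the last (Poisson) fit, so the selectors can
# also be compared numerically.  KDEMultivariate exposes the selected
# bandwidth through its `bw` attribute.
print("Normal reference bandwidth:", dens_normal.bw)
print("CV-LS bandwidth:", dens_cvls.bw)
print("CV-ML bandwidth:", dens_cvml.bw)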
| bsd-3-clause |
qifeigit/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
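# Optional addition (not part of the original example): report the size of the
# tree fitted on the last feature pair of the loop above, as a rough measure of
# how many thresholding rules make up its decision surface.
print("Last fitted tree: %d nodes, max depth %d"
      % (clf.tree_.node_count, clf.tree_.max_depth))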
| bsd-3-clause |
GaZ3ll3/scikit-image | doc/examples/applications/plot_rank_filters.py | 14 | 18001 | """
============
Rank filters
============
Rank filters are non-linear filters using the local gray-level ordering to
compute the filtered value. This ensemble of filters shares a common base: the
local gray-level histogram is computed on the neighborhood of a pixel (defined
by a 2-D structuring element). If the filtered value is taken as the middle
value of the histogram, we get the classical median filter.
Rank filters can be used for several purposes such as:
* image quality enhancement
e.g. image smoothing, sharpening
* image pre-processing
e.g. noise reduction, contrast enhancement
* feature extraction
e.g. border detection, isolated point detection
* post-processing
e.g. small object removal, object grouping, contour smoothing
Some well known filters are specific cases of rank filters [1]_ e.g.
morphological dilation, morphological erosion, median filters.
In this example, we will see how to filter a gray-level image using some of the
linear and non-linear filters available in skimage. We use the `camera` image
from `skimage.data` for all comparisons.
.. [1] Pierre Soille, On morphological operators based on rank filters, Pattern
Recognition 35 (2002) 527-535.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import img_as_ubyte
from skimage import data
noisy_image = img_as_ubyte(data.camera())
hist = np.histogram(noisy_image, bins=np.arange(0, 256))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3))
ax1.imshow(noisy_image, interpolation='nearest', cmap=plt.cm.gray)
ax1.axis('off')
ax2.plot(hist[1][:-1], hist[0], lw=2)
ax2.set_title('Histogram of grey values')
"""
.. image:: PLOT2RST.current_figure
Noise removal
=============
Some noise is added to the image: 1% of pixels are randomly set to 255 and 1% are
randomly set to 0. The **median** filter is applied to remove the noise.
"""
from skimage.filters.rank import median
from skimage.morphology import disk
noise = np.random.random(noisy_image.shape)
noisy_image = img_as_ubyte(data.camera())
noisy_image[noise > 0.99] = 255
noisy_image[noise < 0.01] = 0
fig, ax = plt.subplots(2, 2, figsize=(10, 7))
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, vmin=0, vmax=255, cmap=plt.cm.gray)
ax1.set_title('Noisy image')
ax1.axis('off')
ax2.imshow(median(noisy_image, disk(1)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax2.set_title('Median $r=1$')
ax2.axis('off')
ax3.imshow(median(noisy_image, disk(5)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax3.set_title('Median $r=5$')
ax3.axis('off')
ax4.imshow(median(noisy_image, disk(20)), vmin=0, vmax=255, cmap=plt.cm.gray)
ax4.set_title('Median $r=20$')
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
The added noise is efficiently removed: as the image defects are small (1
pixel wide), a small filter radius is sufficient. As the radius increases,
larger objects are filtered as well, such as the camera tripod. The median
filter is often used for noise removal because it preserves borders and e.g.
salt-and-pepper noise typically does not distort the gray-level.
Image smoothing
================
The example hereunder shows how a local **mean** filter smooths the camera man
image.
"""
from skimage.filters.rank import mean
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10, 7])
loc_mean = mean(noisy_image, disk(10))
ax1.imshow(noisy_image, vmin=0, vmax=255, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(loc_mean, vmin=0, vmax=255, cmap=plt.cm.gray)
ax2.set_title('Local mean $r=10$')
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
One may be interested in smoothing an image while preserving important borders
(median filters already achieved this), here we use the **bilateral** filter
that restricts the local neighborhood to pixel having a gray-level similar to
the central one.
.. note::
A different implementation is available for color images in
`skimage.filters.denoise_bilateral`.
"""
from skimage.filters.rank import mean_bilateral
noisy_image = img_as_ubyte(data.camera())
bilat = mean_bilateral(noisy_image.astype(np.uint16), disk(20), s0=10, s1=10)
fig, ax = plt.subplots(2, 2, figsize=(10, 7))
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(bilat, cmap=plt.cm.gray)
ax2.set_title('Bilateral mean')
ax2.axis('off')
ax3.imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray)
ax3.axis('off')
ax4.imshow(bilat[200:350, 350:450], cmap=plt.cm.gray)
ax4.axis('off')
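# A tiny numerical sketch (illustrative addition, not original code): only
# neighbours whose gray level lies within +/- s of the centre pixel enter the
# bilateral mean (here a symmetric interval, matching s0 = s1 = 10 above).
# The gray levels below are hypothetical.
centre, s = 100, 10
neighbourhood = np.array([96, 103, 250, 99, 180, 101])
kept = neighbourhood[np.abs(neighbourhood - centre) <= s]
print('bilateral mean around %d: %.1f' % (centre, kept.mean()))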
"""
.. image:: PLOT2RST.current_figure
One can see that the large continuous part of the image (e.g. sky) is smoothed
whereas other details are preserved.
Contrast enhancement
====================
We compare here how the global histogram equalization is applied locally.
The equalized image [2]_ has a roughly linear cumulative distribution function
for each pixel neighborhood. The local version [3]_ of the histogram
equalization emphasizes every local gray-level variation.
.. [2] http://en.wikipedia.org/wiki/Histogram_equalization
.. [3] http://en.wikipedia.org/wiki/Adaptive_histogram_equalization
"""
from skimage import exposure
from skimage.filters import rank
noisy_image = img_as_ubyte(data.camera())
# equalize globally and locally
glob = exposure.equalize_hist(noisy_image) * 255
loc = rank.equalize(noisy_image, disk(20))
# extract histogram for each image
hist = np.histogram(noisy_image, bins=np.arange(0, 256))
glob_hist = np.histogram(glob, bins=np.arange(0, 256))
loc_hist = np.histogram(loc, bins=np.arange(0, 256))
fig, ax = plt.subplots(3, 2, figsize=(10, 10))
ax1, ax2, ax3, ax4, ax5, ax6 = ax.ravel()
ax1.imshow(noisy_image, interpolation='nearest', cmap=plt.cm.gray)
ax1.axis('off')
ax2.plot(hist[1][:-1], hist[0], lw=2)
ax2.set_title('Histogram of gray values')
ax3.imshow(glob, interpolation='nearest', cmap=plt.cm.gray)
ax3.axis('off')
ax4.plot(glob_hist[1][:-1], glob_hist[0], lw=2)
ax4.set_title('Histogram of gray values')
ax5.imshow(loc, interpolation='nearest', cmap=plt.cm.gray)
ax5.axis('off')
ax6.plot(loc_hist[1][:-1], loc_hist[0], lw=2)
ax6.set_title('Histogram of gray values')
"""
.. image:: PLOT2RST.current_figure
Another way to maximize the number of gray-levels used for an image is to apply
a local auto-leveling, i.e. the gray-value of a pixel is proportionally
remapped between local minimum and local maximum.
The following example shows how local auto-level enhances the camera man
picture.
"""
from skimage.filters.rank import autolevel
noisy_image = img_as_ubyte(data.camera())
auto = autolevel(noisy_image.astype(np.uint16), disk(20))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10, 7])
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(auto, cmap=plt.cm.gray)
ax2.set_title('Local autolevel')
ax2.axis('off')
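# The remapping itself is a simple linear stretch; the sketch below is an
# illustrative addition (not original code) applying it to one hypothetical
# pixel value given its local minimum and maximum.
v, local_min, local_max = 120, 100, 180
stretched = (v - local_min) * 255.0 / (local_max - local_min)
print('auto-levelled value: %d' % stretched)  # (120 - 100) * 255 / 80 -> 63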
"""
.. image:: PLOT2RST.current_figure
This filter is very sensitive to local outliers; see the little white spot in
the left part of the sky. This is due to a local maximum which is very high
compared to the rest of the neighborhood. One can moderate this using the
percentile version of the auto-level filter, which uses given percentiles (one
lower, one upper) in place of the local minimum and maximum. The example
below illustrates how the percentile parameters influence the local auto-level
result.
"""
from skimage.filters.rank import autolevel_percentile
image = data.camera()
selem = disk(20)
loc_autolevel = autolevel(image, selem=selem)
loc_perc_autolevel0 = autolevel_percentile(image, selem=selem, p0=.00, p1=1.0)
loc_perc_autolevel1 = autolevel_percentile(image, selem=selem, p0=.01, p1=.99)
loc_perc_autolevel2 = autolevel_percentile(image, selem=selem, p0=.05, p1=.95)
loc_perc_autolevel3 = autolevel_percentile(image, selem=selem, p0=.1, p1=.9)
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax0, ax1, ax2 = axes
plt.gray()
ax0.imshow(np.hstack((image, loc_autolevel)), cmap=plt.cm.gray)
ax0.set_title('Original / auto-level')
ax1.imshow(
np.hstack((loc_perc_autolevel0, loc_perc_autolevel1)), vmin=0, vmax=255)
ax1.set_title('Percentile auto-level 0%,1%')
ax2.imshow(
np.hstack((loc_perc_autolevel2, loc_perc_autolevel3)), vmin=0, vmax=255)
ax2.set_title('Percentile auto-level 5% and 10%')
for ax in axes:
ax.axis('off')
"""
.. image:: PLOT2RST.current_figure
The morphological contrast enhancement filter replaces the central pixel by the
local maximum if the original pixel value is closest to the local maximum,
otherwise by the local minimum.
"""
from skimage.filters.rank import enhance_contrast
noisy_image = img_as_ubyte(data.camera())
enh = enhance_contrast(noisy_image, disk(5))
fig, ax = plt.subplots(2, 2, figsize=[10, 7])
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(enh, cmap=plt.cm.gray)
ax2.set_title('Local morphological contrast enhancement')
ax2.axis('off')
ax3.imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray)
ax3.axis('off')
ax4.imshow(enh[200:350, 350:450], cmap=plt.cm.gray)
ax4.axis('off')
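# Mechanism sketch (illustrative addition, not original code): for a single
# pixel the filter keeps whichever of the local minimum / local maximum is
# closer to the original value; the gray levels below are hypothetical.
v, local_min, local_max = 90, 80, 200
enhanced = local_max if (local_max - v) < (v - local_min) else local_min
print('enhanced value: %d' % enhanced)  # 90 is closer to 80, so -> 80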
"""
.. image:: PLOT2RST.current_figure
The percentile version of the local morphological contrast enhancement uses
percentiles *p0* and *p1* instead of the local minimum and maximum.
"""
from skimage.filters.rank import enhance_contrast_percentile
noisy_image = img_as_ubyte(data.camera())
penh = enhance_contrast_percentile(noisy_image, disk(5), p0=.1, p1=.9)
fig, ax = plt.subplots(2, 2, figsize=[10, 7])
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(penh, cmap=plt.cm.gray)
ax2.set_title('Local percentile morphological\n contrast enhancement')
ax2.axis('off')
ax3.imshow(noisy_image[200:350, 350:450], cmap=plt.cm.gray)
ax3.axis('off')
ax4.imshow(penh[200:350, 350:450], cmap=plt.cm.gray)
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
Image threshold
===============
The Otsu threshold [4]_ method can be applied locally using the local gray-
level distribution. In the example below, for each pixel, an "optimal"
threshold is determined by maximizing the variance between two classes of
pixels of the local neighborhood defined by a structuring element.
The example compares the local threshold with the global threshold
`skimage.filters.threshold_otsu`.
.. note::
Local is much slower than global thresholding. A function for global Otsu
    thresholding can be found in `skimage.filters.threshold_otsu`.
.. [4] http://en.wikipedia.org/wiki/Otsu's_method
"""
from skimage.filters.rank import otsu
from skimage.filters import threshold_otsu
p8 = data.page()
radius = 10
selem = disk(radius)
# t_loc_otsu is an image
t_loc_otsu = otsu(p8, selem)
loc_otsu = p8 >= t_loc_otsu
# t_glob_otsu is a scalar
t_glob_otsu = threshold_otsu(p8)
glob_otsu = p8 >= t_glob_otsu
fig, ax = plt.subplots(2, 2)
ax1, ax2, ax3, ax4 = ax.ravel()
fig.colorbar(ax1.imshow(p8, cmap=plt.cm.gray), ax=ax1)
ax1.set_title('Original')
ax1.axis('off')
fig.colorbar(ax2.imshow(t_loc_otsu, cmap=plt.cm.gray), ax=ax2)
ax2.set_title('Local Otsu ($r=%d$)' % radius)
ax2.axis('off')
ax3.imshow(p8 >= t_loc_otsu, cmap=plt.cm.gray)
ax3.set_title('Original >= local Otsu')
ax3.axis('off')
ax4.imshow(glob_otsu, cmap=plt.cm.gray)
ax4.set_title('Global Otsu ($t=%d$)' % t_glob_otsu)
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
The following example shows how local Otsu thresholding handles a global level
shift applied to a synthetic image.
"""
n = 100
theta = np.linspace(0, 10 * np.pi, n)
x = np.sin(theta)
m = (np.tile(x, (n, 1)) * np.linspace(0.1, 1, n) * 128 + 128).astype(np.uint8)
radius = 10
t = rank.otsu(m, disk(radius))
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(m)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(m >= t, interpolation='nearest')
ax2.set_title('Local Otsu ($r=%d$)' % radius)
ax2.axis('off')
"""
.. image:: PLOT2RST.current_figure
Image morphology
================
Local maximum and local minimum are the base operators for gray-level
morphology.
.. note::
    `skimage.morphology.dilation` and `skimage.morphology.erosion` are
    equivalent filters (see below for a comparison).
Here is an example of the classical morphological gray-level filters: opening,
closing and morphological gradient.
"""
from skimage.filters.rank import maximum, minimum, gradient
noisy_image = img_as_ubyte(data.camera())
closing = maximum(minimum(noisy_image, disk(5)), disk(5))
opening = minimum(maximum(noisy_image, disk(5)), disk(5))
grad = gradient(noisy_image, disk(5))
# display results
fig, ax = plt.subplots(2, 2, figsize=[10, 7])
ax1, ax2, ax3, ax4 = ax.ravel()
ax1.imshow(noisy_image, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
ax2.imshow(closing, cmap=plt.cm.gray)
ax2.set_title('Gray-level closing')
ax2.axis('off')
ax3.imshow(opening, cmap=plt.cm.gray)
ax3.set_title('Gray-level opening')
ax3.axis('off')
ax4.imshow(grad, cmap=plt.cm.gray)
ax4.set_title('Morphological gradient')
ax4.axis('off')
"""
.. image:: PLOT2RST.current_figure
Feature extraction
===================
Local histograms can be exploited to compute local entropy, which is related to
the local image complexity. Entropy is computed using the base-2 logarithm,
i.e. the filter returns the minimum number of bits needed to encode the local
gray-level distribution.
`skimage.rank.entropy` returns the local entropy on a given structuring
element. The following example applies this filter on 8- and 16-bit
images.
.. note::
    To better use the available image bit depth, the function returns 10x entropy for
8-bit images and 1000x entropy for 16-bit images.
"""
from skimage import data
from skimage.filters.rank import entropy
from skimage.morphology import disk
import numpy as np
import matplotlib.pyplot as plt
image = data.camera()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
fig.colorbar(ax1.imshow(image, cmap=plt.cm.gray), ax=ax1)
ax1.set_title('Image')
ax1.axis('off')
fig.colorbar(ax2.imshow(entropy(image, disk(5)), cmap=plt.cm.jet), ax=ax2)
ax2.set_title('Entropy')
ax2.axis('off')
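# Entropy sketch (illustrative addition, not original code): the value returned
# for a pixel is the Shannon entropy, in bits, of the gray-level histogram
# inside the structuring element.  A flat histogram over 8 gray levels
# therefore gives 3 bits.
patch = np.repeat(np.arange(8), 4)  # hypothetical 32-pixel neighbourhood
counts = np.bincount(patch)
p = counts[counts > 0] / float(counts.sum())
print('local entropy: %.1f bits' % -np.sum(p * np.log2(p)))  # -> 3.0 bits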
"""
.. image:: PLOT2RST.current_figure
Implementation
==============
The central part of the `skimage.rank` filters is built on a sliding window
that updates the local gray-level histogram. This approach limits the algorithm
complexity to O(n) where n is the number of image pixels. The complexity is
also limited with respect to the structuring element size.
In the following we compare the performance of different implementations
available in `skimage`.
"""
from time import time
from scipy.ndimage import percentile_filter
from skimage.morphology import dilation
from skimage.filters.rank import median, maximum
def exec_and_timeit(func):
"""Decorator that returns both function results and execution time."""
def wrapper(*arg):
t1 = time()
res = func(*arg)
t2 = time()
ms = (t2 - t1) * 1000.0
return (res, ms)
return wrapper
@exec_and_timeit
def cr_med(image, selem):
return median(image=image, selem=selem)
@exec_and_timeit
def cr_max(image, selem):
return maximum(image=image, selem=selem)
@exec_and_timeit
def cm_dil(image, selem):
return dilation(image=image, selem=selem)
@exec_and_timeit
def ndi_med(image, n):
return percentile_filter(image, 50, size=n * 2 - 1)
"""
Comparison between
* `filters.rank.maximum`
* `morphology.dilate`
on increasing structuring element size:
"""
a = data.camera()
rec = []
e_range = range(1, 20, 2)
for r in e_range:
elem = disk(r + 1)
rc, ms_rc = cr_max(a, elem)
rcm, ms_rcm = cm_dil(a, elem)
rec.append((ms_rc, ms_rcm))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to element size')
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Element radius')
ax.plot(e_range, rec)
ax.legend(['filters.rank.maximum', 'morphology.dilate'])
"""
.. image:: PLOT2RST.current_figure
and increasing image size:
"""
r = 9
elem = disk(r + 1)
rec = []
s_range = range(100, 1000, 100)
for s in s_range:
a = (np.random.random((s, s)) * 256).astype(np.uint8)
(rc, ms_rc) = cr_max(a, elem)
(rcm, ms_rcm) = cm_dil(a, elem)
rec.append((ms_rc, ms_rcm))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to image size')
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Image size')
ax.plot(s_range, rec)
ax.legend(['filters.rank.maximum', 'morphology.dilate'])
"""
.. image:: PLOT2RST.current_figure
Comparison between:
* `filters.rank.median`
* `scipy.ndimage.percentile`
on increasing structuring element size:
"""
a = data.camera()
rec = []
e_range = range(2, 30, 4)
for r in e_range:
elem = disk(r + 1)
rc, ms_rc = cr_med(a, elem)
rndi, ms_ndi = ndi_med(a, r)
rec.append((ms_rc, ms_ndi))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to element size')
ax.plot(e_range, rec)
ax.legend(['filters.rank.median', 'scipy.ndimage.percentile'])
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Element radius')
"""
.. image:: PLOT2RST.current_figure
Comparison of outcome of the three methods:
"""
fig, ax = plt.subplots()
ax.imshow(np.hstack((rc, rndi)))
ax.set_title('filters.rank.median vs. scipy.ndimage.percentile')
ax.axis('off')
"""
.. image:: PLOT2RST.current_figure
and increasing image size:
"""
r = 9
elem = disk(r + 1)
rec = []
s_range = [100, 200, 500, 1000]
for s in s_range:
a = (np.random.random((s, s)) * 256).astype(np.uint8)
(rc, ms_rc) = cr_med(a, elem)
rndi, ms_ndi = ndi_med(a, r)
rec.append((ms_rc, ms_ndi))
rec = np.asarray(rec)
fig, ax = plt.subplots()
ax.set_title('Performance with respect to image size')
ax.plot(s_range, rec)
ax.legend(['filters.rank.median', 'scipy.ndimage.percentile'])
ax.set_ylabel('Time (ms)')
ax.set_xlabel('Image size')
"""
.. image:: PLOT2RST.current_figure
"""
plt.show()
| bsd-3-clause |
annahs/atmos_research | WHI_long_term_lag_time_testing.py | 1 | 4432 | import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors
import matplotlib.cm as cm
from pprint import pprint
import calendar
from datetime import datetime
from datetime import timedelta
import mysql.connector
import math
#need to filter on spike times, hk (flow, yag, etc), RH (for in-cloud times), local BB
start_ts = datetime(2009,7,5,12) #2010 - 20100610 2012 - 20100405
end_ts = datetime(2012,6,1) #2010 - 20100726 2012 - 20100601
bin_incr = 10
min_bin = 70
max_bin = 220
timestep = 1 #hours for RH and cluster filtering
#hk parameters
sample_min = 117 #117 for all 2009-2012
sample_max = 123 #123 for all 2009-2012
yag_min = 3.8 #3.8 for all 2009-2012
yag_max = 6 #6 for all 2009-2012
#local BB times
fire_span_09s=datetime.strptime('2009/07/27', '%Y/%m/%d') #dates following Takahama et al (2011) doi:10.5194/acp-11-6367-2011
fire_span_09f=datetime.strptime('2009/08/08', '%Y/%m/%d')
fire_span_10s=datetime.strptime('2010/07/26 09:00', '%Y/%m/%d %H:%M') #jason's BC clear report
fire_span_10f=datetime.strptime('2010/07/28 09:30', '%Y/%m/%d %H:%M')
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
cursor.execute('''(SELECT
hrly.mean_lag_time,
hy.precip_total,
hy.precip_first_72h,
hy.precip_last_72h,
hy.cluster_number_1h,
hy.cluster_number_6h,
met.RH
FROM whi_sp2_hourly_data hrly
JOIN whi_hysplit_hourly_data hy on hrly.hysplit_hourly_id = hy.id
JOIN whi_sampling_conditions met on hrly.whi_sampling_cond_id = met.id
WHERE
met.RH <= %s
AND hrly.id > %s
AND hrly.mean_lag_time IS NOT NULL)''',
(101,0))
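# query parameters: RH <= 101 keeps every RH value (the RH-based split is done
# below in Python) and id > 0 keeps all hourly records with a non-null mean lag time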
hrly_data = cursor.fetchall()
npac = []
spac = []
wpac = []
ncan = []
test = []
hp = []
lp = []
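# split records into heavy/light precipitation using RH >= 90% as the proxy (hp/lp),
# and group them by 6-h HYSPLIT back-trajectory cluster: npac (clusters 1,3,5,10),
# wpac (2,7), ncan (4) and spac (6,8,9) -- presumably N. Pacific, W. Pacific,
# N. Canada and S. Pacific source regions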
for row in hrly_data:
thickly_coated = False
heavy_precip = False
lag = row[0]
precip_total = row[1]
precip_first_72h = row[2]
precip_last_72h = row[3]
cluster_number_1h = row[4]
cluster_number_6h = row[5]
RH = row[6]
if RH >= 90:
heavy_precip = True
hp.append(lag)
else:
lp.append(lag)
if lag >= 1:
thickly_coated = True
if cluster_number_6h in [1,3,5,10]:
npac.append([lag,precip_total,heavy_precip,thickly_coated])
test.append([lag,precip_total,heavy_precip,thickly_coated])
if cluster_number_6h in [2,7]:
wpac.append([lag,precip_total,heavy_precip,thickly_coated])
test.append([lag,precip_total,heavy_precip,thickly_coated])
if cluster_number_6h == 4:
ncan.append([lag,precip_total,heavy_precip,thickly_coated])
test.append([lag,precip_total,heavy_precip,thickly_coated])
if cluster_number_6h in [6,8,9]:
spac.append([lag,precip_total,heavy_precip,thickly_coated])
test.append([lag,precip_total,heavy_precip,thickly_coated])
print 'hp',np.median(hp),'lp',np.median(lp)
hc = 0
tc = 0
for lag in lp:
if lag >=2:
hc += 1
print hc*1.0/len(lp)
print 'npac', 'f_hp', sum([row[2] for row in npac])*1.0/len(npac), 'f_tc', sum([row[3] for row in npac])*1.0/len(npac), len(npac)
print 'wpac', 'f_hp', sum([row[2] for row in wpac])*1.0/len(wpac), 'f_tc', sum([row[3] for row in wpac])*1.0/len(wpac), len(wpac)
print 'ncan', 'f_hp', sum([row[2] for row in ncan])*1.0/len(ncan), 'f_tc', sum([row[3] for row in ncan])*1.0/len(ncan), len(ncan)
print 'spac', 'f_hp', sum([row[2] for row in spac])*1.0/len(spac), 'f_tc', sum([row[3] for row in spac])*1.0/len(spac), len(spac)
fig = plt.figure()
ax1 = fig.add_subplot(111)
#ax1.scatter([row[1] for row in npac],[row[0] for row in npac],color = 'b')
#ax1.scatter([row[1] for row in wpac],[row[0] for row in wpac],color = 'g')
#ax1.scatter([row[1] for row in ncan],[row[0] for row in ncan],color = 'r')
#ax1.scatter([row[1] for row in spac],[row[0] for row in spac],color = 'c')
#ax1.hexbin([row[1] for row in test],[row[0] for row in test], cmap=cm.jet, gridsize = 50,mincnt=1)
#ax1.hist([row[1] for row in test],bins=40)
ax1.hist(lp,bins=40,color = 'r')
ax1.hist(hp,bins=40,color = 'b')
#ax1.hist([row[0] for row in npac],bins=40,color = 'b')
#ax1.hist([row[0] for row in wpac],bins=40,color = 'g')
#ax1.hist([row[0] for row in ncan],bins=40,color = 'r')
#ax1.hist([row[0] for row in spac],bins=40,color = 'c')
#ax1.hist([row[0] for row in test],bins=40,color = 'c')
#ax1.scatter(precips_total,lag_times)
#ax1.axvline(30)
#ax1.axhline(1)
ax1.set_ylabel('lags')
ax1.set_xlabel('precip')
plt.show() | mit |
aminert/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
this function will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
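# Illustrative usage only (the dataset name below is an example; the dataset must
# already exist under MLCOMP_DATASETS_HOME as downloaded from mlcomp.org):
#
# from sklearn.datasets import load_mlcomp
# news_train = load_mlcomp('20news-18828', set_='train')
# print(news_train.target_names)
# print(news_train.filenames.shape)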
| bsd-3-clause |
cbosoft/pi_rheo_proj | etc/rd.py | 1 | 2119 | from glob import glob
from sys import path
import numpy as np
from matplotlib import use as mpluse
mpluse('Agg')
import matplotlib.pyplot as plt
path.append("./../bin/")
from plothelp import plot_fit
from plothelp import read_logf
from filter import filter
import resx
I_EMFs = list()
T_MSs = list()
aliemfs = list()
altms = list()
ref_logs = glob("./../logs/mcal*11.10*.csv")
for i in range(0, len(ref_logs)):
print "Calculating...",
print " {}".format(ref_logs[i])
__, st, __, __, f_spd1, r_spd1, f_spd2, r_spd2, cra, crb, T, Vpz, Vms, gamma_dot, tau, tag = read_logf(ref_logs[i])
# mcal_[name]_[viscosity]_[date+time].csv
v_term = ref_logs[i].split('_')[2]
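# e.g. a (hypothetical) log named "./../logs/mcal_run1_glycerol@25_11.10.17.csv"
# splits on "_" to give v_term == "glycerol@25"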
print "\tVisc. Term:", v_term
viscosity = 0.0
try:
viscosity = float(v_term) # if it is any of the 'smart' options, this will not work
except:
try:
viscosity = resx.get_mu_of_T(v_term, T) # will not work if it is a mixture
except:
parts = v_term.split("@")
viscosity = resx.get_mu_of_T(parts[0], T, parts[1]) # will not work if the term is wrong
print "\tViscosity:", np.average(viscosity)
## filtering!
Vms = filter(st, Vms)
cra = filter(st, cra)
I_MS = resx.get_current(cra)
I_CO = resx.get_current_coil(Vms)
I_EMF = [0.0] * len(I_MS)
for j in range(0, len(I_MS)):
I_EMF[j] = I_MS[j] - I_CO[j]
aliemfs.extend(I_EMF)
I_EMFs.append(np.mean(I_EMF))
stress = viscosity * gamma_dot
torque = resx.get_torque(stress, 15)
#print "\tStrain: ", np.average(gamma_dot)
#print "\tStress: ", stress
#print "\tTorque: ", torque
#print "\tI emf : ", I_EMFs[-1]
altms.extend(torque)
T_MSs.append(np.mean(torque))
#print T_MSs
#print I_EMFs
fit, f_eqn, mot_cal = plot_fit(I_EMFs, T_MSs, 1, x_name="Iemf", y_name="T")
f = plt.figure()
ax = f.add_subplot(111)
ax.plot(aliemfs, altms, "x")
ax.plot(I_EMFs, T_MSs, "o")
ax.plot(I_EMFs, fit)
#plt.show()
plt.savefig("trd.png")
print "New fit:"
print "\tT = Iemf * {} + {}".format(mot_cal[0], mot_cal[1])
| gpl-3.0 |
nlholdem/icodoom | tmp/test_bp_learning_with_filters.py | 2 | 1610 | #!/usr/bin/python3
import deep_feedback_learning
import numpy as np
import matplotlib.pyplot as plt
print("testBackpropWithFilters")
with open('test_bp_filt_py.csv', 'wb') as csvfile:
csvfile.close()
with open('test_bp_filt_py.csv', 'ab') as csvfile:
# two input neurons, two hidden ones and one output neuron
# filter banks on the input and hidden layers; temporal filter lengths run from minT to maxT time steps (set below)
nFiltersInput = 10
nFiltersHidden = 10
# nFiltersHidden = 0 means that the layer is linear without filters
minT = 3
maxT = 15
net = deep_feedback_learning.DeepFeedbackLearning(2, [2], 1, nFiltersInput, nFiltersHidden, minT,maxT)
# init the weights
net.initWeights(0.01);
net.setAlgorithm(deep_feedback_learning.DeepFeedbackLearning.backprop);
net.setLearningRate(1)
net.seedRandom(88)
#net.random_seed(10)
# create the input arrays in numpy fashion
inp = np.zeros(2)
err = np.zeros(1)
maxstep = 12000
outp = np.zeros(maxstep)
rep = 1000
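# stimulus protocol: an input pulse on steps 101-102 of every 1000-step cycle,
# and an error (teaching) pulse on steps 111-114 that is switched off after
# step 9000, presumably to check that the learned response persists without it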
for i in range(maxstep):
if (((i%rep) > 100) and ((i%rep)<103)):
inp[0] = 1
else :
inp[0] = 0
if (((i%rep) > 110) and ((i%rep)<115) and (i < 9000)) :
err[0] = 1
else :
err[0] = 0
# does both forward propagation and backpropagation
net.doStep(inp,err)
# gets the output of the output neuron
outp[i] = net.getOutput(0)
np.savetxt(csvfile,np.hstack((inp,err,outp[i])),delimiter="\t",newline="\t")
crlf="\n"
csvfile.write(crlf.encode())
plt.plot(outp)
plt.show()
| gpl-3.0 |
mandli/surge-examples | mumbai/setplot.py | 1 | 16046 |
"""
Set up the plot figures, axes, and items to be done for each frame.
This module is imported by the plotting routines and then the
function setplot is called to set the plot parameters.
"""
article = False
import os
import numpy
import scipy.io
# Plot customization
import matplotlib
# Use LaTeX for all text
matplotlib.rcParams['text.usetex'] = True
# Markers and line widths
matplotlib.rcParams['lines.linewidth'] = 2.0
matplotlib.rcParams['lines.markersize'] = 6
matplotlib.rcParams['lines.markersize'] = 8
# Font Sizes
matplotlib.rcParams['font.size'] = 16
matplotlib.rcParams['axes.labelsize'] = 16
matplotlib.rcParams['legend.fontsize'] = 12
matplotlib.rcParams['xtick.labelsize'] = 16
matplotlib.rcParams['ytick.labelsize'] = 16
# DPI of output images
if article:
matplotlib.rcParams['savefig.dpi'] = 300
else:
matplotlib.rcParams['savefig.dpi'] = 100
import matplotlib.pyplot as plt
import datetime
from clawpack.visclaw import colormaps
import clawpack.clawutil.data as clawutil
import clawpack.amrclaw.data as amrclaw
import clawpack.geoclaw.data as geodata
import clawpack.geoclaw.surge.plot as surge
try:
from setplotfg import setplotfg
except:
setplotfg = None
storm_num = 1
# Gauge support
days2seconds = lambda days: days * 60.0**2 * 24.0
date2seconds = lambda date: days2seconds(date.days) + date.seconds
seconds2days = lambda secs: secs / (24.0 * 60.0**2)
min2deg = lambda minutes: minutes / 60.0
ft2m = lambda x: 0.3048 * x
# Gauge landfall reference times
gauge_landfall = []
for i in xrange(3):
if storm_num == 1:
# Storm 1
gauge_landfall.append(datetime.datetime(1997, 11, 15, 3) - datetime.datetime(1997, 1, 1, 0))
elif storm_num == 2:
# Storm 2
gauge_landfall.append(datetime.datetime(2008, 12, 17, 0) - datetime.datetime(2008, 1, 1, 0))
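# gauge_landfall holds one landfall offset (a timedelta from the start of the storm
# year) per gauge; all three entries are identical and are used later to plot the
# gauge time series in days relative to landfall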
def setplot(plotdata):
r"""Setplot function for surge plotting"""
plotdata.clearfigures() # clear any old figures,axes,items data
plotdata.format = 'binary'
fig_num_counter = surge.figure_counter()
# Load data from output
clawdata = clawutil.ClawInputData(2)
clawdata.read(os.path.join(plotdata.outdir,'claw.data'))
amrdata = amrclaw.AmrclawInputData(clawdata)
amrdata.read(os.path.join(plotdata.outdir,'amr.data'))
physics = geodata.GeoClawData()
physics.read(os.path.join(plotdata.outdir,'geoclaw.data'))
surge_data = geodata.SurgeData()
surge_data.read(os.path.join(plotdata.outdir,'surge.data'))
friction_data = geodata.FrictionData()
friction_data.read(os.path.join(plotdata.outdir,'friction.data'))
# Load storm track
track = surge.track_data(os.path.join(plotdata.outdir,'fort.track'))
# Calculate landfall time, off by a day, maybe leap year issue?
if storm_num == 1:
# Storm 1
landfall_dt = datetime.datetime(1997, 11, 15, 3) - datetime.datetime(1997, 1, 1, 0)
elif storm_num == 2:
# Storm 2
landfall_dt = datetime.datetime(2008, 12, 17, 0) - datetime.datetime(2008, 1, 1, 0)
landfall = (landfall_dt.days) * 24.0 * 60**2 + landfall_dt.seconds
# Set afteraxes function
surge_afteraxes = lambda cd: surge.surge_afteraxes(cd,
track, landfall, plot_direction=False)
# Color limits
surface_range = 5.0
speed_range = 3.0
eta = physics.sea_level
if not isinstance(eta,list):
eta = [eta]
surface_limits = [-5.0, 5.0]
# surface_contours = numpy.linspace(-surface_range, surface_range,11)
surface_contours = [-5,-4.5,-4,-3.5,-3,-2.5,-2,-1.5,-1,-0.5,0.5,1,1.5,2,2.5,3,3.5,4,4.5,5]
surface_ticks = [-5,-4,-3,-2,-1,0,1,2,3,4,5]
surface_labels = [str(value) for value in surface_ticks]
speed_limits = [0.0,speed_range]
speed_contours = numpy.linspace(0.0,speed_range,13)
speed_ticks = [0,1,2,3]
speed_labels = [str(value) for value in speed_ticks]
# wind_limits = [0,64]
wind_limits = [0,50]
# wind_limits = [-0.002,0.002]
pressure_limits = [935,1013]
friction_bounds = [0.01,0.04]
# vorticity_limits = [-1.e-2,1.e-2]
# def pcolor_afteraxes(current_data):
# surge_afteraxes(current_data)
# surge.gauge_locations(current_data,gaugenos=[6])
def afteraxes(current_data):
surge_afteraxes(current_data)
def add_custom_colorbar_ticks_to_axes(axes, item_name, ticks, tick_labels=None):
axes.plotitem_dict[item_name].colorbar_ticks = ticks
axes.plotitem_dict[item_name].colorbar_tick_labels = tick_labels
# ==========================================================================
# ==========================================================================
# Plot specifications
# ==========================================================================
# ==========================================================================
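# Each region entry is [x-limits, y-limits, per-level AMR patch-edge flags]; the
# flags are passed to amr_patchedges_show, so the "(Grids)" variants draw grid
# outlines and the "(No Grids)" variants do not.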
regions = {"Full Domain (Grids)": [[clawdata.lower[0], clawdata.upper[0]],
[clawdata.lower[1], clawdata.upper[1]],
[1, 1, 1, 1, 1, 1, 1]],
"Mumbai Regio (Grids)": [[70, 75], [17, 22], [1, 1, 1, 1, 1, 1, 1]],
"Mumbai (Grids)": [[72.6, 73.15], [18.80, 19.25], [1, 1, 1, 1, 1, 1, 1]],
"Full Domain (No Grids)": [[clawdata.lower[0], clawdata.upper[0]],
[clawdata.lower[1], clawdata.upper[1]],
[0, 0, 0, 0, 0, 0, 0]],
"Mumbai Region (No Grids)": [[70, 75], [17, 22], [0, 0, 0, 0, 0, 0, 0]],
"Mumbai (No Grids)": [[72.6, 73.15], [18.80, 19.25], [0, 0, 0, 0, 0, 0, 0]]}
full_xlimits = regions['Full Domain (Grids)'][0]
full_ylimits = regions['Full Domain (Grids)'][1]
for (name, region_data) in regions.iteritems():
#
# Surface
#
plotfigure = plotdata.new_plotfigure(name='Surface - %s' % name,
figno=fig_num_counter.get_counter())
plotfigure.show = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Surface'
plotaxes.scaled = True
plotaxes.xlimits = region_data[0]
plotaxes.ylimits = region_data[1]
plotaxes.afteraxes = afteraxes
surge.add_surface_elevation(plotaxes, plot_type='pcolor',
bounds=surface_limits)
# surge.add_surface_elevation(plotaxes, plot_type='contourf',
# contours=surface_contours)
surge.add_land(plotaxes,topo_min=-10.0,topo_max=5.0)
surge.add_bathy_contours(plotaxes)
if article:
plotaxes.plotitem_dict['surface'].add_colorbar = False
else:
add_custom_colorbar_ticks_to_axes(plotaxes, 'surface', surface_ticks, surface_labels)
plotaxes.plotitem_dict['land'].amr_patchedges_show = region_data[2]
plotaxes.plotitem_dict['surface'].amr_patchedges_show = region_data[2]
#
# Water Speed
#
plotfigure = plotdata.new_plotfigure(name='Currents - %s' % name,
figno=fig_num_counter.get_counter())
plotfigure.show = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Currents'
plotaxes.scaled = True
plotaxes.xlimits = region_data[0]
plotaxes.ylimits = region_data[1]
plotaxes.afteraxes = afteraxes
# Speed
surge.add_speed(plotaxes, plot_type='pcolor', bounds=speed_limits)
# surge.add_speed(plotaxes, plot_type='contourf', contours=speed_contours)
if article:
plotaxes.plotitem_dict['speed'].add_colorbar = False
else:
add_custom_colorbar_ticks_to_axes(plotaxes, 'speed', speed_ticks, speed_labels)
# Land
surge.add_land(plotaxes,topo_min=-10.0,topo_max=5.0)
plotaxes.plotitem_dict['speed'].amr_patchedges_show = region_data[2]
plotaxes.plotitem_dict['land'].amr_patchedges_show = region_data[2]
#
# Friction field
#
plotfigure = plotdata.new_plotfigure(name='Friction',
figno=fig_num_counter.get_counter())
plotfigure.show = friction_data.variable_friction and False
def friction_after_axes(cd):
plt.subplots_adjust(left=0.08, bottom=0.04, right=0.97, top=0.96)
plt.title(r"Manning's $n$ Coefficient")
# surge_afteraxes(cd)
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = full_xlimits
plotaxes.ylimits = full_ylimits
# plotaxes.title = "Manning's N Coefficient"
plotaxes.afteraxes = friction_after_axes
plotaxes.scaled = True
surge.add_friction(plotaxes, bounds=friction_bounds)
plotaxes.plotitem_dict['friction'].amr_patchedges_show = [0,0,0,0,0,0,0]
plotaxes.plotitem_dict['friction'].colorbar_label = "$n$"
# ==========================
# Hurricane Forcing fields
# ==========================
grids = [[0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1]]
label = ["(No Grids)", "(Grids)"]
for i in xrange(2):
# Pressure field
plotfigure = plotdata.new_plotfigure(name='Pressure %s' % label[i],
figno=fig_num_counter.get_counter())
plotfigure.show = surge_data.pressure_forcing and True
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = full_xlimits
plotaxes.ylimits = full_ylimits
plotaxes.title = "Pressure Field"
plotaxes.afteraxes = afteraxes
plotaxes.scaled = True
surge.add_pressure(plotaxes, bounds=pressure_limits)
surge.add_land(plotaxes)
plotaxes.plotitem_dict['pressure'].amr_patchedges_show = grids[i]
plotaxes.plotitem_dict['land'].amr_patchedges_show = grids[i]
# Wind field
plotfigure = plotdata.new_plotfigure(name='Wind Speed %s' % label[i],
figno=fig_num_counter.get_counter())
plotfigure.show = surge_data.wind_forcing and True
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = full_xlimits
plotaxes.ylimits = full_ylimits
plotaxes.title = "Wind Field"
plotaxes.afteraxes = afteraxes
plotaxes.scaled = True
surge.add_wind(plotaxes, bounds=wind_limits, plot_type='pcolor')
surge.add_land(plotaxes)
plotaxes.plotitem_dict['wind'].amr_patchedges_show = grids[i]
plotaxes.plotitem_dict['land'].amr_patchedges_show = grids[i]
# =====================
# Gauge Location Plot
# =====================
gauge_xlimits = regions["Mumbai (Grids)"][0]
gauge_ylimits = regions["Mumbai (Grids)"][1]
# gauge_location_shrink = 0.75
def gauge_after_axes(cd):
# plt.subplots_adjust(left=0.12, bottom=0.06, right=0.97, top=0.97)
surge_afteraxes(cd)
# import pdb; pdb.set_trace()
surge.gauge_locations(cd, gaugenos=[1, 2, 3])
plt.title("Gauge Locations")
plotfigure = plotdata.new_plotfigure(name='Gauge Locations',
figno=fig_num_counter.get_counter())
plotfigure.show = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.title = 'Surface'
plotaxes.scaled = True
plotaxes.xlimits = gauge_xlimits
plotaxes.ylimits = gauge_ylimits
plotaxes.afteraxes = gauge_after_axes
surge.add_surface_elevation(plotaxes, plot_type='pcolor',
bounds=surface_limits)
# surge.plot.add_surface_elevation(plotaxes, plot_type="contourf")
add_custom_colorbar_ticks_to_axes(plotaxes, 'surface', surface_ticks, surface_labels)
surge.add_land(plotaxes)
# plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0,0,0,0,0,0,0]
# plotaxes.plotitem_dict['surface'].add_colorbar = False
# plotaxes.plotitem_dict['surface'].pcolor_cmap = plt.get_cmap('jet')
# plotaxes.plotitem_dict['surface'].pcolor_cmap = plt.get_cmap('gist_yarg')
# plotaxes.plotitem_dict['surface'].pcolor_cmin = 0.0
# plotaxes.plotitem_dict['surface'].pcolor_cmax = 5.0
plotaxes.plotitem_dict['surface'].amr_patchedges_show = [0,0,0,0,0,0,0]
plotaxes.plotitem_dict['land'].amr_patchedges_show = [0,0,0,0,0,0,0]
# ========================================================================
# Figures for gauges
# ========================================================================
plotfigure = plotdata.new_plotfigure(name='Surface & topo', figno=300, \
type='each_gauge')
plotfigure.show = True
plotfigure.clf_each_gauge = True
# plotfigure.kwargs['figsize'] = (16,10)
def gauge_after_axes(cd):
if cd.gaugeno in [1, 2, 3]:
axes = plt.gca()
# Add GeoClaw gauge data
geoclaw_gauge = cd.gaugesoln
axes.plot(seconds2days(geoclaw_gauge.t - date2seconds(gauge_landfall[1])),
geoclaw_gauge.q[3,:], 'b--')
# Fix up plot
axes.set_title('Station %s' % cd.gaugeno)
axes.set_xlabel('Days relative to landfall')
axes.set_ylabel('Surface (m)')
axes.set_xlim([-2,1])
axes.set_ylim([-1,5])
axes.set_xticks([-2,-1,0,1])
axes.set_xticklabels([r"$-2$",r"$-1$",r"$0$",r"$1$"])
axes.grid(True)
axes.legend()
plt.hold(False)
# surge.gauge_afteraxes(cd)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = [-2,1]
# plotaxes.xlabel = "Days from landfall"
# plotaxes.ylabel = "Surface (m)"
plotaxes.ylimits = [-1,5]
plotaxes.title = 'Surface'
plotaxes.afteraxes = gauge_after_axes
# Plot surface as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 3
plotitem.plotstyle = 'b-'
#-----------------------------------------
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
if article:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = [54,60,66,72,78,84] # list of frames to print
plotdata.print_gaugenos = [1,2,3] # list of gauges to print
plotdata.print_fignos = [4,5,6,7,10,3,300] # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = False # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
else:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_gaugenos = [1,2,3] # list of gauges to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
return plotdata
| mit |
zorroblue/scikit-learn | sklearn/metrics/tests/test_classification.py | 10 | 62931 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import MockDataFrame
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.exceptions import UndefinedMetricWarning
from scipy.spatial.distance import hamming as sp_hamming
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
# Test handling of explicit additional (not in input) labels to PRF
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
# No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
# Test a subset of labels may be requested for PRF
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
# Duplicate values with precision-recall require a different
# processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
# The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
# Here if we go from left to right in y_true, the 0 values are
# separated from the 1 values, so it appears that we've
# correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='binary')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_precision_recall_f_unused_pos_label():
# Check warning that pos_label unused when set to non-default value
# but average != 'binary'; even if data is binary.
assert_warns_message(UserWarning,
"Note that pos_label (set to 2) is "
"ignored when average != 'binary' (got 'macro'). You "
"may use labels=[pos_label] to specify a single "
"positive class.", precision_recall_fscore_support,
[1, 2, 1], [1, 2, 2], pos_label=2, average='macro')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
# Weighting example: none, linear, quadratic.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 50 + [1] * 40 + [2] * 10)
assert_almost_equal(cohen_kappa_score(y1, y2), .9315, decimal=4)
assert_almost_equal(cohen_kappa_score(y1, y2, weights="linear"), .9412, decimal=4)
assert_almost_equal(cohen_kappa_score(y1, y2, weights="quadratic"), .9541, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_matthews_corrcoef_against_numpy_corrcoef():
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=20)
y_pred = rng.randint(0, 2, size=20)
assert_almost_equal(matthews_corrcoef(y_true, y_pred),
np.corrcoef(y_true, y_pred)[0, 1], 10)
def test_matthews_corrcoef_against_jurman():
# Check that the multiclass matthews_corrcoef agrees with the definition
# presented in Jurman, Riccadonna, Furlanello, (2012). A Comparison of MCC
# and CEN Error Measures in MultiClass Prediction
rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=20)
y_pred = rng.randint(0, 2, size=20)
sample_weight = rng.rand(20)
C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
N = len(C)
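# covariance form of the multiclass MCC: cov(y_true, y_pred) /
# sqrt(cov(y_true, y_true) * cov(y_pred, y_pred)), computed from the confusion matrix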
cov_ytyp = sum([
C[k, k] * C[m, l] - C[l, k] * C[k, m]
for k in range(N) for m in range(N) for l in range(N)
])
cov_ytyt = sum([
C[:, k].sum() *
np.sum([C[g, f] for f in range(N) for g in range(N) if f != k])
for k in range(N)
])
cov_ypyp = np.sum([
C[k, :].sum() *
np.sum([C[f, g] for f in range(N) for g in range(N) if f != k])
for k in range(N)
])
mcc_jurman = cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp)
mcc_ours = matthews_corrcoef(y_true, y_pred, sample_weight)
assert_almost_equal(mcc_ours, mcc_jurman, 10)
def test_matthews_corrcoef():
rng = np.random.RandomState(0)
y_true = ["a" if i == 0 else "b" for i in rng.randint(0, 2, size=20)]
# corrcoef of same vectors must be 1
assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0)
# corrcoef, when the two vectors are opposites of each other, should be -1
y_true_inv = ["b" if i == "a" else "a" for i in y_true]
assert_almost_equal(matthews_corrcoef(y_true, y_true_inv), -1)
y_true_inv2 = label_binarize(y_true, ["a", "b"])
y_true_inv2 = np.where(y_true_inv2, 'a', 'b')
assert_almost_equal(matthews_corrcoef(y_true, y_true_inv2), -1)
# For the zero vector case, the corrcoef cannot be calculated and should
# result in a RuntimeWarning
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, [0, 0, 0, 0], [0, 0, 0, 0])
# But will output 0
assert_almost_equal(mcc, 0.)
# And also for any other vector with 0 variance
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, y_true, ['a'] * len(y_true))
# But will output 0
assert_almost_equal(mcc, 0.)
# These two vectors have 0 correlation and hence mcc should be 0
y_1 = [1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1]
y_2 = [1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1]
assert_almost_equal(matthews_corrcoef(y_1, y_2), 0.)
# Check that sample weight is able to selectively exclude
mask = [1] * 10 + [0] * 10
# Now the first half of the vector elements are alone given a weight of 1
# and hence the mcc will not be a perfect 0 as in the previous case
assert_raises(AssertionError, assert_almost_equal,
matthews_corrcoef(y_1, y_2, sample_weight=mask), 0.)
def test_matthews_corrcoef_multiclass():
rng = np.random.RandomState(0)
ord_a = ord('a')
n_classes = 4
y_true = [chr(ord_a + i) for i in rng.randint(0, n_classes, size=20)]
# corrcoef of same vectors must be 1
assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0)
# with multiclass > 2 it is not possible to achieve -1
y_true = [0, 0, 1, 1, 2, 2]
y_pred_bad = [2, 2, 0, 0, 1, 1]
assert_almost_equal(matthews_corrcoef(y_true, y_pred_bad), -.5)
# Maximizing false positives and negatives minimizes the MCC
# The minimum will be different depending on the input
y_true = [0, 0, 1, 1, 2, 2]
y_pred_min = [1, 1, 0, 0, 0, 0]
assert_almost_equal(matthews_corrcoef(y_true, y_pred_min),
-12 / np.sqrt(24 * 16))
# Zero variance will result in an mcc of zero and a Runtime Warning
y_true = [0, 1, 2]
y_pred = [3, 3, 3]
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, y_true, y_pred)
assert_almost_equal(mcc, 0.0)
# These two vectors have 0 correlation and hence mcc should be 0
y_1 = [0, 1, 2, 0, 1, 2, 0, 1, 2]
y_2 = [1, 1, 1, 2, 2, 2, 0, 0, 0]
assert_almost_equal(matthews_corrcoef(y_1, y_2), 0.)
# We can test that binary assumptions hold using the multiclass computation
# by masking the weight of samples not in the first two classes
# Masking the last label should let us get an MCC of -1
y_true = [0, 0, 1, 1, 2]
y_pred = [1, 1, 0, 0, 2]
sample_weight = [1, 1, 1, 1, 0]
assert_almost_equal(matthews_corrcoef(y_true, y_pred, sample_weight), -1)
# For the zero vector case, the corrcoef cannot be calculated and should
# result in a RuntimeWarning
y_true = [0, 0, 1, 2]
y_pred = [0, 0, 1, 2]
sample_weight = [1, 1, 0, 0]
mcc = assert_warns_message(RuntimeWarning, 'invalid value encountered',
matthews_corrcoef, y_true, y_pred,
sample_weight)
# But will output 0
assert_almost_equal(mcc, 0.)
def test_matthews_corrcoef_overflow():
# https://github.com/scikit-learn/scikit-learn/issues/9622
rng = np.random.RandomState(20170906)
def mcc_safe(y_true, y_pred):
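# compute MCC from per-sample rates instead of raw counts so intermediate
# products stay small; this is the overflow-free reference the test compares against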
conf_matrix = confusion_matrix(y_true, y_pred)
true_pos = conf_matrix[1, 1]
false_pos = conf_matrix[1, 0]
false_neg = conf_matrix[0, 1]
n_points = len(y_true)
pos_rate = (true_pos + false_neg) / n_points
activity = (true_pos + false_pos) / n_points
mcc_numerator = true_pos / n_points - pos_rate * activity
mcc_denominator = activity * pos_rate * (1 - activity) * (1 - pos_rate)
return mcc_numerator / np.sqrt(mcc_denominator)
def random_ys(n_points): # binary
x_true = rng.random_sample(n_points)
x_pred = x_true + 0.2 * (rng.random_sample(n_points) - 0.5)
y_true = (x_true > 0.5)
y_pred = (x_pred > 0.5)
return y_true, y_pred
for n_points in [100, 10000, 1000000]:
arr = np.repeat([0., 1.], n_points) # binary
assert_almost_equal(matthews_corrcoef(arr, arr), 1.0)
arr = np.repeat([0., 1., 2.], n_points) # multiclass
assert_almost_equal(matthews_corrcoef(arr, arr), 1.0)
y_true, y_pred = random_ys(n_points)
assert_almost_equal(matthews_corrcoef(y_true, y_true), 1.0)
assert_almost_equal(matthews_corrcoef(y_true, y_pred),
mcc_safe(y_true, y_pred))
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
# same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_binary_averaged():
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
ps, rs, fs, _ = precision_recall_fscore_support(y_true, y_pred,
average=None)
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
average='macro')
assert_equal(p, np.mean(ps))
assert_equal(r, np.mean(rs))
assert_equal(f, np.mean(fs))
p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
average='weighted')
support = np.bincount(y_true)
assert_equal(p, np.average(ps, weights=support))
assert_equal(r, np.average(rs, weights=support))
assert_equal(f, np.average(fs, weights=support))
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='macro'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='macro'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='macro'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_sample_weight():
"""Test confusion matrix - case with sample_weight"""
y_true, y_pred, _ = make_prediction(binary=False)
weights = [.1] * 25 + [.2] * 25 + [.3] * 25
cm = confusion_matrix(y_true, y_pred, sample_weight=weights)
true_cm = (.1 * confusion_matrix(y_true[:25], y_pred[:25]) +
.2 * confusion_matrix(y_true[25:50], y_pred[25:50]) +
.3 * confusion_matrix(y_true[50:], y_pred[50:]))
assert_array_almost_equal(cm, true_cm)
assert_raises(
ValueError, confusion_matrix, y_true, y_pred,
sample_weight=weights[:-1])
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
# a label not in y_true should result in zeros for that row/column
extra_label = np.max(y_true) + 1
cm = confusion_matrix(y_true, y_pred, labels=[2, extra_label])
assert_array_equal(cm, [[18, 0],
[0, 0]])
# check for exception when none of the specified labels are in y_true
assert_raises(ValueError, confusion_matrix, y_true, y_pred,
labels=[extra_label, extra_label + 1])
def test_confusion_matrix_dtype():
y = [0, 1, 1]
weight = np.ones(len(y))
# confusion_matrix returns int64 by default
cm = confusion_matrix(y, y)
assert_equal(cm.dtype, np.int64)
# The dtype of confusion_matrix is always 64 bit
for dtype in [np.bool_, np.int32, np.uint64]:
cm = confusion_matrix(y, y, sample_weight=weight.astype(dtype))
assert_equal(cm.dtype, np.int64)
for dtype in [np.float32, np.float64, None, object]:
cm = confusion_matrix(y, y, sample_weight=weight.astype(dtype))
assert_equal(cm.dtype, np.float64)
# np.iinfo(np.uint32).max should be accumulated correctly
weight = np.ones(len(y), dtype=np.uint32) * 4294967295
cm = confusion_matrix(y, y, sample_weight=weight)
assert_equal(cm[0, 0], 4294967295)
assert_equal(cm[1, 1], 8589934590)
# np.iinfo(np.int64).max should cause an overflow
weight = np.ones(len(y), dtype=np.int64) * 9223372036854775807
cm = confusion_matrix(y, y, sample_weight=weight)
assert_equal(cm[0, 0], 9223372036854775807)
assert_equal(cm[1, 1], -2)
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_long_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array(["blue", "green"*5, "red"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
greengreengreengreengreen 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_labels_target_names_unequal_length():
y_true = [0, 0, 2, 0, 0]
y_pred = [0, 2, 2, 0, 0]
target_names = ['class 0', 'class 1', 'class 2']
assert_warns_message(UserWarning,
"labels size, 2, does not "
"match size of target_names, 3",
classification_report,
y_true, y_pred, target_names=target_names)
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
w = np.array([1, 3])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, 1 - y2), 1)
assert_equal(hamming_loss(y1, 1 - y1), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
assert_equal(hamming_loss(y1, y2, sample_weight=w), 1. / 12)
assert_equal(hamming_loss(y1, 1-y2, sample_weight=w), 11. / 12)
assert_equal(hamming_loss(y1, np.zeros_like(y1), sample_weight=w), 2. / 3)
# sp_hamming only works with 1-D arrays
assert_equal(hamming_loss(y1[0], y2[0]), sp_hamming(y1[0], y2[0]))
assert_warns(DeprecationWarning, hamming_loss, y1, y2, classes=[0, 1])
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
# single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='binary')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='binary')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_binary_data_non_binary():
# Error if user does not explicitly set non-binary average mode
y_true_mc = [1, 2, 3, 3]
y_pred_mc = [1, 2, 3, 1]
y_true_ind = np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])
y_pred_ind = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
for y_true, y_pred, y_type in [
(y_true_mc, y_pred_mc, 'multiclass'),
(y_true_ind, y_pred_ind, 'multilabel-indicator'),
]:
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
assert_raise_message(ValueError,
"Target is %s but average='binary'. Please "
"choose another average setting." % y_type,
metric, y_true, y_pred)
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Classification metrics can't handle a mix of {0} and {1} "
"targets".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
# Make sure seq of seq is not supported
y1 = [(1, 2,), (0, 2, 3)]
y2 = [(2,), (0, 2,)]
msg = ('You appear to be using a legacy multi-label data representation. '
'Sequence of sequences are no longer supported; use a binary array'
' or sparse matrix instead.')
assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test__check_targets_multiclass_with_both_y_true_and_y_pred_binary():
# https://github.com/scikit-learn/scikit-learn/issues/8098
y_true = [0, 1]
y_pred = [0, -1]
assert_equal(_check_targets(y_true, y_pred)[0], 'multiclass')
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[+1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, +0.24],
[-2.36, -0.79, -0.27, +0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
    # Currently, invariance of string and integer labels cannot be tested
    # in common invariance tests because invariance tests for multiclass
    # decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
# test labels option
y_true = [2, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5]]
y_score = np.array([[0.1, 0.9], [0.1, 0.9]])
error_str = ('y_true contains only one label (2). Please provide '
'the true labels explicitly through the labels argument.')
assert_raise_message(ValueError, error_str, log_loss, y_true, y_pred)
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.2, 0.3]]
error_str = ('Found input variables with inconsistent numbers of samples: '
'[3, 2]')
assert_raise_message(ValueError, error_str, log_loss, y_true, y_pred)
# works when the labels argument is used
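    # (both samples have true label 2, predicted here with probability 0.9,
    #  so the expected loss is -log(0.9) ~= 0.105)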
true_log_loss = -np.mean(np.log(y_score[:, 1]))
calculated_log_loss = log_loss(y_true, y_score, labels=[1, 2])
assert_almost_equal(calculated_log_loss, true_log_loss)
# ensure labels work when len(np.unique(y_true)) != y_pred.shape[1]
y_true = [1, 2, 2]
y_score2 = [[0.2, 0.7, 0.3], [0.6, 0.5, 0.3], [0.3, 0.9, 0.1]]
loss = log_loss(y_true, y_score2, labels=[1, 2, 3])
assert_almost_equal(loss, 1.0630345, decimal=6)
def test_log_loss_pandas_input():
# case when input is a pandas series and dataframe gh-5715
y_tr = np.array(["ham", "spam", "spam", "ham"])
y_pr = np.array([[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]])
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TrueInputType, PredInputType in types:
# y_pred dataframe, y_true series
y_true, y_pred = TrueInputType(y_tr), PredInputType(y_pr)
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
# calculate even if only single class in y_true (#6980)
assert_almost_equal(brier_score_loss([0], [0.5]), 0.25)
assert_almost_equal(brier_score_loss([1], [0.5]), 0.25)
| bsd-3-clause |
TiKeil/Master-thesis-LOD | python_files/tests/tests/VCLOD_Coefficient_1.py | 1 | 10923 | # This file is part of the master thesis "Variational crimes in the Localized orthogonal decomposition method":
# https://github.com/TiKeil/Masterthesis-LOD.git
# Copyright holder: Tim Keil
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import os
import sys
import numpy as np
import scipy.sparse as sparse
import random
import csv
import matplotlib.pyplot as plt
from visualize import drawCoefficient
from data import *
from gridlod import interp, coef, util, fem, world, linalg, femsolver
import pg_rand, femsolverCoarse, buildcoef2d
from gridlod.world import World
def result(pglod, world, A, R, f, k, String):
print "-------------- " + String + " ---------------"
NWorldFine = world.NWorldFine
NWorldCoarse = world.NWorldCoarse
NCoarseElement = world.NCoarseElement
boundaryConditions = world.boundaryConditions
NpFine = np.prod(NWorldFine+1)
NpCoarse = np.prod(NWorldCoarse+1)
# new Coefficient
ANew = R.flatten()
Anew = coef.coefficientFine(NWorldCoarse, NCoarseElement, ANew)
# reference solution
f_fine = np.ones(NpFine)
uFineFem, AFine, MFine = femsolver.solveFine(world, ANew, f_fine, None, boundaryConditions)
# worst solution
KFull = pglod.assembleMsStiffnessMatrix()
MFull = fem.assemblePatchMatrix(NWorldCoarse, world.MLocCoarse)
free = util.interiorpIndexMap(NWorldCoarse)
bFull = MFull*f
KFree = KFull[free][:,free]
bFree = bFull[free]
xFree = sparse.linalg.spsolve(KFree, bFree)
basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)
basisCorrectors = pglod.assembleBasisCorrectors()
modifiedBasis = basis - basisCorrectors
xFull = np.zeros(NpCoarse)
xFull[free] = xFree
uCoarse = xFull
uLodFine = modifiedBasis*xFull
uLodFineWorst = uLodFine
# energy error
errorworst = np.sqrt(np.dot(uFineFem - uLodFineWorst, AFine*(uFineFem - uLodFineWorst)))
# tolerance = 0
vis, eps = pglod.updateCorrectors(Anew, 0, f, 1, clearFineQuantities=False, Computing=False)
PotentialCorrectors = np.sum(vis)
elemente = np.arange(np.prod(NWorldCoarse))
# identify tolerances
epsnozero = filter(lambda x: x!=0, eps)
assert(np.size(epsnozero) != 0)
mini = np.min(epsnozero)
minilog = int(round(np.log10(mini)-0.49))
epsnozero.append(10**(minilog))
ToleranceListcomplete = []
for i in range(0,int(np.size(epsnozero))):
ToleranceListcomplete.append(epsnozero[i])
ToleranceListcomplete.sort()
ToleranceListcomplete = np.unique(ToleranceListcomplete)
# with tolerance
errorplotinfo = []
tolerancesafe = []
errorBest = []
errorWorst = []
recomputefractionsafe = []
recomputefraction = 0
Correctors = 0
leng = np.size(ToleranceListcomplete)
for k in range(leng-1,-1,-1):
tol = ToleranceListcomplete[k]
print " --- "+ str(-k+leng) + "/" + str(leng)+ " --- Tolerance: " + str(round(tol,5)) + " in "+ String +" ---- ",
vistol = pglod.updateCorrectors(Anew, tol, f, clearFineQuantities=False, Testing=True)
Correctors += np.sum(vistol)
recomputefraction += float(np.sum(vistol))/PotentialCorrectors * 100
recomputefractionsafe.append(recomputefraction)
KFull = pglod.assembleMsStiffnessMatrix()
MFull = fem.assemblePatchMatrix(NWorldCoarse, world.MLocCoarse)
free = util.interiorpIndexMap(NWorldCoarse)
bFull = MFull*f
KFree = KFull[free][:,free]
bFree = bFull[free]
xFree = sparse.linalg.spsolve(KFree, bFree)
basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)
basisCorrectors = pglod.assembleBasisCorrectors()
modifiedBasis = basis - basisCorrectors
xFull = np.zeros(NpCoarse)
xFull[free] = xFree
uCoarse = xFull
uLodFine = modifiedBasis*xFull
#energy error
errortol = np.sqrt(np.dot(uFineFem - uLodFine, AFine*(uFineFem - uLodFine)))
errorplotinfo.append(errortol)
tolerancesafe.append(tol)
# 100% updating
uLodFinebest = uLodFine
errorbest = np.sqrt(np.dot(uFineFem - uLodFinebest, AFine*(uFineFem - uLodFinebest)))
for k in range(leng-1,-1,-1):
errorBest.append(errorbest)
errorWorst.append(errorworst)
return vis, eps, PotentialCorrectors, recomputefractionsafe, errorplotinfo, errorWorst, errorBest
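# `result` sweeps the tolerance from the largest to the smallest observed
# error indicator, updates the correctors selected by `updateCorrectors` for
# each tolerance, and records the energy error together with the fraction of
# recomputed correctors.  A hypothetical plotting sketch for its outputs
# (not part of the original script; names as returned above):
#
# >>> import matplotlib.pyplot as plt
# >>> plt.semilogy(recomputefractionsafe, errorplotinfo, label='VCLOD')
# >>> plt.semilogy(recomputefractionsafe, errorBest, label='100% updated')
# >>> plt.semilogy(recomputefractionsafe, errorWorst, label='0% updated')
# >>> plt.xlabel('updated correctors [%]'); plt.ylabel('energy error')
# >>> plt.legend(); plt.show()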
bg = 0.05 #background
val = 1 #values
#fine World
NWorldFine = np.array([256, 256])
NpFine = np.prod(NWorldFine+1)
#coarse World
NWorldCoarse = np.array([16,16])
NpCoarse = np.prod(NWorldCoarse+1)
#ratio between Fine and Coarse
NCoarseElement = NWorldFine/NWorldCoarse
boundaryConditions = np.array([[0, 0],
[0, 0]])
world = World(NWorldCoarse, NCoarseElement, boundaryConditions)
# right-hand side
f = np.ones(NpCoarse)
#Coefficient 1
CoefClass = buildcoef2d.Coefficient2d(NWorldFine,
bg = bg,
val = val,
length = 2,
thick = 2,
space = 2,
probfactor = 1,
right = 1,
down = 0,
diagr1 = 0,
diagr2 = 0,
diagl1 = 0,
diagl2 = 0,
LenSwitch = None,
thickSwitch = None,
equidistant = True,
ChannelHorizontal = None,
ChannelVertical = None,
BoundarySpace = True)
A = CoefClass.BuildCoefficient()
ABase = A.flatten()
ROOT = '../../../test_data/Coef1/'
# save NWorldFine
with open("%s/NWorldFine.txt" % ROOT, 'wb') as csvfile:
writer = csv.writer(csvfile)
for val in NWorldFine:
writer.writerow([val])
# save NWorldCoarse
with open("%s/NWorldCoarse.txt" % ROOT, 'wb') as csvfile:
writer = csv.writer(csvfile)
for val in NWorldCoarse:
writer.writerow([val])
#ABase
with open("%s/OriginalCoeff.txt" % ROOT, 'wb') as csvfile:
writer = csv.writer(csvfile)
for val in ABase:
writer.writerow([val])
#fine-fem
f_fine = np.ones(NpFine)
uFineFem, AFine, MFine = femsolver.solveFine(world, ABase, f_fine, None, boundaryConditions)
#fine solution
with open("%s/finescale.txt" % ROOT, 'wb') as csvfile:
writer = csv.writer(csvfile)
for val in uFineFem:
writer.writerow([val])
plt.figure("Original")
drawCoefficient(NWorldFine, ABase,greys=True)
plt.title("Original coefficient")
plt.show()
# random seed
random.seed(20)
# decision
valc = np.shape(CoefClass.ShapeRemember)[0]
numbers = []
decision = np.zeros(100)
decision[0] = 1
for i in range(0,valc):
a = random.sample(decision,1)[0]
if a == 1:
numbers.append(i)
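# The loop above selects each of the `valc` shapes independently with
# probability 1/100 (a single '1' among the 100 entries of `decision`);
# with the fixed seed above the same subset is chosen on every run, and
# `numbers` holds the indices of the shapes perturbed below.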
value1 = 3
C1 = CoefClass.SpecificValueChange(ratio=value1,
Number = numbers,
probfactor=1,
randomvalue=None,
negative=None,
ShapeRestriction=True,
ShapeWave=None,
ChangeRight=1,
ChangeDown=1,
ChangeDiagr1=1,
ChangeDiagr2=1,
ChangeDiagl1=1,
ChangeDiagl2=1,
Original = True,
NewShapeChange = True)
V = CoefClass.SpecificVanish(Number = numbers,
probfactor=1,
PartlyVanish=None,
ChangeRight=1,
ChangeDown=1,
ChangeDiagr1=1,
ChangeDiagr2=1,
ChangeDiagl1=1,
ChangeDiagl2=1,
Original = True)
M1 = CoefClass.SpecificMove(probfactor=1,
Number = numbers,
steps=1,
randomstep=None,
randomDirection=None,
ChangeRight=1,
ChangeDown=1,
ChangeDiagr1=1,
ChangeDiagr2=1,
ChangeDiagl1=1,
ChangeDiagl2=1,
Right=1,
BottomRight=0,
Bottom=0,
BottomLeft=0,
Left=0,
TopLeft=0,
Top=0,
TopRight=0,
Original = True)
k = 4
NWorldFine = world.NWorldFine
NWorldCoarse = world.NWorldCoarse
NCoarseElement = world.NCoarseElement
boundaryConditions = world.boundaryConditions
NpFine = np.prod(NWorldFine+1)
NpCoarse = np.prod(NWorldCoarse+1)
#interpolant
IPatchGenerator = lambda i, N: interp.L2ProjectionPatchMatrix(i, N, NWorldCoarse, NCoarseElement, boundaryConditions)
#old Coefficient
ABase = A.flatten()
Aold = coef.coefficientFine(NWorldCoarse, NCoarseElement, ABase)
pglod = pg_rand.VcPetrovGalerkinLOD(Aold, world, k, IPatchGenerator, 0)
pglod.originCorrectors(clearFineQuantities=False)
vis, eps, PotentialUpdated, recomputefractionsafe, errorplotinfo, errorworst, errorbest = result(pglod ,world, A, C1, f, k, 'Specific value change' + str(value1))
safeChange(ROOT, C1, vis, eps, PotentialUpdated, recomputefractionsafe, errorplotinfo, errorworst, errorbest)
vis, eps, PotentialUpdated, recomputefractionsafe, errorplotinfo, errorworst, errorbest = result(pglod ,world, A, V, f, k, 'Vanish')
safeVanish(ROOT, V, vis, eps, PotentialUpdated, recomputefractionsafe, errorplotinfo, errorworst, errorbest)
vis, eps, PotentialUpdated, recomputefractionsafe, errorplotinfo, errorworst, errorbest = result(pglod ,world, A, M1, f, k, 'One Step Move')
safeShift(ROOT, M1, vis, eps, PotentialUpdated, recomputefractionsafe, errorplotinfo, errorworst, errorbest) | apache-2.0 |
pkhorrami4/chen_huang_experiments | experiments/rnns/1_layer_rnn_h_100_subj_dep/train.py | 5 | 7367 | import argparse
from datetime import datetime
import os
import sys
import yaml
import numpy
import tensorflow as tf
# import matplotlib.pyplot as plt
# get_ipython().magic(u'matplotlib inline')
from chen_huang.datasets.data_loaders import LandmarkDataLoader
from chen_huang.data_batchers.video_seq_generator import FeatureSequenceGenerator
from chen_huang.models.recurrent_model import RecurrentModel
from chen_huang.util.normalizers import LandmarkFeatureNormalizer
# from chen_huang.util.data_augmenter import DataAugmenter
def make_mask(seq_lengths, batch_size, max_sequence_length):
mask = numpy.zeros((batch_size, max_sequence_length), numpy.bool)
for i in range(batch_size):
mask[i, 0:seq_lengths[i]] = 1
return mask
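# Illustrative example (not part of the original script): with batch_size=2,
# max_sequence_length=4 and seq_lengths=[2, 3], make_mask returns
#     [[ True,  True, False, False],
#      [ True,  True,  True, False]]
# i.e. the first seq_lengths[i] steps of each sequence are marked as valid.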
def load_yaml(yaml_file):
with open(yaml_file, 'r') as f:
yaml_dict = yaml.load(f)
return yaml_dict
def parse_args():
parser = argparse.ArgumentParser(description='Script to train RNN/LSTM models. ',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--yaml_file', dest='yaml_file',
help='yaml file containing hyperparameters.',
default='./params.yaml')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
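# Typical invocation (illustrative): python train.py --yaml_file ./params.yaml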
if __name__ == "__main__":
# Load data, model, and training parameters
args = parse_args()
yaml_file = args.yaml_file #sys.argv[1] #'./params.yaml'
param_dict = load_yaml(yaml_file)
data_dict = param_dict['data']
model_dict = param_dict['model']
train_dict = param_dict['train']
dataset_path = data_dict['dataset_path']
feat_type = data_dict['feat_type']
fold_type = data_dict['fold_type']
train_folds = data_dict['train_folds']
val_folds = data_dict['val_folds']
test_folds = data_dict['test_folds']
remove_labels = data_dict['remove_labels']
if 'remove_easy' in data_dict.keys():
remove_easy = data_dict['remove_easy']
else:
remove_easy = False
batch_size = model_dict['batch_size']
max_sequence_length = model_dict['max_sequence_length']
# Load datasets
data_loader_train = LandmarkDataLoader(dataset_path,
feat_type=feat_type,
fold_type=fold_type,
which_fold=train_folds,
remove_easy=remove_easy)
data_dict_train = data_loader_train.load()
data_loader_val = LandmarkDataLoader(dataset_path,
feat_type=feat_type,
fold_type=fold_type,
which_fold=val_folds,
remove_easy=remove_easy)
data_dict_val = data_loader_val.load()
data_loader_test = LandmarkDataLoader(dataset_path,
feat_type=feat_type,
fold_type=fold_type,
which_fold=test_folds,
remove_easy=remove_easy)
data_dict_test = data_loader_test.load()
X_train = data_dict_train['X']
y_train = data_dict_train['y']
X_val = data_dict_val['X']
y_val = data_dict_val['y']
X_test = data_dict_test['X']
y_test = data_dict_test['y']
# Load data batchers
batcher_train = FeatureSequenceGenerator(X_train, y_train, batch_size=batch_size,
max_seq_length=max_sequence_length,
verbose=False)
batcher_val = FeatureSequenceGenerator(X_val, y_val, batch_size=batch_size,
max_seq_length=max_sequence_length,
verbose=False)
batcher_test = FeatureSequenceGenerator(X_test, y_test, batch_size=batch_size,
max_seq_length=max_sequence_length,
verbose=False)
# Load preprocessors
normalizer = LandmarkFeatureNormalizer(X_train)
# Load model
model = RecurrentModel(model_dict, verbose=True)
# Train Loop
total_num_steps = train_dict['total_num_steps']
disp_step = train_dict['disp_step']
val_step = train_dict['val_step']
save_step = train_dict['save_step']
for step in range(total_num_steps):
# Load batch from training set
x_batch, y_batch_temp, seq_length_batch = batcher_train.get_batch()
y_batch = numpy.tile(y_batch_temp[:, None], (1, max_sequence_length))
mask = make_mask(seq_length_batch, batch_size, max_sequence_length)
x_batch_norm = normalizer.run(x_batch)
cost_train, accuracy_train, accuracy_train_clip, summary_train = model.train(x_batch_norm,
y_batch,
seq_length_batch,
mask)
#accuracy_train_clip = model.get_accuracy_clip(x_batch_norm, y_batch, seq_length_batch, mask)
if step % disp_step == 0:
print 'Iter %d --- batch_cost_train: %.4f --- batch_accuracy_train: %.4f --- clip_accuracy_train: %.4f' % (step, cost_train, accuracy_train, accuracy_train_clip)
if step % 10 == 0:
#print 'Added to train summaries'
model.summary_writer_train.add_summary(summary_train, step)
if step % val_step == 0:
x_batch_val, y_batch_temp_val, seq_length_batch_val = batcher_val.get_batch()
y_batch_val = numpy.tile(y_batch_temp_val[:, None], (1, max_sequence_length))
mask_val = make_mask(seq_length_batch_val, batch_size, max_sequence_length)
x_batch_norm_val = normalizer.run(x_batch_val)
#cost_val = model.cost(x_batch_norm_val, y_batch_val, seq_length_batch_val, mask_val)
#accuracy_val = model.get_accuracy(x_batch_norm_val, y_batch_val, seq_length_batch_val, mask_val)
#accuracy_val_clip = model.get_accuracy_clip(x_batch_norm_val, y_batch_val, seq_length_batch_val, mask_val)
cost_val, accuracy_val, accuracy_val_clip, summary_val = model.val_batch(x_batch_norm_val,
y_batch_val,
seq_length_batch_val,
mask_val)
print '*Iter %d --- batch_cost_val: %.4f --- batch_accuracy_val: %.4f --- clip_accuracy_val: %.4f' % (step, cost_val, accuracy_val, accuracy_val_clip)
if step % val_step == 0:
#print 'Added to val summaries'
model.summary_writer_val.add_summary(summary_val, step)
if step % save_step == 0:
dt = datetime.now()
time_str = dt.strftime("%mm-%dd-%yy-%Hh-%Mm-%Ss")
model.save('ckpt_'+time_str)
| gpl-3.0 |
barentsen/dave | extractDetrend/K2photo/makeplot.py | 3 | 2223 | from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import pyfits
import glob
import extract_lc
import progressbar
params = {'backend': 'png',
'axes.linewidth': 2.5,
'axes.labelsize': 24,
'axes.font': 'sans-serif',
'axes.fontweight' : 'bold',
'text.fontsize': 12,
'legend.fontsize': 16,
'xtick.labelsize': 16,
'ytick.labelsize': 16,
# 'text.usetex': True,
# 'font.family': 'sans-serif',
'font.sans-serif': 'Helvetica',
'ps.useafm': True,
'pdf.use14corefonts': True,
'ps.fonttype': 42,
'legend.markersize': 200}
plt.rcParams.update(params)
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
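# rolling_window exposes overlapping windows of `a` via a stride trick, with
# no data copy; the CDPP estimate below takes the per-window standard
# deviation (scaled by 1/sqrt(13)) and then the median.  Illustrative example
# (not part of the original script):
#
# >>> rolling_window(np.arange(5), 3)
# array([[0, 1, 2],
#        [1, 2, 3],
#        [2, 3, 4]])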
if __name__ == '__main__':
dirname = '/Users/tom/Projects/Debra_data/data/lcfiles/'
files = glob.glob(dirname + '*.txt')
cdpparr = np.zeros(len(files))
bar = progressbar.ProgressBar(maxval=len(files), \
widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage(),
progressbar.ETA()])
bar.start()
for i,f in enumerate(files):
bar.update(i+1)
fig, (ax1,ax2,ax3) = plt.subplots(3,1, sharex=True, sharey=True,
figsize=[9,12])
t = np.genfromtxt(f).T
ax1.plot(t[0],t[1],c='r')
ax2.plot(t[0],t[2],c='b')
ax3.plot(t[0],t[3],c='g')
#calc CDPP
cdpp = 1.E6 * np.median(
np.std(rolling_window(t[2], 13) / np.sqrt(13), 1))
cdpparr[i] = cdpp
ax1.set_xlim([45.5,77.0])
ax1.set_title('Decor lc,')
ax2.set_title('Decor + medfilt lc, CDPP = {}'.format(cdpp))
ax3.set_title('Decor signal')
plt.tight_layout()
savename = f.split('/')[-1].split('.')[0]
plt.savefig('{}../figs/{}.png'.format(dirname,savename))
plt.close('all')
bar.finish()
np.savetxt('cdpp.txt',np.array([files,cdpparr],dtype=None).T,fmt='%s')
| mit |
nmayorov/scipy | scipy/stats/_continuous_distns.py | 2 | 268387 | # -*- coding: utf-8 -*-
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
import warnings
from collections.abc import Iterable
import ctypes
import numpy as np
from scipy._lib.doccer import (extend_notes_in_docstring,
replace_notes_in_docstring)
from scipy._lib._ccallback import LowLevelCallable
from scipy import optimize
from scipy import integrate
from scipy import interpolate
import scipy.special as sc
import scipy.special._ufuncs as scu
from scipy._lib._util import _lazyselect, _lazywhere
from . import _stats
from ._rvs_sampling import rvs_ratio_uniforms
from ._tukeylambda_stats import (tukeylambda_variance as _tlvar,
tukeylambda_kurtosis as _tlkurt)
from ._distn_infrastructure import (
get_distribution_names, _kurtosis, _ncx2_cdf, _ncx2_log_pdf, _ncx2_pdf,
rv_continuous, _skew, _get_fixed_fit_value, _check_shape,
_fit_determine_optimizer)
from ._ksstats import kolmogn, kolmognp, kolmogni
from ._constants import (_XMIN, _EULER, _ZETA3,
_SQRT_2_OVER_PI, _LOG_SQRT_2_OVER_PI)
# In numpy 1.12 and above, np.power refuses to raise integers to negative
# powers, and `np.float_power` is a new replacement.
try:
float_power = np.float_power
except AttributeError:
float_power = np.power
def _remove_optimizer_parameters(kwds):
"""
Remove the optimizer-related keyword arguments 'loc', 'scale' and
'optimizer' from `kwds`. Then check that `kwds` is empty, and
raise `TypeError("Unknown arguments: %s." % kwds)` if it is not.
This function is used in the fit method of distributions that override
the default method and do not use the default optimization code.
`kwds` is modified in-place.
"""
kwds.pop('loc', None)
kwds.pop('scale', None)
kwds.pop('optimizer', None)
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
r"""Kolmogorov-Smirnov one-sided test statistic distribution.
This is the distribution of the one-sided Kolmogorov-Smirnov (KS)
statistics :math:`D_n^+` and :math:`D_n^-`
for a finite sample size ``n`` (the shape parameter).
%(before_notes)s
Notes
-----
:math:`D_n^+` and :math:`D_n^-` are given by
.. math::
D_n^+ &= \text{sup}_x (F_n(x) - F(x)),\\
D_n^- &= \text{sup}_x (F(x) - F_n(x)),\\
where :math:`F` is a continuous CDF and :math:`F_n` is an empirical CDF.
`ksone` describes the distribution under the null hypothesis of the KS test
that the empirical CDF corresponds to :math:`n` i.i.d. random variates
with CDF :math:`F`.
%(after_notes)s
See Also
--------
kstwobign, kstwo, kstest
References
----------
.. [1] Birnbaum, Z. W. and Tingey, F.H. "One-sided confidence contours
for probability distribution functions", The Annals of Mathematical
Statistics, 22(4), pp 592-596 (1951).
%(example)s
"""
def _pdf(self, x, n):
return -scu._smirnovp(n, x)
def _cdf(self, x, n):
return scu._smirnovc(n, x)
def _sf(self, x, n):
return sc.smirnov(n, x)
def _ppf(self, q, n):
return scu._smirnovci(n, q)
def _isf(self, q, n):
return sc.smirnovi(n, q)
ksone = ksone_gen(a=0.0, b=1.0, name='ksone')
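# Illustrative usage (not part of the module): for a sample of size n,
# ksone.sf(d, n) gives the one-sided p-value P(D_n^+ >= d), evaluated through
# scipy.special.smirnov in `_sf` above.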
class kstwo_gen(rv_continuous):
r"""Kolmogorov-Smirnov two-sided test statistic distribution.
This is the distribution of the two-sided Kolmogorov-Smirnov (KS)
statistic :math:`D_n` for a finite sample size ``n``
(the shape parameter).
%(before_notes)s
Notes
-----
:math:`D_n` is given by
.. math::
D_n &= \text{sup}_x |F_n(x) - F(x)|
where :math:`F` is a (continuous) CDF and :math:`F_n` is an empirical CDF.
`kstwo` describes the distribution under the null hypothesis of the KS test
that the empirical CDF corresponds to :math:`n` i.i.d. random variates
with CDF :math:`F`.
%(after_notes)s
See Also
--------
kstwobign, ksone, kstest
References
----------
.. [1] Simard, R., L'Ecuyer, P. "Computing the Two-Sided
Kolmogorov-Smirnov Distribution", Journal of Statistical Software,
Vol 39, 11, 1-18 (2011).
%(example)s
"""
def _get_support(self, n):
return (0.5/(n if not isinstance(n, Iterable) else np.asanyarray(n)),
1.0)
def _pdf(self, x, n):
return kolmognp(n, x)
def _cdf(self, x, n):
return kolmogn(n, x)
def _sf(self, x, n):
return kolmogn(n, x, cdf=False)
def _ppf(self, q, n):
return kolmogni(n, q, cdf=True)
def _isf(self, q, n):
return kolmogni(n, q, cdf=False)
# Use the pdf, (not the ppf) to compute moments
kstwo = kstwo_gen(momtype=0, a=0.0, b=1.0, name='kstwo')
class kstwobign_gen(rv_continuous):
r"""Limiting distribution of scaled Kolmogorov-Smirnov two-sided test statistic.
This is the asymptotic distribution of the two-sided Kolmogorov-Smirnov
statistic :math:`\sqrt{n} D_n` that measures the maximum absolute
distance of the theoretical (continuous) CDF from the empirical CDF.
(see `kstest`).
%(before_notes)s
Notes
-----
:math:`\sqrt{n} D_n` is given by
.. math::
D_n = \text{sup}_x |F_n(x) - F(x)|
where :math:`F` is a continuous CDF and :math:`F_n` is an empirical CDF.
`kstwobign` describes the asymptotic distribution (i.e. the limit of
:math:`\sqrt{n} D_n`) under the null hypothesis of the KS test that the
empirical CDF corresponds to i.i.d. random variates with CDF :math:`F`.
%(after_notes)s
See Also
--------
ksone, kstwo, kstest
References
----------
.. [1] Feller, W. "On the Kolmogorov-Smirnov Limit Theorems for Empirical
Distributions", Ann. Math. Statist. Vol 19, 177-189 (1948).
%(example)s
"""
def _pdf(self, x):
return -scu._kolmogp(x)
def _cdf(self, x):
return scu._kolmogc(x)
def _sf(self, x):
return sc.kolmogorov(x)
def _ppf(self, q):
return scu._kolmogci(q)
def _isf(self, q):
return sc.kolmogi(q)
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = np.sqrt(2*np.pi)
_norm_pdf_logC = np.log(_norm_pdf_C)
def _norm_pdf(x):
return np.exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return sc.ndtr(x)
def _norm_logcdf(x):
return sc.log_ndtr(x)
def _norm_ppf(q):
return sc.ndtri(q)
def _norm_sf(x):
return _norm_cdf(-x)
def _norm_logsf(x):
return _norm_logcdf(-x)
def _norm_isf(q):
return -_norm_ppf(q)
class norm_gen(rv_continuous):
r"""A normal continuous random variable.
The location (``loc``) keyword specifies the mean.
The scale (``scale``) keyword specifies the standard deviation.
%(before_notes)s
Notes
-----
The probability density function for `norm` is:
.. math::
f(x) = \frac{\exp(-x^2/2)}{\sqrt{2\pi}}
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _rvs(self, size=None, random_state=None):
return random_state.standard_normal(size)
def _pdf(self, x):
# norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
return _norm_pdf(x)
def _logpdf(self, x):
return _norm_logpdf(x)
def _cdf(self, x):
return _norm_cdf(x)
def _logcdf(self, x):
return _norm_logcdf(x)
def _sf(self, x):
return _norm_sf(x)
def _logsf(self, x):
return _norm_logsf(x)
def _ppf(self, q):
return _norm_ppf(q)
def _isf(self, q):
return _norm_isf(q)
def _stats(self):
return 0.0, 1.0, 0.0, 0.0
def _entropy(self):
return 0.5*(np.log(2*np.pi)+1)
@replace_notes_in_docstring(rv_continuous, notes="""\
This function uses explicit formulas for the maximum likelihood
estimation of the normal distribution parameters, so the
`optimizer` argument is ignored.\n\n""")
def fit(self, data, **kwds):
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
_remove_optimizer_parameters(kwds)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
if floc is None:
loc = data.mean()
else:
loc = floc
if fscale is None:
scale = np.sqrt(((data - loc)**2).mean())
else:
scale = fscale
return loc, scale
def _munp(self, n):
"""
@returns Moments of standard normal distribution for integer n >= 0
See eq. 16 of https://arxiv.org/abs/1209.4340v2
"""
if n % 2 == 0:
return sc.factorial2(n - 1)
else:
return 0.
norm = norm_gen(name='norm')
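# Illustrative sketch of the closed-form fit described in the docstring:
# norm.fit([1.0, 2.0, 3.0]) returns (2.0, sqrt(2/3)) -- the sample mean and
# the (biased) root-mean-square deviation -- without any numerical optimizer.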
class alpha_gen(rv_continuous):
r"""An alpha continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `alpha` ([1]_, [2]_) is:
.. math::
f(x, a) = \frac{1}{x^2 \Phi(a) \sqrt{2\pi}} *
\exp(-\frac{1}{2} (a-1/x)^2)
where :math:`\Phi` is the normal CDF, :math:`x > 0`, and :math:`a > 0`.
`alpha` takes ``a`` as a shape parameter.
%(after_notes)s
References
----------
.. [1] Johnson, Kotz, and Balakrishnan, "Continuous Univariate
Distributions, Volume 1", Second Edition, John Wiley and Sons,
p. 173 (1994).
.. [2] Anthony A. Salvia, "Reliability applications of the Alpha
Distribution", IEEE Transactions on Reliability, Vol. R-34,
No. 3, pp. 251-252 (1985).
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, a):
# alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2)
return 1.0/(x**2)/_norm_cdf(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*np.log(x) + _norm_logpdf(a-1.0/x) - np.log(_norm_cdf(a))
def _cdf(self, x, a):
return _norm_cdf(a-1.0/x) / _norm_cdf(a)
def _ppf(self, q, a):
return 1.0/np.asarray(a-sc.ndtri(q*_norm_cdf(a)))
def _stats(self, a):
return [np.inf]*2 + [np.nan]*2
alpha = alpha_gen(a=0.0, name='alpha')
class anglit_gen(rv_continuous):
r"""An anglit continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `anglit` is:
.. math::
f(x) = \sin(2x + \pi/2) = \cos(2x)
for :math:`-\pi/4 \le x \le \pi/4`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# anglit.pdf(x) = sin(2*x + \pi/2) = cos(2*x)
return np.cos(2*x)
def _cdf(self, x):
return np.sin(x+np.pi/4)**2.0
def _ppf(self, q):
return np.arcsin(np.sqrt(q))-np.pi/4
def _stats(self):
return 0.0, np.pi*np.pi/16-0.5, 0.0, -2*(np.pi**4 - 96)/(np.pi*np.pi-8)**2
def _entropy(self):
return 1-np.log(2)
anglit = anglit_gen(a=-np.pi/4, b=np.pi/4, name='anglit')
class arcsine_gen(rv_continuous):
r"""An arcsine continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `arcsine` is:
.. math::
f(x) = \frac{1}{\pi \sqrt{x (1-x)}}
for :math:`0 < x < 1`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
return 1.0/np.pi/np.sqrt(x*(1-x))
def _cdf(self, x):
return 2.0/np.pi*np.arcsin(np.sqrt(x))
def _ppf(self, q):
return np.sin(np.pi/2.0*q)**2.0
def _stats(self):
mu = 0.5
mu2 = 1.0/8
g1 = 0
g2 = -3.0/2.0
return mu, mu2, g1, g2
def _entropy(self):
return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
class FitDataError(ValueError):
# This exception is raised by, for example, beta_gen.fit when both floc
# and fscale are fixed and there are values in the data not in the open
# interval (floc, floc+fscale).
def __init__(self, distr, lower, upper):
self.args = (
"Invalid values in `data`. Maximum likelihood "
"estimation with {distr!r} requires that {lower!r} < "
"(x - loc)/scale < {upper!r} for each x in `data`.".format(
distr=distr, lower=lower, upper=upper),
)
class FitSolverError(RuntimeError):
# This exception is raised by, for example, beta_gen.fit when
# optimize.fsolve returns with ier != 1.
def __init__(self, mesg):
emsg = "Solver for the MLE equations failed to converge: "
emsg += mesg.replace('\n', '')
self.args = (emsg,)
def _beta_mle_a(a, b, n, s1):
# The zeros of this function give the MLE for `a`, with
# `b`, `n` and `s1` given. `s1` is the sum of the logs of
# the data. `n` is the number of data points.
psiab = sc.psi(a + b)
func = s1 - n * (-psiab + sc.psi(a))
return func
def _beta_mle_ab(theta, n, s1, s2):
# Zeros of this function are critical points of
# the maximum likelihood function. Solving this system
# for theta (which contains a and b) gives the MLE for a and b
# given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data,
# and `s2` is the sum of the logs of 1 - data. `n` is the number
# of data points.
a, b = theta
psiab = sc.psi(a + b)
func = [s1 - n * (-psiab + sc.psi(a)),
s2 - n * (-psiab + sc.psi(b))]
return func
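# For reference, these are the stationarity conditions of the beta
# log-likelihood l(a, b) = (a - 1)*s1 + (b - 1)*s2 - n*log(B(a, b)):
#
#     dl/da = s1 - n*(psi(a) - psi(a + b)) = 0
#     dl/db = s2 - n*(psi(b) - psi(a + b)) = 0
#
# which is exactly what `_beta_mle_a` and `_beta_mle_ab` return for
# `optimize.fsolve` in `beta_gen.fit` below.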
class beta_gen(rv_continuous):
r"""A beta continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `beta` is:
.. math::
f(x, a, b) = \frac{\Gamma(a+b) x^{a-1} (1-x)^{b-1}}
{\Gamma(a) \Gamma(b)}
for :math:`0 <= x <= 1`, :math:`a > 0`, :math:`b > 0`, where
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`beta` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, a, b, size=None, random_state=None):
return random_state.beta(a, b, size)
def _pdf(self, x, a, b):
# gamma(a+b) * x**(a-1) * (1-x)**(b-1)
# beta.pdf(x, a, b) = ------------------------------------
# gamma(a)*gamma(b)
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x)
lPx -= sc.betaln(a, b)
return lPx
def _cdf(self, x, a, b):
return sc.btdtr(a, b, x)
def _ppf(self, q, a, b):
return sc.btdtri(a, b, q)
def _stats(self, a, b):
mn = a*1.0 / (a + b)
var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
g1 = 2.0*(b-a)*np.sqrt((1.0+a+b)/(a*b)) / (2+a+b)
g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
g2 /= a*b*(a+b+2)*(a+b+3)
return mn, var, g1, g2
def _fitstart(self, data):
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*np.sqrt(a + b + 1) / (a + b + 2) / np.sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super(beta_gen, self)._fitstart(data, args=(a, b))
@extend_notes_in_docstring(rv_continuous, notes="""\
In the special case where both `floc` and `fscale` are given, a
`ValueError` is raised if any value `x` in `data` does not satisfy
`floc < x < floc + fscale`.\n\n""")
def fit(self, data, *args, **kwds):
# Override rv_continuous.fit, so we can more efficiently handle the
# case where floc and fscale are given.
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None or fscale is None:
# do general fit
return super(beta_gen, self).fit(data, *args, **kwds)
# We already got these from kwds, so just pop them.
kwds.pop('floc', None)
kwds.pop('fscale', None)
f0 = _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
f1 = _get_fixed_fit_value(kwds, ['f1', 'fb', 'fix_b'])
_remove_optimizer_parameters(kwds)
if f0 is not None and f1 is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Special case: loc and scale are constrained, so we are fitting
# just the shape parameters. This can be done much more efficiently
# than the method used in `rv_continuous.fit`. (See the subsection
# "Two unknown parameters" in the section "Maximum likelihood" of
# the Wikipedia article on the Beta distribution for the formulas.)
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
# Normalize the data to the interval [0, 1].
data = (np.ravel(data) - floc) / fscale
if np.any(data <= 0) or np.any(data >= 1):
raise FitDataError("beta", lower=floc, upper=floc + fscale)
xbar = data.mean()
if f0 is not None or f1 is not None:
# One of the shape parameters is fixed.
if f0 is not None:
# The shape parameter a is fixed, so swap the parameters
# and flip the data. We always solve for `a`. The result
# will be swapped back before returning.
b = f0
data = 1 - data
xbar = 1 - xbar
else:
b = f1
# Initial guess for a. Use the formula for the mean of the beta
# distribution, E[x] = a / (a + b), to generate a reasonable
# starting point based on the mean of the data and the given
# value of b.
a = b * xbar / (1 - xbar)
# Compute the MLE for `a` by solving _beta_mle_a.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_a, a,
args=(b, len(data), np.log(data).sum()),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a = theta[0]
if f0 is not None:
# The shape parameter a was fixed, so swap back the
# parameters.
a, b = b, a
else:
# Neither of the shape parameters is fixed.
# s1 and s2 are used in the extra arguments passed to _beta_mle_ab
# by optimize.fsolve.
s1 = np.log(data).sum()
s2 = sc.log1p(-data).sum()
# Use the "method of moments" to estimate the initial
# guess for a and b.
fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
a = xbar * fac
b = (1 - xbar) * fac
# Compute the MLE for a and b by solving _beta_mle_ab.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_ab, [a, b],
args=(len(data), s1, s2),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a, b = theta
return a, b, floc, fscale
beta = beta_gen(a=0.0, b=1.0, name='beta')
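# Hypothetical usage sketch of the fast path in `beta_gen.fit` above when both
# location and scale are fixed (values are illustrative only):
#
# >>> from scipy import stats
# >>> x = stats.beta.rvs(2.5, 1.5, size=1000, random_state=0)
# >>> a, b, loc, scale = stats.beta.fit(x, floc=0, fscale=1)
# # loc and scale are returned as given; a and b solve the MLE equations.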
class betaprime_gen(rv_continuous):
r"""A beta prime continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `betaprime` is:
.. math::
f(x, a, b) = \frac{x^{a-1} (1+x)^{-a-b}}{\beta(a, b)}
for :math:`x >= 0`, :math:`a > 0`, :math:`b > 0`, where
:math:`\beta(a, b)` is the beta function (see `scipy.special.beta`).
`betaprime` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, a, b, size=None, random_state=None):
u1 = gamma.rvs(a, size=size, random_state=random_state)
u2 = gamma.rvs(b, size=size, random_state=random_state)
return u1 / u2
def _pdf(self, x, a, b):
# betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b)
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b)
def _cdf(self, x, a, b):
return sc.betainc(a, b, x/(1.+x))
def _munp(self, n, a, b):
if n == 1.0:
return np.where(b > 1,
a/(b-1.0),
np.inf)
elif n == 2.0:
return np.where(b > 2,
a*(a+1.0)/((b-2.0)*(b-1.0)),
np.inf)
elif n == 3.0:
return np.where(b > 3,
a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
np.inf)
elif n == 4.0:
return np.where(b > 4,
(a*(a + 1.0)*(a + 2.0)*(a + 3.0) /
((b - 4.0)*(b - 3.0)*(b - 2.0)*(b - 1.0))),
np.inf)
else:
raise NotImplementedError
betaprime = betaprime_gen(a=0.0, name='betaprime')
class bradford_gen(rv_continuous):
r"""A Bradford continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `bradford` is:
.. math::
f(x, c) = \frac{c}{\log(1+c) (1+cx)}
for :math:`0 <= x <= 1` and :math:`c > 0`.
`bradford` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# bradford.pdf(x, c) = c / (k * (1+c*x))
return c / (c*x + 1.0) / sc.log1p(c)
def _cdf(self, x, c):
return sc.log1p(c*x) / sc.log1p(c)
def _ppf(self, q, c):
return sc.expm1(q * sc.log1p(c)) / c
def _stats(self, c, moments='mv'):
k = np.log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = np.sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= np.sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) +
6*c*k*k*(3*k-14) + 12*k**3)
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = np.log(1+c)
return k/2.0 - np.log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
class burr_gen(rv_continuous):
r"""A Burr (Type III) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either `burr` or `burr12` with ``d=1``
burr12 : Burr Type XII distribution
mielke : Mielke Beta-Kappa / Dagum distribution
Notes
-----
The probability density function for `burr` is:
.. math::
f(x, c, d) = c d x^{-c - 1} / (1 + x^{-c})^{d + 1}
for :math:`x >= 0` and :math:`c, d > 0`.
`burr` takes :math:`c` and :math:`d` as shape parameters.
This is the PDF corresponding to the third CDF given in Burr's list;
specifically, it is equation (11) in Burr's paper [1]_. The distribution
is also commonly referred to as the Dagum distribution [2]_. If the
parameter :math:`c < 1` then the mean of the distribution does not
exist and if :math:`c < 2` the variance does not exist [2]_.
The PDF is finite at the left endpoint :math:`x = 0` if :math:`c * d >= 1`.
%(after_notes)s
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
.. [2] https://en.wikipedia.org/wiki/Dagum_distribution
.. [3] Kleiber, Christian. "A guide to the Dagum distributions."
Modeling Income Distributions and Lorenz Curves pp 97-117 (2008).
%(example)s
"""
# Do not set _support_mask to rv_continuous._open_support_mask
# Whether the left-hand endpoint is suitable for pdf evaluation is dependent
# on the values of c and d: if c*d >= 1, the pdf is finite, otherwise infinite.
def _pdf(self, x, c, d):
# burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
output = _lazywhere(x == 0, [x, c, d],
lambda x_, c_, d_: c_ * d_ * (x_**(c_*d_-1)) / (1 + x_**c_),
f2 = lambda x_, c_, d_: (c_ * d_ * (x_ ** (-c_ - 1.0)) /
((1 + x_ ** (-c_)) ** (d_ + 1.0))))
if output.ndim == 0:
return output[()]
return output
def _logpdf(self, x, c, d):
output = _lazywhere(
x == 0, [x, c, d],
lambda x_, c_, d_: (np.log(c_) + np.log(d_) + sc.xlogy(c_*d_ - 1, x_)
- (d_+1) * sc.log1p(x_**(c_))),
f2 = lambda x_, c_, d_: (np.log(c_) + np.log(d_)
+ sc.xlogy(-c_ - 1, x_)
- sc.xlog1py(d_+1, x_**(-c_))))
if output.ndim == 0:
return output[()]
return output
def _cdf(self, x, c, d):
return (1 + x**(-c))**(-d)
def _logcdf(self, x, c, d):
return sc.log1p(x**(-c)) * (-d)
def _sf(self, x, c, d):
return np.exp(self._logsf(x, c, d))
def _logsf(self, x, c, d):
return np.log1p(- (1 + x**(-c))**(-d))
def _ppf(self, q, c, d):
return (q**(-1.0/d) - 1)**(-1.0/c)
def _stats(self, c, d):
nc = np.arange(1, 5).reshape(4,1) / c
#ek is the kth raw moment, e1 is the mean e2-e1**2 variance etc.
e1, e2, e3, e4 = sc.beta(d + nc, 1. - nc) * d
mu = np.where(c > 1.0, e1, np.nan)
mu2_if_c = e2 - mu**2
mu2 = np.where(c > 2.0, mu2_if_c, np.nan)
g1 = _lazywhere(
c > 3.0,
(c, e1, e2, e3, mu2_if_c),
lambda c, e1, e2, e3, mu2_if_c: (e3 - 3*e2*e1 + 2*e1**3) / np.sqrt((mu2_if_c)**3),
fillvalue=np.nan)
g2 = _lazywhere(
c > 4.0,
(c, e1, e2, e3, e4, mu2_if_c),
lambda c, e1, e2, e3, e4, mu2_if_c: (
((e4 - 4*e3*e1 + 6*e2*e1**2 - 3*e1**4) / mu2_if_c**2) - 3),
fillvalue=np.nan)
return mu, mu2, g1, g2
def _munp(self, n, c, d):
def __munp(n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 - nc, d + nc)
n, c, d = np.asarray(n), np.asarray(c), np.asarray(d)
return _lazywhere((c > n) & (n == n) & (d == d), (c, d, n),
lambda c, d, n: __munp(n, c, d),
np.nan)
burr = burr_gen(a=0.0, name='burr')
class burr12_gen(rv_continuous):
r"""A Burr (Type XII) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either `burr` or `burr12` with ``d=1``
burr : Burr Type III distribution
Notes
-----
    The probability density function for `burr12` is:
.. math::
f(x, c, d) = c d x^{c-1} / (1 + x^c)^{d + 1}
for :math:`x >= 0` and :math:`c, d > 0`.
`burr12` takes ``c`` and ``d`` as shape parameters for :math:`c`
and :math:`d`.
This is the PDF corresponding to the twelfth CDF given in Burr's list;
specifically, it is equation (20) in Burr's paper [1]_.
%(after_notes)s
The Burr type 12 distribution is also sometimes referred to as
the Singh-Maddala distribution from NIST [2]_.
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
.. [2] https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/b12pdf.htm
.. [3] "Burr distribution",
https://en.wikipedia.org/wiki/Burr_distribution
%(example)s
"""
def _pdf(self, x, c, d):
# burr12.pdf(x, c, d) = c * d * x**(c-1) * (1+x**(c))**(-d-1)
return np.exp(self._logpdf(x, c, d))
def _logpdf(self, x, c, d):
return np.log(c) + np.log(d) + sc.xlogy(c - 1, x) + sc.xlog1py(-d-1, x**c)
def _cdf(self, x, c, d):
return -sc.expm1(self._logsf(x, c, d))
def _logcdf(self, x, c, d):
return sc.log1p(-(1 + x**c)**(-d))
def _sf(self, x, c, d):
return np.exp(self._logsf(x, c, d))
def _logsf(self, x, c, d):
return sc.xlog1py(-d, x**c)
def _ppf(self, q, c, d):
# The following is an implementation of
# ((1 - q)**(-1.0/d) - 1)**(1.0/c)
# that does a better job handling small values of q.
return sc.expm1(-1/d * sc.log1p(-q))**(1/c)
def _munp(self, n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 + nc, d - nc)
burr12 = burr12_gen(a=0.0, name='burr12')
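# Illustrative check (not part of the module), assuming the public
# scipy.stats API: as noted in "See Also", `fisk` is `burr12` with d = 1.
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.linspace(0.1, 5.0, 9)
# >>> np.allclose(stats.burr12.cdf(x, 2.5, 1.0), stats.fisk.cdf(x, 2.5))
# True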
class fisk_gen(burr_gen):
r"""A Fisk continuous random variable.
The Fisk distribution is also known as the log-logistic distribution.
%(before_notes)s
Notes
-----
The probability density function for `fisk` is:
.. math::
f(x, c) = c x^{-c-1} (1 + x^{-c})^{-2}
for :math:`x >= 0` and :math:`c > 0`.
`fisk` takes ``c`` as a shape parameter for :math:`c`.
`fisk` is a special case of `burr` or `burr12` with ``d=1``.
%(after_notes)s
See Also
--------
burr
%(example)s
"""
def _pdf(self, x, c):
# fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
return burr._pdf(x, c, 1.0)
def _cdf(self, x, c):
return burr._cdf(x, c, 1.0)
def _sf(self, x, c):
return burr._sf(x, c, 1.0)
def _logpdf(self, x, c):
# fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
return burr._logpdf(x, c, 1.0)
def _logcdf(self, x, c):
return burr._logcdf(x, c, 1.0)
def _logsf(self, x, c):
return burr._logsf(x, c, 1.0)
def _ppf(self, x, c):
return burr._ppf(x, c, 1.0)
def _munp(self, n, c):
return burr._munp(n, c, 1.0)
def _stats(self, c):
return burr._stats(c, 1.0)
def _entropy(self, c):
return 2 - np.log(c)
fisk = fisk_gen(a=0.0, name='fisk')
# median = loc
class cauchy_gen(rv_continuous):
r"""A Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `cauchy` is
.. math::
f(x) = \frac{1}{\pi (1 + x^2)}
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# cauchy.pdf(x) = 1 / (pi * (1 + x**2))
return 1.0/np.pi/(1.0+x*x)
def _cdf(self, x):
return 0.5 + 1.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi*q-np.pi/2.0)
def _sf(self, x):
return 0.5 - 1.0/np.pi*np.arctan(x)
def _isf(self, q):
return np.tan(np.pi/2.0-np.pi*q)
def _stats(self):
return np.nan, np.nan, np.nan, np.nan
def _entropy(self):
return np.log(4*np.pi)
def _fitstart(self, data, args=None):
# Initialize ML guesses using quartiles instead of moments.
p25, p50, p75 = np.percentile(data, [25, 50, 75])
return p50, (p75 - p25)/2
cauchy = cauchy_gen(name='cauchy')
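# Why quartiles in _fitstart above: the Cauchy distribution has no finite
# moments, but its quartiles are well defined.  For the standard Cauchy the
# quartiles are -1 and +1, so (p75 - p25)/2 recovers scale = 1 and the median
# recovers loc = 0.  A quick check against the public API (an illustration,
# not part of the module):
# >>> from scipy import stats
# >>> stats.cauchy.ppf([0.25, 0.5, 0.75])
# array([-1.,  0.,  1.])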
class chi_gen(rv_continuous):
r"""A chi continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi` is:
.. math::
f(x, k) = \frac{1}{2^{k/2-1} \Gamma \left( k/2 \right)}
x^{k-1} \exp \left( -x^2/2 \right)
for :math:`x >= 0` and :math:`k > 0` (degrees of freedom, denoted ``df``
in the implementation). :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
Special cases of `chi` are:
- ``chi(1, loc, scale)`` is equivalent to `halfnorm`
- ``chi(2, 0, scale)`` is equivalent to `rayleigh`
- ``chi(3, 0, scale)`` is equivalent to `maxwell`
`chi` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df, size=None, random_state=None):
return np.sqrt(chi2.rvs(df, size=size, random_state=random_state))
def _pdf(self, x, df):
# x**(df-1) * exp(-x**2/2)
# chi.pdf(x, df) = -------------------------
# 2**(df/2-1) * gamma(df/2)
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
l = np.log(2) - .5*np.log(2)*df - sc.gammaln(.5*df)
return l + sc.xlogy(df - 1., x) - .5*x**2
def _cdf(self, x, df):
return sc.gammainc(.5*df, .5*x**2)
def _ppf(self, q, df):
return np.sqrt(2*sc.gammaincinv(.5*df, q))
def _stats(self, df):
mu = np.sqrt(2)*sc.gamma(df/2.0+0.5)/sc.gamma(df/2.0)
mu2 = df - mu*mu
g1 = (2*mu**3.0 + mu*(1-2*df))/np.asarray(np.power(mu2, 1.5))
g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
g2 /= np.asarray(mu2**2.0)
return mu, mu2, g1, g2
chi = chi_gen(a=0.0, name='chi')
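# Illustrative check of the special cases listed in the `chi` docstring
# (not part of the module), assuming the public scipy.stats API:
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.linspace(0.1, 3.0, 7)
# >>> np.allclose(stats.chi.pdf(x, 1), stats.halfnorm.pdf(x))   # df=1
# True
# >>> np.allclose(stats.chi.pdf(x, 2), stats.rayleigh.pdf(x))   # df=2
# True
# >>> np.allclose(stats.chi.pdf(x, 3), stats.maxwell.pdf(x))    # df=3
# True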
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
r"""A chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi2` is:
.. math::
f(x, k) = \frac{1}{2^{k/2} \Gamma \left( k/2 \right)}
x^{k/2-1} \exp \left( -x/2 \right)
for :math:`x > 0` and :math:`k > 0` (degrees of freedom, denoted ``df``
in the implementation).
`chi2` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df, size=None, random_state=None):
return random_state.chisquare(df, size)
def _pdf(self, x, df):
# chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
return sc.xlogy(df/2.-1, x) - x/2. - sc.gammaln(df/2.) - (np.log(2)*df)/2.
def _cdf(self, x, df):
return sc.chdtr(df, x)
def _sf(self, x, df):
return sc.chdtrc(df, x)
def _isf(self, p, df):
return sc.chdtri(df, p)
def _ppf(self, p, df):
return 2*sc.gammaincinv(df/2, p)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*np.sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0, name='chi2')
class cosine_gen(rv_continuous):
r"""A cosine continuous random variable.
%(before_notes)s
Notes
-----
The cosine distribution is an approximation to the normal distribution.
The probability density function for `cosine` is:
.. math::
f(x) = \frac{1}{2\pi} (1+\cos(x))
for :math:`-\pi \le x \le \pi`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
return 1.0/2/np.pi*(1+np.cos(x))
def _cdf(self, x):
return 1.0/2/np.pi*(np.pi + x + np.sin(x))
def _stats(self):
return 0.0, np.pi*np.pi/3.0-2.0, 0.0, -6.0*(np.pi**4-90)/(5.0*(np.pi*np.pi-6)**2)
def _entropy(self):
return np.log(4*np.pi)-1.0
cosine = cosine_gen(a=-np.pi, b=np.pi, name='cosine')
class dgamma_gen(rv_continuous):
r"""A double gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dgamma` is:
.. math::
f(x, a) = \frac{1}{2\Gamma(a)} |x|^{a-1} \exp(-|x|)
for a real number :math:`x` and :math:`a > 0`. :math:`\Gamma` is the
gamma function (`scipy.special.gamma`).
`dgamma` takes ``a`` as a shape parameter for :math:`a`.
%(after_notes)s
%(example)s
"""
def _rvs(self, a, size=None, random_state=None):
u = random_state.uniform(size=size)
gm = gamma.rvs(a, size=size, random_state=random_state)
return gm * np.where(u >= 0.5, 1, -1)
def _pdf(self, x, a):
# dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
ax = abs(x)
return 1.0/(2*sc.gamma(a))*ax**(a-1.0) * np.exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return sc.xlogy(a - 1.0, ax) - ax - np.log(2) - sc.gammaln(a)
def _cdf(self, x, a):
fac = 0.5*sc.gammainc(a, abs(x))
return np.where(x > 0, 0.5 + fac, 0.5 - fac)
def _sf(self, x, a):
fac = 0.5*sc.gammainc(a, abs(x))
return np.where(x > 0, 0.5-fac, 0.5+fac)
def _ppf(self, q, a):
fac = sc.gammainccinv(a, 1-abs(2*q-1))
return np.where(q > 0.5, fac, -fac)
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma')
class dweibull_gen(rv_continuous):
r"""A double Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dweibull` is given by
.. math::
f(x, c) = c / 2 |x|^{c-1} \exp(-|x|^c)
for a real number :math:`x` and :math:`c > 0`.
`dweibull` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _rvs(self, c, size=None, random_state=None):
u = random_state.uniform(size=size)
w = weibull_min.rvs(c, size=size, random_state=random_state)
return w * (np.where(u >= 0.5, 1, -1))
def _pdf(self, x, c):
# dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
ax = abs(x)
Px = c / 2.0 * ax**(c-1.0) * np.exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return np.log(c) - np.log(2.0) + sc.xlogy(c - 1.0, ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5 * np.exp(-abs(x)**c)
return np.where(x > 0, 1 - Cx1, Cx1)
def _ppf(self, q, c):
fac = 2. * np.where(q <= 0.5, q, 1. - q)
fac = np.power(-np.log(fac), 1.0 / c)
return np.where(q > 0.5, fac, -fac)
def _munp(self, n, c):
return (1 - (n % 2)) * sc.gamma(1.0 + 1.0 * n / c)
# since we know that all odd moments are zero, return them at once.
# returning Nones from _stats makes the public stats call _munp,
# so overall we're saving one or two gamma function evaluations here.
def _stats(self, c):
return 0, None, 0, None
dweibull = dweibull_gen(name='dweibull')
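# Illustration of the odd-moment comment above (not part of the module),
# assuming the public scipy.stats API: all odd moments of `dweibull`
# vanish by symmetry.
# >>> from scipy import stats
# >>> stats.dweibull.moment(1, 2.5), stats.dweibull.moment(3, 2.5)
# (0.0, 0.0)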
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
class expon_gen(rv_continuous):
r"""An exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `expon` is:
.. math::
f(x) = \exp(-x)
for :math:`x \ge 0`.
%(after_notes)s
A common parameterization for `expon` is in terms of the rate parameter
``lambda``, such that ``pdf = lambda * exp(-lambda * x)``. This
parameterization corresponds to using ``scale = 1 / lambda``.
%(example)s
"""
def _rvs(self, size=None, random_state=None):
return random_state.standard_exponential(size)
def _pdf(self, x):
# expon.pdf(x) = exp(-x)
return np.exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -sc.expm1(-x)
def _ppf(self, q):
return -sc.log1p(-q)
def _sf(self, x):
return np.exp(-x)
def _logsf(self, x):
return -x
def _isf(self, q):
return -np.log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
@replace_notes_in_docstring(rv_continuous, notes="""\
This function uses explicit formulas for the maximum likelihood
estimation of the exponential distribution parameters, so the
`optimizer`, `loc` and `scale` keyword arguments are ignored.\n\n""")
def fit(self, data, *args, **kwds):
if len(args) > 0:
raise TypeError("Too many arguments.")
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
_remove_optimizer_parameters(kwds)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
data_min = data.min()
if floc is None:
# ML estimate of the location is the minimum of the data.
loc = data_min
else:
loc = floc
if data_min < loc:
# There are values that are less than the specified loc.
raise FitDataError("expon", lower=floc, upper=np.inf)
if fscale is None:
# ML estimate of the scale is the shifted mean.
scale = data.mean() - loc
else:
scale = fscale
# We expect the return values to be floating point, so ensure it
# by explicitly converting to float.
return float(loc), float(scale)
expon = expon_gen(a=0.0, name='expon')
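# Illustrative checks (not part of the module), assuming the public
# scipy.stats API: the rate parameterization mentioned in the docstring
# corresponds to scale = 1/lambda, and the fit method above returns the
# closed-form MLE (loc = minimum of the data, scale = mean - loc).
# >>> import numpy as np
# >>> from scipy import stats
# >>> lam = 2.5
# >>> x = np.linspace(0.0, 4.0, 9)
# >>> np.allclose(stats.expon.pdf(x, scale=1/lam), lam*np.exp(-lam*x))
# True
# >>> rng = np.random.default_rng(0)
# >>> data = rng.standard_exponential(1000)
# >>> loc, scale = stats.expon.fit(data)
# >>> np.isclose(loc, data.min()) and np.isclose(scale, data.mean() - data.min())
# True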
# Exponentially Modified Normal (exponential distribution
# convolved with a Normal).
# This is called an exponentially modified Gaussian on Wikipedia.
class exponnorm_gen(rv_continuous):
r"""An exponentially modified Normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponnorm` is:
.. math::
f(x, K) = \frac{1}{2K} \exp\left(\frac{1}{2 K^2} - x / K \right)
\text{erfc}\left(-\frac{x - 1/K}{\sqrt{2}}\right)
where :math:`x` is a real number and :math:`K > 0`.
It can be thought of as the sum of a standard normal random variable
and an independent exponentially distributed random variable with rate
``1/K``.
%(after_notes)s
An alternative parameterization of this distribution (for example, in
`Wikipedia <https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution>`_)
involves three parameters, :math:`\mu`, :math:`\lambda` and
:math:`\sigma`.
In the present parameterization this corresponds to having ``loc`` and
``scale`` equal to :math:`\mu` and :math:`\sigma`, respectively, and
shape parameter :math:`K = 1/(\sigma\lambda)`.
.. versionadded:: 0.16.0
%(example)s
"""
def _rvs(self, K, size=None, random_state=None):
expval = random_state.standard_exponential(size) * K
gval = random_state.standard_normal(size)
return expval + gval
def _pdf(self, x, K):
return np.exp(self._logpdf(x, K))
def _logpdf(self, x, K):
invK = 1.0 / K
exparg = invK * (0.5 * invK - x)
return exparg + _norm_logcdf(x - invK) - np.log(K)
def _cdf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
return _norm_cdf(x) - np.exp(expval) * _norm_cdf(x - invK)
def _sf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
return _norm_cdf(-x) + np.exp(expval) * _norm_cdf(x - invK)
def _stats(self, K):
K2 = K * K
opK2 = 1.0 + K2
skw = 2 * K**3 * opK2**(-1.5)
krt = 6.0 * K2 * K2 * opK2**(-2)
return K, opK2, skw, krt
exponnorm = exponnorm_gen(name='exponnorm')
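# Illustration (not part of the module), assuming the public scipy.stats API:
# per the docstring, exponnorm(K) is Z + K*E with Z standard normal and E
# standard exponential, so its mean is K and its variance is 1 + K**2,
# matching _stats above.
# >>> import numpy as np
# >>> from scipy import stats
# >>> K = 1.5
# >>> m, v = stats.exponnorm.stats(K, moments='mv')
# >>> np.isclose(m, K) and np.isclose(v, 1 + K**2)
# True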
class exponweib_gen(rv_continuous):
r"""An exponentiated Weibull continuous random variable.
%(before_notes)s
See Also
--------
weibull_min, numpy.random.RandomState.weibull
Notes
-----
The probability density function for `exponweib` is:
.. math::
f(x, a, c) = a c [1-\exp(-x^c)]^{a-1} \exp(-x^c) x^{c-1}
and its cumulative distribution function is:
.. math::
F(x, a, c) = [1-\exp(-x^c)]^a
for :math:`x > 0`, :math:`a > 0`, :math:`c > 0`.
`exponweib` takes :math:`a` and :math:`c` as shape parameters:
* :math:`a` is the exponentiation parameter,
with the special case :math:`a=1` corresponding to the
(non-exponentiated) Weibull distribution `weibull_min`.
* :math:`c` is the shape parameter of the non-exponentiated Weibull law.
%(after_notes)s
References
----------
https://en.wikipedia.org/wiki/Exponentiated_Weibull_distribution
%(example)s
"""
def _pdf(self, x, a, c):
# exponweib.pdf(x, a, c) =
# a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
negxc = -x**c
exm1c = -sc.expm1(negxc)
logp = (np.log(a) + np.log(c) + sc.xlogy(a - 1.0, exm1c) +
negxc + sc.xlogy(c - 1.0, x))
return logp
def _cdf(self, x, a, c):
exm1c = -sc.expm1(-x**c)
return exm1c**a
def _ppf(self, q, a, c):
return (-sc.log1p(-q**(1.0/a)))**np.asarray(1.0/c)
exponweib = exponweib_gen(a=0.0, name='exponweib')
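# Illustration of the a=1 special case noted in the docstring (not part of
# the module), assuming the public scipy.stats API:
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.linspace(0.1, 3.0, 7)
# >>> np.allclose(stats.exponweib.pdf(x, 1.0, 2.5), stats.weibull_min.pdf(x, 2.5))
# True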
class exponpow_gen(rv_continuous):
r"""An exponential power continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponpow` is:
.. math::
f(x, b) = b x^{b-1} \exp(1 + x^b - \exp(x^b))
for :math:`x \ge 0`, :math:`b > 0`. Note that this is a different
distribution from the exponential power distribution that is also known
under the names "generalized normal" or "generalized Gaussian".
`exponpow` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
References
----------
http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf
%(example)s
"""
def _pdf(self, x, b):
# exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b))
return np.exp(self._logpdf(x, b))
def _logpdf(self, x, b):
xb = x**b
f = 1 + np.log(b) + sc.xlogy(b - 1.0, x) + xb - np.exp(xb)
return f
def _cdf(self, x, b):
return -sc.expm1(-sc.expm1(x**b))
def _sf(self, x, b):
return np.exp(-sc.expm1(x**b))
def _isf(self, x, b):
return (sc.log1p(-np.log(x)))**(1./b)
def _ppf(self, q, b):
return pow(sc.log1p(-sc.log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow')
class fatiguelife_gen(rv_continuous):
r"""A fatigue-life (Birnbaum-Saunders) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `fatiguelife` is:
.. math::
f(x, c) = \frac{x+1}{2c\sqrt{2\pi x^3}} \exp(-\frac{(x-1)^2}{2x c^2})
for :math:`x >= 0` and :math:`c > 0`.
`fatiguelife` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
.. [1] "Birnbaum-Saunders distribution",
https://en.wikipedia.org/wiki/Birnbaum-Saunders_distribution
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, c, size=None, random_state=None):
z = random_state.standard_normal(size)
x = 0.5*c*z
x2 = x*x
t = 1.0 + 2*x2 + 2*x*np.sqrt(1 + x2)
return t
def _pdf(self, x, c):
# fatiguelife.pdf(x, c) =
# (x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return (np.log(x+1) - (x-1)**2 / (2.0*x*c**2) - np.log(2*c) -
0.5*(np.log(2*np.pi) + 3*np.log(x)))
def _cdf(self, x, c):
return _norm_cdf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x)))
def _ppf(self, q, c):
tmp = c*sc.ndtri(q)
return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2
def _stats(self, c):
# NB: the formula for kurtosis on Wikipedia seems to have an error:
# it's 40, not 41. At least it disagrees with the one from Wolfram
# Alpha, and the latter one, below, passes the tests, while the
# Wikipedia one doesn't. The coefficients in the raw-moment expressions
# have not been re-derived here to settle the discrepancy.
c2 = c*c
mu = c2 / 2.0 + 1.0
den = 5.0 * c2 + 4.0
mu2 = c2*den / 4.0
g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5)
g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0
return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife')
class foldcauchy_gen(rv_continuous):
r"""A folded Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldcauchy` is:
.. math::
f(x, c) = \frac{1}{\pi (1+(x-c)^2)} + \frac{1}{\pi (1+(x+c)^2)}
for :math:`x \ge 0`.
`foldcauchy` takes ``c`` as a shape parameter for :math:`c`.
%(example)s
"""
def _rvs(self, c, size=None, random_state=None):
return abs(cauchy.rvs(loc=c, size=size,
random_state=random_state))
def _pdf(self, x, c):
# foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
return 1.0/np.pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
def _cdf(self, x, c):
return 1.0/np.pi*(np.arctan(x-c) + np.arctan(x+c))
def _stats(self, c):
return np.inf, np.inf, np.nan, np.nan
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
class f_gen(rv_continuous):
r"""An F continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `f` is:
.. math::
f(x, df_1, df_2) = \frac{df_2^{df_2/2} df_1^{df_1/2} x^{df_1 / 2-1}}
{(df_2+df_1 x)^{(df_1+df_2)/2}
B(df_1/2, df_2/2)}
for :math:`x > 0`.
`f` takes ``dfn`` and ``dfd`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, dfn, dfd, size=None, random_state=None):
return random_state.f(dfn, dfd, size)
def _pdf(self, x, dfn, dfd):
# df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
# F.pdf(x, df1, df2) = --------------------------------------------
# (df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
return np.exp(self._logpdf(x, dfn, dfd))
def _logpdf(self, x, dfn, dfd):
n = 1.0 * dfn
m = 1.0 * dfd
lPx = m/2 * np.log(m) + n/2 * np.log(n) + sc.xlogy(n/2 - 1, x)
lPx -= ((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2)
return lPx
def _cdf(self, x, dfn, dfd):
return sc.fdtr(dfn, dfd, x)
def _sf(self, x, dfn, dfd):
return sc.fdtrc(dfn, dfd, x)
def _ppf(self, q, dfn, dfd):
return sc.fdtri(dfn, dfd, q)
def _stats(self, dfn, dfd):
v1, v2 = 1. * dfn, 1. * dfd
v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8.
mu = _lazywhere(
v2 > 2, (v2, v2_2),
lambda v2, v2_2: v2 / v2_2,
np.inf)
mu2 = _lazywhere(
v2 > 4, (v1, v2, v2_2, v2_4),
lambda v1, v2, v2_2, v2_4:
2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4),
np.inf)
g1 = _lazywhere(
v2 > 6, (v1, v2_2, v2_4, v2_6),
lambda v1, v2_2, v2_4, v2_6:
(2 * v1 + v2_2) / v2_6 * np.sqrt(v2_4 / (v1 * (v1 + v2_2))),
np.nan)
g1 *= np.sqrt(8.)
g2 = _lazywhere(
v2 > 8, (g1, v2_6, v2_8),
lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8,
np.nan)
g2 *= 3. / 2.
return mu, mu2, g1, g2
f = f_gen(a=0.0, name='f')
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: regress docs have scale parameter correct, but first parameter
## he gives is a shape parameter A = c * scale
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
r"""A folded normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldnorm` is:
.. math::
f(x, c) = \sqrt{2/\pi} \cosh(c x) \exp\left(-\frac{x^2+c^2}{2}\right)
for :math:`x \ge 0` and :math:`c \ge 0`.
`foldnorm` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return c >= 0
def _rvs(self, c, size=None, random_state=None):
return abs(random_state.standard_normal(size) + c)
def _pdf(self, x, c):
# foldnorm.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
return _norm_pdf(x + c) + _norm_pdf(x-c)
def _cdf(self, x, c):
return _norm_cdf(x-c) + _norm_cdf(x+c) - 1.0
def _stats(self, c):
# Regina C. Elandt, Technometrics 3, 551 (1961)
# https://www.jstor.org/stable/1266561
#
c2 = c*c
expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi)
mu = 2.*expfac + c * sc.erf(c/np.sqrt(2))
mu2 = c2 + 1 - mu*mu
g1 = 2. * (mu*mu*mu - c2*mu - expfac)
g1 /= np.power(mu2, 1.5)
g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
g2 = g2 / mu2**2.0 - 3.
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
class weibull_min_gen(rv_continuous):
r"""Weibull minimum continuous random variable.
The Weibull Minimum Extreme Value distribution, from extreme value theory
(Fisher-Gnedenko theorem), is also often simply called the Weibull
distribution. It arises as the limiting distribution of the rescaled
minimum of iid random variables.
%(before_notes)s
See Also
--------
weibull_max, numpy.random.RandomState.weibull, exponweib
Notes
-----
The probability density function for `weibull_min` is:
.. math::
f(x, c) = c x^{c-1} \exp(-x^c)
for :math:`x > 0`, :math:`c > 0`.
`weibull_min` takes ``c`` as a shape parameter for :math:`c`
(named :math:`k` in the Wikipedia article and :math:`a` in
``numpy.random.weibull``). Special shape values are :math:`c=1` and
:math:`c=2`, where the Weibull distribution reduces (up to a scale
factor) to the `expon` and `rayleigh` distributions, respectively.
%(after_notes)s
References
----------
https://en.wikipedia.org/wiki/Weibull_distribution
https://en.wikipedia.org/wiki/Fisher-Tippett-Gnedenko_theorem
%(example)s
"""
def _pdf(self, x, c):
# weibull_min.pdf(x, c) = c * x**(c-1) * exp(-x**c)
return c*pow(x, c-1)*np.exp(-pow(x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c)
def _cdf(self, x, c):
return -sc.expm1(-pow(x, c))
def _sf(self, x, c):
return np.exp(-pow(x, c))
def _logsf(self, x, c):
return -pow(x, c)
def _ppf(self, q, c):
return pow(-sc.log1p(-q), 1.0/c)
def _munp(self, n, c):
return sc.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
weibull_min = weibull_min_gen(a=0.0, name='weibull_min')
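# Illustration of the special shape values mentioned in the docstring (not
# part of the module), assuming the public scipy.stats API.  Note that the
# c=2 case matches `rayleigh` only after rescaling by sqrt(2):
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.linspace(0.1, 3.0, 7)
# >>> np.allclose(stats.weibull_min.pdf(x, 1), stats.expon.pdf(x))
# True
# >>> np.allclose(stats.weibull_min.pdf(x, 2, scale=np.sqrt(2)),
# ...             stats.rayleigh.pdf(x))
# True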
class weibull_max_gen(rv_continuous):
r"""Weibull maximum continuous random variable.
The Weibull Maximum Extreme Value distribution, from extreme value theory
(Fisher-Gnedenko theorem), is the limiting distribution of the rescaled
maximum of iid random variables. It is the distribution of ``-X``
if ``X`` is distributed as `weibull_min`.
%(before_notes)s
See Also
--------
weibull_min
Notes
-----
The probability density function for `weibull_max` is:
.. math::
f(x, c) = c (-x)^{c-1} \exp(-(-x)^c)
for :math:`x < 0`, :math:`c > 0`.
`weibull_max` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
https://en.wikipedia.org/wiki/Weibull_distribution
https://en.wikipedia.org/wiki/Fisher-Tippett-Gnedenko_theorem
%(example)s
"""
def _pdf(self, x, c):
# weibull_max.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
return c*pow(-x, c-1)*np.exp(-pow(-x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c-1, -x) - pow(-x, c)
def _cdf(self, x, c):
return np.exp(-pow(-x, c))
def _logcdf(self, x, c):
return -pow(-x, c)
def _sf(self, x, c):
return -sc.expm1(-pow(-x, c))
def _ppf(self, q, c):
return -pow(-np.log(q), 1.0/c)
def _munp(self, n, c):
val = sc.gamma(1.0+n*1.0/c)
if int(n) % 2:
sgn = -1
else:
sgn = 1
return sgn * val
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
weibull_max = weibull_max_gen(b=0.0, name='weibull_max')
class genlogistic_gen(rv_continuous):
r"""A generalized logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genlogistic` is:
.. math::
f(x, c) = c \frac{\exp(-x)}
{(1 + \exp(-x))^{c+1}}
for real :math:`x` and :math:`c > 0`.
`genlogistic` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
# Two mathematically equivalent expressions for log(pdf(x, c)):
# log(pdf(x, c)) = log(c) - x - (c + 1)*log(1 + exp(-x))
# = log(c) + c*x - (c + 1)*log(1 + exp(x))
mult = -(c - 1) * (x < 0) - 1
absx = np.abs(x)
return np.log(c) + mult*absx - (c+1) * sc.log1p(np.exp(-absx))
def _cdf(self, x, c):
Cx = (1+np.exp(-x))**(-c)
return Cx
def _ppf(self, q, c):
vals = -np.log(pow(q, -1.0/c)-1)
return vals
def _stats(self, c):
mu = _EULER + sc.psi(c)
mu2 = np.pi*np.pi/6.0 + sc.zeta(2, c)
g1 = -2*sc.zeta(3, c) + 2*_ZETA3
g1 /= np.power(mu2, 1.5)
g2 = np.pi**4/15.0 + 6*sc.zeta(4, c)
g2 /= mu2**2.0
return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic')
class genpareto_gen(rv_continuous):
r"""A generalized Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genpareto` is:
.. math::
f(x, c) = (1 + c x)^{-1 - 1/c}
defined for :math:`x \ge 0` if :math:`c \ge 0`, and for
:math:`0 \le x \le -1/c` if :math:`c < 0`.
`genpareto` takes ``c`` as a shape parameter for :math:`c`.
For :math:`c=0`, `genpareto` reduces to the exponential
distribution, `expon`:
.. math::
f(x, 0) = \exp(-x)
For :math:`c=-1`, `genpareto` is uniform on ``[0, 1]``:
.. math::
f(x, -1) = 1
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return np.isfinite(c)
def _get_support(self, c):
c = np.asarray(c)
b = _lazywhere(c < 0, (c,),
lambda c: -1. / c,
np.inf)
a = np.where(c >= 0, self.a, self.a)
return a, b
def _pdf(self, x, c):
# genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.xlog1py(c + 1., c*x) / c,
-x)
def _cdf(self, x, c):
return -sc.inv_boxcox1p(-x, -c)
def _sf(self, x, c):
return sc.inv_boxcox(-x, -c)
def _logsf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.log1p(c*x) / c,
-x)
def _ppf(self, q, c):
return -sc.boxcox1p(-q, -c)
def _isf(self, q, c):
return -sc.boxcox(q, -c)
def _stats(self, c, moments='mv'):
if 'm' not in moments:
m = None
else:
m = _lazywhere(c < 1, (c,),
lambda xi: 1/(1 - xi),
np.inf)
if 'v' not in moments:
v = None
else:
v = _lazywhere(c < 1/2, (c,),
lambda xi: 1 / (1 - xi)**2 / (1 - 2*xi),
np.nan)
if 's' not in moments:
s = None
else:
s = _lazywhere(c < 1/3, (c,),
lambda xi: 2 * (1 + xi) * np.sqrt(1 - 2*xi) /
(1 - 3*xi),
np.nan)
if 'k' not in moments:
k = None
else:
k = _lazywhere(c < 1/4, (c,),
lambda xi: 3 * (1 - 2*xi) * (2*xi**2 + xi + 3) /
(1 - 3*xi) / (1 - 4*xi) - 3,
np.nan)
return m, v, s, k
def _munp(self, n, c):
def __munp(n, c):
val = 0.0
k = np.arange(0, n + 1)
for ki, cnk in zip(k, sc.comb(n, k)):
val = val + cnk * (-1) ** ki / (1.0 - c * ki)
return np.where(c * n < 1, val * (-1.0 / c) ** n, np.inf)
return _lazywhere(c != 0, (c,),
lambda c: __munp(n, c),
sc.gamma(n + 1))
def _entropy(self, c):
return 1. + c
genpareto = genpareto_gen(a=0.0, name='genpareto')
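# Illustration of the special cases stated in the docstring (not part of
# the module), assuming the public scipy.stats API:
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.linspace(0.0, 3.0, 7)
# >>> np.allclose(stats.genpareto.pdf(x, 0.0), stats.expon.pdf(x))        # c = 0
# True
# >>> np.allclose(stats.genpareto.pdf(np.linspace(0, 1, 5), -1.0), 1.0)   # c = -1
# True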
class genexpon_gen(rv_continuous):
r"""A generalized exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genexpon` is:
.. math::
f(x, a, b, c) = (a + b (1 - \exp(-c x)))
\exp(-a x - b x + \frac{b}{c} (1-\exp(-c x)))
for :math:`x \ge 0`, :math:`a, b, c > 0`.
`genexpon` takes :math:`a`, :math:`b` and :math:`c` as shape parameters.
%(after_notes)s
References
----------
H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
Distribution", Journal of the American Statistical Association, 1993.
N. Balakrishnan, "The Exponential Distribution: Theory, Methods and
Applications", Asit P. Basu.
%(example)s
"""
def _pdf(self, x, a, b, c):
# genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
# exp(-a*x - b*x + b/c * (1-exp(-c*x)))
return (a + b*(-sc.expm1(-c*x)))*np.exp((-a-b)*x +
b*(-sc.expm1(-c*x))/c)
def _cdf(self, x, a, b, c):
return -sc.expm1((-a-b)*x + b*(-sc.expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-sc.expm1(-c*x))) + (-a-b)*x+b*(-sc.expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0, name='genexpon')
class genextreme_gen(rv_continuous):
r"""A generalized extreme value continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r
Notes
-----
For :math:`c=0`, `genextreme` is equal to `gumbel_r`.
The probability density function for `genextreme` is:
.. math::
f(x, c) = \begin{cases}
\exp(-\exp(-x)) \exp(-x) &\text{for } c = 0\\
\exp(-(1-c x)^{1/c}) (1-c x)^{1/c-1} &\text{for }
x \le 1/c, c > 0
\end{cases}
Note that several sources and software packages use the opposite
convention for the sign of the shape parameter :math:`c`.
`genextreme` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return np.where(abs(c) == np.inf, 0, 1)
def _get_support(self, c):
_b = np.where(c > 0, 1.0 / np.maximum(c, _XMIN), np.inf)
_a = np.where(c < 0, 1.0 / np.minimum(c, -_XMIN), -np.inf)
return _a, _b
def _loglogcdf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: sc.log1p(-c*x)/c, -x)
def _pdf(self, x, c):
# genextreme.pdf(x, c) =
# exp(-exp(-x))*exp(-x), for c==0
# exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x \le 1/c, c > 0
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
cx = _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: c*x, 0.0)
logex2 = sc.log1p(-cx)
logpex2 = self._loglogcdf(x, c)
pex2 = np.exp(logpex2)
# Handle special cases
np.putmask(logpex2, (c == 0) & (x == -np.inf), 0.0)
logpdf = np.where((cx == 1) | (cx == -np.inf),
-np.inf,
-pex2+logpex2-logex2)
np.putmask(logpdf, (c == 1) & (x == 1), 0.0)
return logpdf
def _logcdf(self, x, c):
return -np.exp(self._loglogcdf(x, c))
def _cdf(self, x, c):
return np.exp(self._logcdf(x, c))
def _sf(self, x, c):
return -sc.expm1(self._logcdf(x, c))
def _ppf(self, q, c):
x = -np.log(-np.log(q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _isf(self, q, c):
x = -np.log(-sc.log1p(-q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _stats(self, c):
g = lambda n: sc.gamma(n*c + 1)
g1 = g(1)
g2 = g(2)
g3 = g(3)
g4 = g(4)
g2mg12 = np.where(abs(c) < 1e-7, (c*np.pi)**2.0/6.0, g2-g1**2.0)
gam2k = np.where(abs(c) < 1e-7, np.pi**2.0/6.0,
sc.expm1(sc.gammaln(2.0*c+1.0)-2*sc.gammaln(c + 1.0))/c**2.0)
eps = 1e-14
gamk = np.where(abs(c) < eps, -_EULER, sc.expm1(sc.gammaln(c + 1))/c)
m = np.where(c < -1.0, np.nan, -gamk)
v = np.where(c < -0.5, np.nan, g1**2.0*gam2k)
# skewness
sk1 = _lazywhere(c >= -1./3,
(c, g1, g2, g3, g2mg12),
lambda c, g1, g2, g3, g2gm12:
np.sign(c)*(-g3 + (g2 + 2*g2mg12)*g1)/g2mg12**1.5,
fillvalue=np.nan)
sk = np.where(abs(c) <= eps**0.29, 12*np.sqrt(6)*_ZETA3/np.pi**3, sk1)
# kurtosis
ku1 = _lazywhere(c >= -1./4,
(g1, g2, g3, g4, g2mg12),
lambda g1, g2, g3, g4, g2mg12:
(g4 + (-4*g3 + 3*(g2 + g2mg12)*g1)*g1)/g2mg12**2,
fillvalue=np.nan)
ku = np.where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0)
return m, v, sk, ku
def _fitstart(self, data):
# This is better than the default shape of (1,).
g = _skew(data)
if g < 0:
a = 0.5
else:
a = -0.5
return super(genextreme_gen, self)._fitstart(data, args=(a,))
def _munp(self, n, c):
k = np.arange(0, n+1)
vals = 1.0/c**n * np.sum(
sc.comb(n, k) * (-1)**k * sc.gamma(c*k + 1),
axis=0)
return np.where(c*n > -1, vals, np.inf)
def _entropy(self, c):
return _EULER*(1 - c) + 1
genextreme = genextreme_gen(name='genextreme')
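# Illustration of the c=0 case stated in the docstring (not part of the
# module), assuming the public scipy.stats API:
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.linspace(-2.0, 4.0, 7)
# >>> np.allclose(stats.genextreme.pdf(x, 0.0), stats.gumbel_r.pdf(x))
# True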
def _digammainv(y):
# Inverse of the digamma function (real positive arguments only).
# This function is used in the `fit` method of `gamma_gen`.
# The function uses either optimize.fsolve or optimize.newton
# to solve `sc.digamma(x) - y = 0`. There is probably room for
# improvement, but currently it works over a wide range of y:
# >>> y = 64*np.random.randn(1000000)
# >>> y.min(), y.max()
# (-311.43592651416662, 351.77388222276869)
# x = [_digammainv(t) for t in y]
# np.abs(sc.digamma(x) - y).max()
# 1.1368683772161603e-13
#
_em = 0.5772156649015328606065120
func = lambda x: sc.digamma(x) - y
if y > -0.125:
x0 = np.exp(y) + 0.5
if y < 10:
# Some experimentation shows that newton reliably converges
# much faster than fsolve in this y range. For larger y,
# newton sometimes fails to converge.
value = optimize.newton(func, x0, tol=1e-10)
return value
elif y > -3:
x0 = np.exp(y/2.332) + 0.08661
else:
x0 = 1.0 / (-y - _em)
value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11,
full_output=True)
if ier != 1:
raise RuntimeError("_digammainv: fsolve failed, y = %r" % y)
return value[0]
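# Minimal round-trip sanity check for the private helper above (illustrative
# only, in the same spirit as the comment inside _digammainv):
# >>> import numpy as np
# >>> y = np.array([-5.0, -0.5, 0.1, 3.0])
# >>> x = [_digammainv(t) for t in y]
# >>> np.allclose(sc.digamma(x), y)
# True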
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
r"""A gamma continuous random variable.
%(before_notes)s
See Also
--------
erlang, expon
Notes
-----
The probability density function for `gamma` is:
.. math::
f(x, a) = \frac{x^{a-1} e^{-x}}{\Gamma(a)}
for :math:`x \ge 0`, :math:`a > 0`. Here :math:`\Gamma(a)` refers to the
gamma function.
`gamma` takes ``a`` as a shape parameter for :math:`a`.
When :math:`a` is an integer, `gamma` reduces to the Erlang
distribution, and when :math:`a=1` to the exponential distribution.
Gamma distributions are sometimes parameterized with two variables,
with a probability density function of:
.. math::
f(x, \alpha, \beta) = \frac{\beta^\alpha x^{\alpha - 1} e^{-\beta x }}{\Gamma(\alpha)}
Note that this parameterization is equivalent to the above, with
``scale = 1 / beta``.
%(after_notes)s
%(example)s
"""
def _rvs(self, a, size=None, random_state=None):
return random_state.standard_gamma(a, size)
def _pdf(self, x, a):
# gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a)
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return sc.xlogy(a-1.0, x) - x - sc.gammaln(a)
def _cdf(self, x, a):
return sc.gammainc(a, x)
def _sf(self, x, a):
return sc.gammaincc(a, x)
def _ppf(self, q, a):
return sc.gammaincinv(a, q)
def _stats(self, a):
return a, a, 2.0/np.sqrt(a), 6.0/a
def _entropy(self, a):
return sc.psi(a)*(1-a) + a + sc.gammaln(a)
def _fitstart(self, data):
# The skewness of the gamma distribution is `4 / np.sqrt(a)`.
# We invert that to estimate the shape `a` using the skewness
# of the data. The formula is regularized with 1e-8 in the
# denominator to allow for degenerate data where the skewness
# is close to 0.
a = 4 / (1e-8 + _skew(data)**2)
return super(gamma_gen, self)._fitstart(data, args=(a,))
@extend_notes_in_docstring(rv_continuous, notes="""\
When the location is fixed by using the argument `floc`, this
function uses explicit formulas or solves a simpler numerical
problem than the full ML optimization problem. So in that case,
the `optimizer`, `loc` and `scale` arguments are ignored.\n\n""")
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
if floc is None:
# loc is not fixed. Use the default fit method.
return super(gamma_gen, self).fit(data, *args, **kwds)
# We already have this value, so just pop it from kwds.
kwds.pop('floc', None)
f0 = _get_fixed_fit_value(kwds, ['f0', 'fa', 'fix_a'])
fscale = kwds.pop('fscale', None)
_remove_optimizer_parameters(kwds)
# Special case: loc is fixed.
if f0 is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Fixed location is handled by shifting the data.
data = np.asarray(data)
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
if np.any(data <= floc):
raise FitDataError("gamma", lower=floc, upper=np.inf)
if floc != 0:
# Don't do the subtraction in-place, because `data` might be a
# view of the input array.
data = data - floc
xbar = data.mean()
# Three cases to handle:
# * shape and scale both free
# * shape fixed, scale free
# * shape free, scale fixed
if fscale is None:
# scale is free
if f0 is not None:
# shape is fixed
a = f0
else:
# shape and scale are both free.
# The MLE for the shape parameter `a` is the solution to:
# np.log(a) - sc.digamma(a) - np.log(xbar) +
# np.log(data).mean() = 0
s = np.log(xbar) - np.log(data).mean()
func = lambda a: np.log(a) - sc.digamma(a) - s
aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s)
xa = aest*(1-0.4)
xb = aest*(1+0.4)
a = optimize.brentq(func, xa, xb, disp=0)
# The MLE for the scale parameter is just the data mean
# divided by the shape parameter.
scale = xbar / a
else:
# scale is fixed, shape is free
# The MLE for the shape parameter `a` is the solution to:
# sc.digamma(a) - np.log(data).mean() + np.log(fscale) = 0
c = np.log(data).mean() - np.log(fscale)
a = _digammainv(c)
scale = fscale
return a, floc, scale
gamma = gamma_gen(a=0.0, name='gamma')
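# Illustration of the fixed-loc fit path above (not part of the module),
# assuming the public scipy.stats and scipy.special APIs: with floc=0 the
# fitted shape satisfies log(a) - digamma(a) = log(xbar) - mean(log(data))
# and the fitted scale is xbar / a.
# >>> import numpy as np
# >>> from scipy import stats, special
# >>> rng = np.random.default_rng(0)
# >>> data = 2.0 * rng.standard_gamma(3.0, size=10000)
# >>> a, loc, scale = stats.gamma.fit(data, floc=0)
# >>> s = np.log(data.mean()) - np.log(data).mean()
# >>> np.isclose(np.log(a) - special.digamma(a), s) and np.isclose(scale, data.mean()/a)
# True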
class erlang_gen(gamma_gen):
"""An Erlang continuous random variable.
%(before_notes)s
See Also
--------
gamma
Notes
-----
The Erlang distribution is a special case of the Gamma distribution, with
the shape parameter `a` an integer. Note that this restriction is not
enforced by `erlang`. It will, however, generate a warning the first time
a non-integer value is used for the shape parameter.
Refer to `gamma` for examples.
"""
def _argcheck(self, a):
allint = np.all(np.floor(a) == a)
if not allint:
# An Erlang distribution shouldn't really have a non-integer
# shape parameter, so warn the user.
warnings.warn(
'The shape parameter of the erlang distribution '
'has been given a non-integer value %r.' % (a,),
RuntimeWarning)
return a > 0
def _fitstart(self, data):
# Override gamma_gen._fitstart so that an integer initial value is
# used. (Also regularize the division, to avoid issues when
# _skew(data) is 0 or close to 0.)
a = int(4.0 / (1e-8 + _skew(data)**2))
return super(gamma_gen, self)._fitstart(data, args=(a,))
# Trivial override of the fit method, so we can monkey-patch its
# docstring.
def fit(self, data, *args, **kwds):
return super(erlang_gen, self).fit(data, *args, **kwds)
if fit.__doc__:
fit.__doc__ = (rv_continuous.fit.__doc__ +
"""
Notes
-----
The Erlang distribution is generally defined to have integer values
for the shape parameter. This is not enforced by the `erlang` class.
When fitting the distribution, it will generally return a non-integer
value for the shape parameter. By using the keyword argument
`f0=<integer>`, the fit method can be constrained to fit the data to
a specific integer shape parameter.
""")
erlang = erlang_gen(a=0.0, name='erlang')
class gengamma_gen(rv_continuous):
r"""A generalized gamma continuous random variable.
%(before_notes)s
See Also
--------
gamma, invgamma, weibull_min
Notes
-----
The probability density function for `gengamma` is ([1]_):
.. math::
f(x, a, c) = \frac{|c| x^{c a-1} \exp(-x^c)}{\Gamma(a)}
for :math:`x \ge 0`, :math:`a > 0`, and :math:`c \ne 0`.
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`gengamma` takes :math:`a` and :math:`c` as shape parameters.
%(after_notes)s
References
----------
.. [1] E.W. Stacy, "A Generalization of the Gamma Distribution",
Annals of Mathematical Statistics, Vol 33(3), pp. 1187--1192.
%(example)s
"""
def _argcheck(self, a, c):
return (a > 0) & (c != 0)
def _pdf(self, x, a, c):
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
return np.log(abs(c)) + sc.xlogy(c*a - 1, x) - x**c - sc.gammaln(a)
def _cdf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val1, val2)
def _rvs(self, a, c, size=None, random_state=None):
r = random_state.standard_gamma(a, size=size)
return r**(1./c)
def _sf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val2, val1)
def _ppf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val1, val2)**(1.0/c)
def _isf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val2, val1)**(1.0/c)
def _munp(self, n, a, c):
# Pochhammer symbol: sc.poch(a, n) = gamma(a+n)/gamma(a)
return sc.poch(a, n*1.0/c)
def _entropy(self, a, c):
val = sc.psi(a)
return a*(1-val) + 1.0/c*val + sc.gammaln(a) - np.log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma')
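# Illustration of special cases of `gengamma` (not part of the module),
# assuming the public scipy.stats API: c=1 recovers `gamma` and c=-1
# recovers `invgamma` (as also noted in the `invgamma` docstring below).
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.linspace(0.1, 5.0, 7)
# >>> np.allclose(stats.gengamma.pdf(x, 2.5, 1.0), stats.gamma.pdf(x, 2.5))
# True
# >>> np.allclose(stats.gengamma.pdf(x, 2.5, -1.0), stats.invgamma.pdf(x, 2.5))
# True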
class genhalflogistic_gen(rv_continuous):
r"""A generalized half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genhalflogistic` is:
.. math::
f(x, c) = \frac{2 (1 - c x)^{1/(c-1)}}{[1 + (1 - c x)^{1/c}]^2}
for :math:`0 \le x \le 1/c`, and :math:`c > 0`.
`genhalflogistic` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return c > 0
def _get_support(self, c):
return self.a, 1.0/c
def _pdf(self, x, c):
# genhalflogistic.pdf(x, c) =
# 2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp0 = tmp**(limit-1)
tmp2 = tmp0*tmp
return 2*tmp0 / (1+tmp2)**2
def _cdf(self, x, c):
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp2 = tmp**(limit)
return (1.0-tmp2) / (1+tmp2)
def _ppf(self, q, c):
return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
def _entropy(self, c):
return 2 - (2*c+1)*np.log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
class gompertz_gen(rv_continuous):
r"""A Gompertz (or truncated Gumbel) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gompertz` is:
.. math::
f(x, c) = c \exp(x) \exp(-c (e^x-1))
for :math:`x \ge 0`, :math:`c > 0`.
`gompertz` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
# gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return np.log(c) + x - c * sc.expm1(x)
def _cdf(self, x, c):
return -sc.expm1(-c * sc.expm1(x))
def _ppf(self, q, c):
return sc.log1p(-1.0 / c * sc.log1p(-q))
def _entropy(self, c):
return 1.0 - np.log(c) - np.exp(c)*sc.expn(1, c)
gompertz = gompertz_gen(a=0.0, name='gompertz')
class gumbel_r_gen(rv_continuous):
r"""A right-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_l, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_r` is:
.. math::
f(x) = \exp(-(x + e^{-x}))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# gumbel_r.pdf(x) = exp(-(x + exp(-x)))
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return -x - np.exp(-x)
def _cdf(self, x):
return np.exp(-np.exp(-x))
def _logcdf(self, x):
return -np.exp(-x)
def _ppf(self, q):
return -np.log(-np.log(q))
def _sf(self, x):
return -sc.expm1(-np.exp(-x))
def _isf(self, p):
return -np.log(-np.log1p(-p))
def _stats(self):
return _EULER, np.pi*np.pi/6.0, 12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
# https://en.wikipedia.org/wiki/Gumbel_distribution
return _EULER + 1.
gumbel_r = gumbel_r_gen(name='gumbel_r')
class gumbel_l_gen(rv_continuous):
r"""A left-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_l` is:
.. math::
f(x) = \exp(x - e^x)
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# gumbel_l.pdf(x) = exp(x - exp(x))
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return x - np.exp(x)
def _cdf(self, x):
return -sc.expm1(-np.exp(x))
def _ppf(self, q):
return np.log(-sc.log1p(-q))
def _logsf(self, x):
return -np.exp(x)
def _sf(self, x):
return np.exp(-np.exp(x))
def _isf(self, x):
return np.log(-np.log(x))
def _stats(self):
return -_EULER, np.pi*np.pi/6.0, \
-12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return _EULER + 1.
gumbel_l = gumbel_l_gen(name='gumbel_l')
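# Illustration (not part of the module), assuming the public scipy.stats API:
# `gumbel_l` is the distribution of -X when X follows `gumbel_r`, which is
# why its pdf is the mirror image of the `gumbel_r` pdf.
# >>> import numpy as np
# >>> from scipy import stats
# >>> x = np.linspace(-3.0, 3.0, 7)
# >>> np.allclose(stats.gumbel_l.pdf(x), stats.gumbel_r.pdf(-x))
# True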
class halfcauchy_gen(rv_continuous):
r"""A Half-Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfcauchy` is:
.. math::
f(x) = \frac{2}{\pi (1 + x^2)}
for :math:`x \ge 0`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))
return 2.0/np.pi/(1.0+x*x)
def _logpdf(self, x):
return np.log(2.0/np.pi) - sc.log1p(x*x)
def _cdf(self, x):
return 2.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi/2*q)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
def _entropy(self):
return np.log(2*np.pi)
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
class halflogistic_gen(rv_continuous):
r"""A half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halflogistic` is:
.. math::
f(x) = \frac{ 2 e^{-x} }{ (1+e^{-x})^2 }
= \frac{1}{2} \text{sech}(x/2)^2
for :math:`x \ge 0`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2
# = 1/2 * sech(x/2)**2
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return np.log(2) - x - 2. * sc.log1p(np.exp(-x))
def _cdf(self, x):
return np.tanh(x/2.0)
def _ppf(self, q):
return 2*np.arctanh(q)
def _munp(self, n):
if n == 1:
return 2*np.log(2)
if n == 2:
return np.pi*np.pi/3.0
if n == 3:
return 9*_ZETA3
if n == 4:
return 7*np.pi**4 / 15.0
return 2*(1-pow(2.0, 1-n))*sc.gamma(n+1)*sc.zeta(n, 1)
def _entropy(self):
return 2-np.log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
class halfnorm_gen(rv_continuous):
r"""A half-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfnorm` is:
.. math::
f(x) = \sqrt{2/\pi} \exp(-x^2 / 2)
for :math:`x >= 0`.
`halfnorm` is a special case of `chi` with ``df=1``.
%(after_notes)s
%(example)s
"""
def _rvs(self, size=None, random_state=None):
return abs(random_state.standard_normal(size=size))
def _pdf(self, x):
# halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
return np.sqrt(2.0/np.pi)*np.exp(-x*x/2.0)
def _logpdf(self, x):
return 0.5 * np.log(2.0/np.pi) - x*x/2.0
def _cdf(self, x):
return _norm_cdf(x)*2-1.0
def _ppf(self, q):
return sc.ndtri((1+q)/2.0)
def _stats(self):
return (np.sqrt(2.0/np.pi),
1-2.0/np.pi,
np.sqrt(2)*(4-np.pi)/(np.pi-2)**1.5,
8*(np.pi-3)/(np.pi-2)**2)
def _entropy(self):
return 0.5*np.log(np.pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
class hypsecant_gen(rv_continuous):
r"""A hyperbolic secant continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `hypsecant` is:
.. math::
f(x) = \frac{1}{\pi} \text{sech}(x)
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
# hypsecant.pdf(x) = 1/pi * sech(x)
return 1.0/(np.pi*np.cosh(x))
def _cdf(self, x):
return 2.0/np.pi*np.arctan(np.exp(x))
def _ppf(self, q):
return np.log(np.tan(np.pi*q/2.0))
def _stats(self):
return 0, np.pi*np.pi/4, 0, 2
def _entropy(self):
return np.log(2*np.pi)
hypsecant = hypsecant_gen(name='hypsecant')
class gausshyper_gen(rv_continuous):
r"""A Gauss hypergeometric continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gausshyper` is:
.. math::
f(x, a, b, c, z) = C x^{a-1} (1-x)^{b-1} (1+zx)^{-c}
for :math:`0 \le x \le 1`, :math:`a > 0`, :math:`b > 0`, and
:math:`C = \frac{1}{B(a, b) F[2, 1](c, a; a+b; -z)}`.
:math:`F[2, 1]` is the Gauss hypergeometric function
`scipy.special.hyp2f1`.
`gausshyper` takes :math:`a`, :math:`b`, :math:`c` and :math:`z` as shape
parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b, c, z):
return (a > 0) & (b > 0) & (c == c) & (z == z)
def _pdf(self, x, a, b, c, z):
# gausshyper.pdf(x, a, b, c, z) =
# C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
Cinv = sc.gamma(a)*sc.gamma(b)/sc.gamma(a+b)*sc.hyp2f1(c, a, a+b, -z)
return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
def _munp(self, n, a, b, c, z):
fac = sc.beta(n+a, b) / sc.beta(a, b)
num = sc.hyp2f1(c, a+n, a+b+n, -z)
den = sc.hyp2f1(c, a, a+b, -z)
return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper')
class invgamma_gen(rv_continuous):
r"""An inverted gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgamma` is:
.. math::
f(x, a) = \frac{x^{-a-1}}{\Gamma(a)} \exp(-\frac{1}{x})
for :math:`x >= 0`, :math:`a > 0`. :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
`invgamma` takes ``a`` as a shape parameter for :math:`a`.
`invgamma` is a special case of `gengamma` with ``c=-1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, a):
# invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return -(a+1) * np.log(x) - sc.gammaln(a) - 1.0/x
def _cdf(self, x, a):
return sc.gammaincc(a, 1.0 / x)
def _ppf(self, q, a):
return 1.0 / sc.gammainccinv(a, q)
def _sf(self, x, a):
return sc.gammainc(a, 1.0 / x)
def _isf(self, q, a):
return 1.0 / sc.gammaincinv(a, q)
def _stats(self, a, moments='mvsk'):
m1 = _lazywhere(a > 1, (a,), lambda x: 1. / (x - 1.), np.inf)
m2 = _lazywhere(a > 2, (a,), lambda x: 1. / (x - 1.)**2 / (x - 2.),
np.inf)
g1, g2 = None, None
if 's' in moments:
g1 = _lazywhere(
a > 3, (a,),
lambda x: 4. * np.sqrt(x - 2.) / (x - 3.), np.nan)
if 'k' in moments:
g2 = _lazywhere(
a > 4, (a,),
lambda x: 6. * (5. * x - 11.) / (x - 3.) / (x - 4.), np.nan)
return m1, m2, g1, g2
def _entropy(self, a):
return a - (a+1.0) * sc.psi(a) + sc.gammaln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma')
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
r"""An inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgauss` is:
.. math::
f(x, \mu) = \frac{1}{\sqrt{2 \pi x^3}}
\exp(-\frac{(x-\mu)^2}{2 x \mu^2})
for :math:`x >= 0` and :math:`\mu > 0`.
`invgauss` takes ``mu`` as a shape parameter for :math:`\mu`.
%(after_notes)s
When :math:`\mu` is too small, evaluating the cumulative distribution
function will be inaccurate due to ``cdf(mu -> 0) = inf * 0``.
NaNs are returned for :math:`\mu \le 0.0028`.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, mu, size=None, random_state=None):
return random_state.wald(mu, 1.0, size=size)
def _pdf(self, x, mu):
# invgauss.pdf(x, mu) =
# 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
return 1.0/np.sqrt(2*np.pi*x**3.0)*np.exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
return -0.5*np.log(2*np.pi) - 1.5*np.log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
fac = np.sqrt(1.0/x)
# Numerical accuracy for small `mu` is bad. See #869.
C1 = _norm_cdf(fac*(x-mu)/mu)
C1 += np.exp(1.0/mu) * _norm_cdf(-fac*(x+mu)/mu) * np.exp(1.0/mu)
return C1
def _stats(self, mu):
return mu, mu**3.0, 3*np.sqrt(mu), 15*mu
def fit(self, data, *args, **kwds):
if type(self) == wald_gen:
return super(invgauss_gen, self).fit(data, *args, **kwds)
data, fshape_s, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
'''
Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
and Peacock (2000), Page 121. Their shape parameter is equivalent to
SciPy's with the conversion `fshape_s = fshape / scale`.
MLE formulas are not used in 3 conditions:
- `loc` is not fixed
- `mu` is fixed
  These two cases fall back on the superclass fit method.
- `loc` is fixed but translating the data by it produces negative
  values; this raises a `FitDataError`.
'''
if floc is None or fshape_s is not None:
return super(invgauss_gen, self).fit(data, *args, **kwds)
elif np.any(data - floc < 0):
raise FitDataError("invgauss", lower=0, upper=np.inf)
else:
data = data - floc
fshape_n = np.mean(data)
if fscale is None:
fscale = len(data) / (np.sum(data ** -1 - fshape_n ** -1))
fshape_s = fshape_n / fscale
return fshape_s, floc, fscale
invgauss = invgauss_gen(a=0.0, name='invgauss')
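# Illustration of the fixed-loc fit path above (not part of the module),
# assuming the public scipy.stats API and numpy's Generator.wald: with
# floc=0, the fitted mean mu*scale equals the sample mean exactly.
# >>> import numpy as np
# >>> from scipy import stats
# >>> rng = np.random.default_rng(0)
# >>> data = rng.wald(2.0, 3.0, size=10000)
# >>> mu_s, loc, scale = stats.invgauss.fit(data, floc=0)
# >>> np.isclose(mu_s * scale, data.mean())
# True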
class geninvgauss_gen(rv_continuous):
r"""A Generalized Inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `geninvgauss` is:
.. math::
f(x, p, b) = x^{p-1} \exp(-b (x + 1/x) / 2) / (2 K_p(b))
where `x > 0`, and the parameters `p, b` satisfy `b > 0` ([1]_).
:math:`K_p` is the modified Bessel function of second kind of order `p`
(`scipy.special.kv`).
%(after_notes)s
The inverse Gaussian distribution `stats.invgauss(mu)` is a special case of
`geninvgauss` with `p = -1/2`, `b = 1 / mu` and `scale = mu`.
Generating random variates is challenging for this distribution. The
implementation is based on [2]_.
References
----------
.. [1] O. Barndorff-Nielsen, P. Blaesild, C. Halgreen, "First hitting time
models for the generalized inverse gaussian distribution",
Stochastic Processes and their Applications 7, pp. 49--54, 1978.
.. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian
random variates", Statistics and Computing, 24(4), p. 547--557, 2014.
%(example)s
"""
def _argcheck(self, p, b):
return (p == p) & (b > 0)
def _logpdf(self, x, p, b):
# kve instead of kv works better for large values of b
# warn if kve produces infinite values and replace by nan
# otherwise c = -inf and the results are often incorrect
@np.vectorize
def logpdf_single(x, p, b):
return _stats.geninvgauss_logpdf(x, p, b)
z = logpdf_single(x, p, b)
if np.isnan(z).any():
msg = ("Infinite values encountered in scipy.special.kve(p, b). "
"Values replaced by NaN to avoid incorrect results.")
warnings.warn(msg, RuntimeWarning)
return z
def _pdf(self, x, p, b):
# relying on logpdf avoids overflow of x**(p-1) for large x and p
return np.exp(self._logpdf(x, p, b))
def _cdf(self, x, *args):
_a, _b = self._get_support(*args)
@np.vectorize
def _cdf_single(x, *args):
p, b = args
user_data = np.array([p, b], float).ctypes.data_as(ctypes.c_void_p)
llc = LowLevelCallable.from_cython(_stats, '_geninvgauss_pdf', user_data)
return integrate.quad(llc, _a, x)[0]
return _cdf_single(x, *args)
def _logquasipdf(self, x, p, b):
# log of the quasi-density (w/o normalizing constant) used in _rvs
return _lazywhere(x > 0, (x, p, b),
lambda x, p, b: (p - 1)*np.log(x) - b*(x + 1/x)/2,
-np.inf)
def _rvs(self, p, b, size=None, random_state=None):
# if p and b are scalar, use _rvs_scalar, otherwise need to create
# output by iterating over parameters
if np.isscalar(p) and np.isscalar(b):
out = self._rvs_scalar(p, b, size, random_state)
elif p.size == 1 and b.size == 1:
out = self._rvs_scalar(p.item(), b.item(), size, random_state)
else:
# When this method is called, size will be a (possibly empty)
# tuple of integers. It will not be None; if `size=None` is passed
# to `rvs()`, size will be the empty tuple ().
p, b = np.broadcast_arrays(p, b)
# p and b now have the same shape.
# `shp` is the shape of the blocks of random variates that are
# generated for each combination of parameters associated with
# broadcasting p and b.
# bc is a tuple the same length as size. The values
# in bc are bools. If bc[j] is True, it means that the
# entire axis is filled in for a given combination of the
# broadcast arguments.
shp, bc = _check_shape(p.shape, size)
# `numsamples` is the total number of variates to be generated
# for each combination of the input arguments.
numsamples = int(np.prod(shp))
# `out` is the array to be returned. It is filled in in the
# loop below.
out = np.empty(size)
it = np.nditer([p, b],
flags=['multi_index'],
op_flags=[['readonly'], ['readonly']])
while not it.finished:
# Convert the iterator's multi_index into an index into the
# `out` array where the call to _rvs_scalar() will be stored.
# Where bc is True, we use a full slice; otherwise we use the
# index value from it.multi_index. len(it.multi_index) might
# be less than len(bc), and in that case we want to align these
# two sequences to the right, so the loop variable j runs from
# -len(size) to 0. This doesn't cause an IndexError, as
# bc[j] will be True in those cases where it.multi_index[j]
# would cause an IndexError.
idx = tuple((it.multi_index[j] if not bc[j] else slice(None))
for j in range(-len(size), 0))
out[idx] = self._rvs_scalar(it[0], it[1], numsamples, random_state).reshape(shp)
it.iternext()
if size == ():
out = out.item()
return out
def _rvs_scalar(self, p, b, numsamples, random_state):
# following [2], the quasi-pdf is used instead of the pdf for the
# generation of rvs
invert_res = False
if not numsamples:
numsamples = 1
if p < 0:
# note: if X is geninvgauss(p, b), then 1/X is geninvgauss(-p, b)
p = -p
invert_res = True
m = self._mode(p, b)
# determine method to be used following [2]
ratio_unif = True
if p >= 1 or b > 1:
# ratio of uniforms with mode shift below
mode_shift = True
elif b >= min(0.5, 2 * np.sqrt(1 - p) / 3):
# ratio of uniforms without mode shift below
mode_shift = False
else:
# new algorithm in [2]
ratio_unif = False
# prepare sampling of rvs
size1d = tuple(np.atleast_1d(numsamples))
N = np.prod(size1d) # number of rvs needed, reshape upon return
x = np.zeros(N)
simulated = 0
if ratio_unif:
# use ratio of uniforms method
if mode_shift:
a2 = -2 * (p + 1) / b - m
a1 = 2 * m * (p - 1) / b - 1
# find roots of x**3 + a2*x**2 + a1*x + m (Cardano's formula)
p1 = a1 - a2**2 / 3
q1 = 2 * a2**3 / 27 - a2 * a1 / 3 + m
phi = np.arccos(-q1 * np.sqrt(-27 / p1**3) / 2)
s1 = -np.sqrt(-4 * p1 / 3)
root1 = s1 * np.cos(phi / 3 + np.pi / 3) - a2 / 3
root2 = -s1 * np.cos(phi / 3) - a2 / 3
# root3 = s1 * np.cos(phi / 3 - np.pi / 3) - a2 / 3
# if g is the quasipdf, rescale: g(x) / g(m) which we can write
# as exp(log(g(x)) - log(g(m))). This is important
# since for large values of p and b, g cannot be evaluated.
# denote the rescaled quasipdf by h
lm = self._logquasipdf(m, p, b)
d1 = self._logquasipdf(root1, p, b) - lm
d2 = self._logquasipdf(root2, p, b) - lm
# compute the bounding rectangle w.r.t. h. Note that
# np.exp(0.5*d1) = np.sqrt(g(root1)/g(m)) = np.sqrt(h(root1))
vmin = (root1 - m) * np.exp(0.5 * d1)
vmax = (root2 - m) * np.exp(0.5 * d2)
umax = 1 # umax = sqrt(h(m)) = 1
logqpdf = lambda x: self._logquasipdf(x, p, b) - lm
c = m
else:
# ratio of uniforms without mode shift
# compute np.sqrt(quasipdf(m))
umax = np.exp(0.5*self._logquasipdf(m, p, b))
xplus = ((1 + p) + np.sqrt((1 + p)**2 + b**2))/b
vmin = 0
# compute xplus * np.sqrt(quasipdf(xplus))
vmax = xplus * np.exp(0.5 * self._logquasipdf(xplus, p, b))
c = 0
logqpdf = lambda x: self._logquasipdf(x, p, b)
if vmin >= vmax:
raise ValueError("vmin must be smaller than vmax.")
if umax <= 0:
raise ValueError("umax must be positive.")
i = 1
while simulated < N:
k = N - simulated
# simulate uniform rvs on [0, umax] and [vmin, vmax]
u = umax * random_state.uniform(size=k)
v = random_state.uniform(size=k)
v = vmin + (vmax - vmin) * v
rvs = v / u + c
# rewrite acceptance condition u**2 <= pdf(rvs) by taking logs
accept = (2*np.log(u) <= logqpdf(rvs))
num_accept = np.sum(accept)
if num_accept > 0:
x[simulated:(simulated + num_accept)] = rvs[accept]
simulated += num_accept
if (simulated == 0) and (i*N >= 50000):
msg = ("Not a single random variate could be generated "
"in {} attempts. Sampling does not appear to "
"work for the provided parameters.".format(i*N))
raise RuntimeError(msg)
i += 1
else:
# use new algorithm in [2]
x0 = b / (1 - p)
xs = np.max((x0, 2 / b))
k1 = np.exp(self._logquasipdf(m, p, b))
A1 = k1 * x0
if x0 < 2 / b:
k2 = np.exp(-b)
if p > 0:
A2 = k2 * ((2 / b)**p - x0**p) / p
else:
A2 = k2 * np.log(2 / b**2)
else:
k2, A2 = 0, 0
k3 = xs**(p - 1)
A3 = 2 * k3 * np.exp(-xs * b / 2) / b
A = A1 + A2 + A3
# [2]: rejection constant is < 2.73; so expected runtime is finite
while simulated < N:
k = N - simulated
h, rvs = np.zeros(k), np.zeros(k)
# simulate uniform rvs on [x1, x2] and [0, y2]
u = random_state.uniform(size=k)
v = A * random_state.uniform(size=k)
cond1 = v <= A1
cond2 = np.logical_not(cond1) & (v <= A1 + A2)
cond3 = np.logical_not(cond1 | cond2)
# subdomain (0, x0)
rvs[cond1] = x0 * v[cond1] / A1
h[cond1] = k1
# subdomain (x0, 2 / b)
if p > 0:
rvs[cond2] = (x0**p + (v[cond2] - A1) * p / k2)**(1 / p)
else:
rvs[cond2] = b * np.exp((v[cond2] - A1) * np.exp(b))
h[cond2] = k2 * rvs[cond2]**(p - 1)
# subdomain (xs, infinity)
z = np.exp(-xs * b / 2) - b * (v[cond3] - A1 - A2) / (2 * k3)
rvs[cond3] = -2 / b * np.log(z)
h[cond3] = k3 * np.exp(-rvs[cond3] * b / 2)
# apply rejection method
accept = (np.log(u * h) <= self._logquasipdf(rvs, p, b))
num_accept = sum(accept)
if num_accept > 0:
x[simulated:(simulated + num_accept)] = rvs[accept]
simulated += num_accept
rvs = np.reshape(x, size1d)
if invert_res:
rvs = 1 / rvs
return rvs
def _mode(self, p, b):
# distinguish cases to avoid catastrophic cancellation (see [2])
if p < 1:
return b / (np.sqrt((p - 1)**2 + b**2) + 1 - p)
else:
return (np.sqrt((1 - p)**2 + b**2) - (1 - p)) / b
def _munp(self, n, p, b):
num = sc.kve(p + n, b)
denom = sc.kve(p, b)
inf_vals = np.isinf(num) | np.isinf(denom)
if inf_vals.any():
msg = ("Infinite values encountered in the moment calculation "
"involving scipy.special.kve. Values replaced by NaN to "
"avoid incorrect results.")
warnings.warn(msg, RuntimeWarning)
m = np.full_like(num, np.nan, dtype=np.double)
m[~inf_vals] = num[~inf_vals] / denom[~inf_vals]
else:
m = num / denom
return m
geninvgauss = geninvgauss_gen(a=0.0, name="geninvgauss")
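# Editorial sketch (illustrative only, not used by the implementation above): the
# quasi-pdf employed in `_rvs_scalar` differs from the normalized density only by
# the constant 2*K_p(b), so the relation below should hold up to floating point
# error. The helper name, grid and parameter values are assumptions made here.
def _geninvgauss_quasipdf_check(p=2.3, b=1.5):
    x = np.linspace(0.1, 5, 50)
    # normalized pdf reconstructed from the quasi-pdf and the Bessel constant
    pdf_from_quasi = np.exp(geninvgauss._logquasipdf(x, p, b)) / (2 * sc.kv(p, b))
    return np.allclose(pdf_from_quasi, geninvgauss.pdf(x, p, b))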
class norminvgauss_gen(rv_continuous):
r"""A Normal Inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `norminvgauss` is:
.. math::
f(x, a, b) = \frac{a \, K_1(a \sqrt{1 + x^2})}{\pi \sqrt{1 + x^2}} \,
\exp(\sqrt{a^2 - b^2} + b x)
where :math:`x` is a real number, the parameter :math:`a` is the tail
heaviness and :math:`b` is the asymmetry parameter satisfying
:math:`a > 0` and :math:`|b| < a`.
:math:`K_1` is the modified Bessel function of the second kind
(`scipy.special.k1`).
%(after_notes)s
A normal inverse Gaussian random variable `Y` with parameters `a` and `b`
can be expressed as a normal mean-variance mixture:
`Y = b * V + sqrt(V) * X` where `X` is `norm(0,1)` and `V` is
`invgauss(mu=1/sqrt(a**2 - b**2))`. This representation is used
to generate random variates.
References
----------
O. Barndorff-Nielsen, "Hyperbolic Distributions and Distributions on
Hyperbolae", Scandinavian Journal of Statistics, Vol. 5(3),
pp. 151-157, 1978.
O. Barndorff-Nielsen, "Normal Inverse Gaussian Distributions and Stochastic
Volatility Modelling", Scandinavian Journal of Statistics, Vol. 24,
pp. 1-13, 1997.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, a, b):
return (a > 0) & (np.absolute(b) < a)
def _pdf(self, x, a, b):
gamma = np.sqrt(a**2 - b**2)
fac1 = a / np.pi * np.exp(gamma)
sq = np.hypot(1, x) # reduce overflows
return fac1 * sc.k1e(a * sq) * np.exp(b*x - a*sq) / sq
def _rvs(self, a, b, size=None, random_state=None):
# note: Y = b * V + sqrt(V) * X is norminvgauss(a, b) if X is standard
# normal and V is invgauss(mu=1/sqrt(a**2 - b**2))
gamma = np.sqrt(a**2 - b**2)
ig = invgauss.rvs(mu=1/gamma, size=size, random_state=random_state)
return b * ig + np.sqrt(ig) * norm.rvs(size=size, random_state=random_state)
def _stats(self, a, b):
gamma = np.sqrt(a**2 - b**2)
mean = b / gamma
variance = a**2 / gamma**3
skewness = 3.0 * b / (a * np.sqrt(gamma))
kurtosis = 3.0 * (1 + 4 * b**2 / a**2) / gamma
return mean, variance, skewness, kurtosis
norminvgauss = norminvgauss_gen(name="norminvgauss")
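# Editorial sketch (illustrative only, not part of the class): the mean-variance
# mixture representation documented above can be checked by Monte Carlo. The
# helper name, parameter values, sample size and seed are assumptions.
def _norminvgauss_mixture_check(a=2.0, b=0.5, n=200000, seed=1234):
    rng = np.random.RandomState(seed)
    gam = np.sqrt(a**2 - b**2)
    v = invgauss.rvs(mu=1.0/gam, size=n, random_state=rng)
    y = b * v + np.sqrt(v) * norm.rvs(size=n, random_state=rng)
    # compare the sample mean with the closed-form mean b / gamma
    return y.mean(), norminvgauss.mean(a, b)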
class invweibull_gen(rv_continuous):
u"""An inverted Weibull continuous random variable.
This distribution is also known as the Fréchet distribution or the
type II extreme value distribution.
%(before_notes)s
Notes
-----
The probability density function for `invweibull` is:
.. math::
f(x, c) = c x^{-c-1} \\exp(-x^{-c})
for :math:`x > 0`, :math:`c > 0`.
`invweibull` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse
Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c):
# invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
xc1 = np.power(x, -c - 1.0)
xc2 = np.power(x, -c)
xc2 = np.exp(-xc2)
return c * xc1 * xc2
def _cdf(self, x, c):
xc1 = np.power(x, -c)
return np.exp(-xc1)
def _ppf(self, q, c):
return np.power(-np.log(q), -1.0/c)
def _munp(self, n, c):
return sc.gamma(1 - n / c)
def _entropy(self, c):
return 1+_EULER + _EULER / c - np.log(c)
invweibull = invweibull_gen(a=0, name='invweibull')
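# Editorial sketch (illustrative only): because the ppf above has the closed form
# (-log(q))**(-1/c), inverse-transform sampling is straightforward. The helper
# name, shape parameter and seed are assumptions introduced here.
def _invweibull_inverse_transform(c=2.5, size=5, seed=0):
    rng = np.random.RandomState(seed)
    u = rng.uniform(size=size)
    samples = np.power(-np.log(u), -1.0/c)
    # identical to mapping the same uniforms through the distribution's ppf
    return np.allclose(samples, invweibull.ppf(u, c))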
class johnsonsb_gen(rv_continuous):
r"""A Johnson SB continuous random variable.
%(before_notes)s
See Also
--------
johnsonsu
Notes
-----
The probability density function for `johnsonsb` is:
.. math::
f(x, a, b) = \frac{b}{x(1-x)} \phi(a + b \log \frac{x}{1-x} )
for :math:`0 < x < 1` and :math:`a, b > 0`, and :math:`\phi` is the normal
pdf.
`johnsonsb` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
# johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
trm = _norm_pdf(a + b*np.log(x/(1.0-x)))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b*np.log(x/(1.0-x)))
def _ppf(self, q, a, b):
return 1.0 / (1 + np.exp(-1.0 / b * (_norm_ppf(q) - a)))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
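# Editorial sketch (illustrative only): the ppf above is the logistic transform of
# a shifted and scaled normal quantile, i.e. X = expit((Z - a)/b) with Z ~ N(0, 1)
# follows johnsonsb(a, b). The helper name, parameters and seed are assumptions.
def _johnsonsb_transform_check(a=1.2, b=2.0, size=5, seed=42):
    rng = np.random.RandomState(seed)
    z = rng.standard_normal(size)
    x = sc.expit((z - a) / b)
    # the cdf of the transformed variates equals the normal cdf of z
    return np.allclose(johnsonsb.cdf(x, a, b), _norm_cdf(z))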
class johnsonsu_gen(rv_continuous):
r"""A Johnson SU continuous random variable.
%(before_notes)s
See Also
--------
johnsonsb
Notes
-----
The probability density function for `johnsonsu` is:
.. math::
f(x, a, b) = \frac{b}{\sqrt{x^2 + 1}}
\phi(a + b \log(x + \sqrt{x^2 + 1}))
for any real :math:`x` and :math:`a, b > 0`, and :math:`\phi` is the normal pdf.
`johnsonsu` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
# johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
# phi(a + b * log(x + sqrt(x**2 + 1)))
x2 = x*x
trm = _norm_pdf(a + b * np.log(x + np.sqrt(x2+1)))
return b*1.0/np.sqrt(x2+1.0)*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b * np.log(x + np.sqrt(x*x + 1)))
def _ppf(self, q, a, b):
return np.sinh((_norm_ppf(q) - a) / b)
johnsonsu = johnsonsu_gen(name='johnsonsu')
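# Editorial sketch (illustrative only): the ppf above shows that
# X = sinh((Z - a)/b) with Z ~ N(0, 1) follows johnsonsu(a, b). The helper name,
# parameters and seed are assumptions introduced here.
def _johnsonsu_transform_check(a=0.5, b=1.5, size=5, seed=7):
    rng = np.random.RandomState(seed)
    z = rng.standard_normal(size)
    x = np.sinh((z - a) / b)
    # the cdf of the transformed variates equals the normal cdf of z
    return np.allclose(johnsonsu.cdf(x, a, b), _norm_cdf(z))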
class laplace_gen(rv_continuous):
r"""A Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `laplace` is
.. math::
f(x) = \frac{1}{2} \exp(-|x|)
for a real number :math:`x`.
%(after_notes)s
%(example)s
"""
def _rvs(self, size=None, random_state=None):
return random_state.laplace(0, 1, size=size)
def _pdf(self, x):
# laplace.pdf(x) = 1/2 * exp(-abs(x))
return 0.5*np.exp(-abs(x))
def _cdf(self, x):
return np.where(x > 0, 1.0-0.5*np.exp(-x), 0.5*np.exp(x))
def _ppf(self, q):
return np.where(q > 0.5, -np.log(2*(1-q)), np.log(2*q))
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return np.log(2)+1
@replace_notes_in_docstring(rv_continuous, notes="""\
This function uses explicit formulas for the maximum likelihood
estimation of the Laplace distribution parameters, so the keyword
arguments `loc`, `scale`, and `optimizer` are ignored.\n\n""")
def fit(self, data, *args, **kwds):
data, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
# Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
# and Peacock (2000), Page 124
if floc is None:
floc = np.median(data)
if fscale is None:
fscale = (np.sum(np.abs(data - floc))) / len(data)
return floc, fscale
laplace = laplace_gen(name='laplace')
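# Editorial sketch (illustrative only): the closed-form MLE used by `laplace.fit`
# above (sample median for `loc`, mean absolute deviation from the median for
# `scale`) can be reproduced directly. The helper name, simulated data and seed
# are assumptions introduced here.
def _laplace_fit_check(seed=3, n=1000):
    rng = np.random.RandomState(seed)
    data = rng.laplace(loc=2.0, scale=0.5, size=n)
    loc_hat = np.median(data)
    scale_hat = np.abs(data - loc_hat).mean()
    return np.allclose([loc_hat, scale_hat], laplace.fit(data))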
def _check_fit_input_parameters(dist, data, args, kwds):
data = np.asarray(data)
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
num_shapes = len(dist.shapes.split(",")) if dist.shapes else 0
fshape_keys = []
fshapes = []
# user has many options for fixing the shape, so here we standardize it
# into 'f' + the number of the shape.
# Adapted from `_reduce_func` in `_distn_infrastructure.py`:
if dist.shapes:
shapes = dist.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes):
key = 'f' + str(j)
names = [key, 'f' + s, 'fix_' + s]
val = _get_fixed_fit_value(kwds, names)
fshape_keys.append(key)
fshapes.append(val)
if val is not None:
kwds[key] = val
# determine if there are any unknown arguments in kwds
known_keys = {'loc', 'scale', 'optimizer', 'floc', 'fscale', *fshape_keys}
unknown_keys = set(kwds).difference(known_keys)
if unknown_keys:
raise TypeError(f"Unknown keyword arguments: {unknown_keys}.")
if len(args) > num_shapes:
raise TypeError("Too many positional arguments.")
if None not in {floc, fscale, *fshapes}:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise RuntimeError("All parameters fixed. There is nothing to "
"optimize.")
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
return (data, *fshapes, floc, fscale)
class levy_gen(rv_continuous):
r"""A Levy continuous random variable.
%(before_notes)s
See Also
--------
levy_stable, levy_l
Notes
-----
The probability density function for `levy` is:
.. math::
f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp\left(-\frac{1}{2x}\right)
for :math:`x >= 0`.
This is the same as the Levy-stable distribution with :math:`a=1/2` and
:math:`b=1`.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
# levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
return 1 / np.sqrt(2*np.pi*x) / x * np.exp(-1/(2*x))
def _cdf(self, x):
# Equivalent to 2*norm.sf(np.sqrt(1/x))
return sc.erfc(np.sqrt(0.5 / x))
def _sf(self, x):
return sc.erf(np.sqrt(0.5 / x))
def _ppf(self, q):
# Equivalent to 1.0/(norm.isf(q/2)**2) or 0.5/(erfcinv(q)**2)
val = -sc.ndtri(q/2)
return 1.0 / (val * val)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy = levy_gen(a=0.0, name="levy")
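# Editorial sketch (illustrative only): the comment in `_cdf` above notes that
# levy.cdf(x) equals 2*norm.sf(sqrt(1/x)); equivalently, 1/Z**2 is levy
# distributed when Z ~ N(0, 1). The helper name and grid are assumptions.
def _levy_norm_relation_check():
    x = np.linspace(0.05, 20, 50)
    return np.allclose(levy.cdf(x), 2 * _norm_sf(np.sqrt(1.0 / x)))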
class levy_l_gen(rv_continuous):
r"""A left-skewed Levy continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_stable
Notes
-----
The probability density function for `levy_l` is:
.. math::
f(x) = \frac{1}{|x| \sqrt{2\pi |x|}} \exp{ \left(-\frac{1}{2|x|} \right)}
for :math:`x <= 0`.
This is the same as the Levy-stable distribution with :math:`a=1/2` and
:math:`b=-1`.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
# levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
ax = abs(x)
return 1/np.sqrt(2*np.pi*ax)/ax*np.exp(-1/(2*ax))
def _cdf(self, x):
ax = abs(x)
return 2 * _norm_cdf(1 / np.sqrt(ax)) - 1
def _sf(self, x):
ax = abs(x)
return 2 * _norm_sf(1 / np.sqrt(ax))
def _ppf(self, q):
val = _norm_ppf((q + 1.0) / 2)
return -1.0 / (val * val)
def _isf(self, p):
return -1/_norm_isf(p/2)**2
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy_l = levy_l_gen(b=0.0, name="levy_l")
class levy_stable_gen(rv_continuous):
r"""A Levy-stable continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_l
Notes
-----
The distribution for `levy_stable` has characteristic function:
.. math::
\varphi(t, \alpha, \beta, c, \mu) =
e^{it\mu -|ct|^{\alpha}(1-i\beta \operatorname{sign}(t)\Phi(\alpha, t))}
where:
.. math::
\Phi = \begin{cases}
\tan \left({\frac {\pi \alpha }{2}}\right)&\alpha \neq 1\\
-{\frac {2}{\pi }}\log |t|&\alpha =1
\end{cases}
The probability density function for `levy_stable` is:
.. math::
f(x) = \frac{1}{2\pi}\int_{-\infty}^\infty \varphi(t)e^{-ixt}\,dt
where :math:`-\infty < t < \infty`. This integral does not have a known closed form.
For pdf evaluation we use one of three methods: integration in Zolotarev's :math:`S_0`
parameterization, direct integration of the characteristic function in its standard
parameterization, or an FFT of the characteristic function. If
``levy_stable.pdf_fft_min_points_threshold`` (defaults to None) is set to a value other than
None and the number of points to evaluate exceeds it, the FFT is used; otherwise one of the
other methods is used.
The default method is 'best', which uses Zolotarev's method unless alpha = 1 and beta != 0,
in which case the characteristic function is integrated directly. The default method can be
changed by setting ``levy_stable.pdf_default_method`` to either 'zolotarev', 'quadrature' or
'best'.
To increase accuracy of FFT calculation one can specify ``levy_stable.pdf_fft_grid_spacing``
(defaults to 0.001) and ``pdf_fft_n_points_two_power`` (defaults to a value that covers the
input range * 4). Setting ``pdf_fft_n_points_two_power`` to 16 should be sufficiently accurate
in most cases at the expense of CPU time.
For cdf evaluation we use either integration in Zolotarev's :math:`S_0` parameterization or
the integral of a spline interpolated through the FFT-based pdf. The settings affecting the
FFT calculation are the same as for the pdf. Setting the threshold to ``None`` (default)
disables the FFT; since Zolotarev's method is superior in accuracy for cdf calculations,
the FFT is disabled by default.
The fit method first computes a quantile-based estimate of the parameters following [MC] and
then uses it as the starting point for MLE. Note that MLE doesn't always converge if FFT is
used for pdf calculations, so it is best to leave ``pdf_fft_min_points_threshold`` unset.
.. warning::
For pdf calculations the Zolotarev implementation is unstable when alpha = 1 and
beta != 0. In this case the quadrature method is recommended. FFT calculation is also
considered experimental.
For cdf calculations FFT calculation is considered experimental. Use Zolotarev's method
instead (the default).
%(after_notes)s
References
----------
.. [MC] McCulloch, J., 1986. Simple consistent estimators of stable distribution parameters.
Communications in Statistics - Simulation and Computation 15, 1109-1136.
.. [MS] Mittnik, S.T. Rachev, T. Doganoglu, D. Chenyao, 1999. Maximum likelihood estimation
of stable Paretian models, Mathematical and Computer Modelling, Volume 29, Issue 10,
1999, Pages 275-293.
.. [BS] Borak, S., Hardle, W., Weron, R., 2005. Stable distributions, Economic Risk.
%(example)s
"""
def _rvs(self, alpha, beta, size=None, random_state=None):
def alpha1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (2/np.pi*(np.pi/2 + bTH)*tanTH -
beta*np.log((np.pi/2*W*cosTH)/(np.pi/2 + bTH)))
def beta0func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (W/(cosTH/np.tan(aTH) + np.sin(TH)) *
((np.cos(aTH) + np.sin(aTH)*tanTH)/W)**(1.0/alpha))
def otherwise(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
# alpha is not 1 and beta is not 0
val0 = beta*np.tan(np.pi*alpha/2)
th0 = np.arctan(val0)/alpha
val3 = W/(cosTH/np.tan(alpha*(th0 + TH)) + np.sin(TH))
res3 = val3*((np.cos(aTH) + np.sin(aTH)*tanTH -
val0*(np.sin(aTH) - np.cos(aTH)*tanTH))/W)**(1.0/alpha)
return res3
def alphanot1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
res = _lazywhere(beta == 0,
(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
beta0func, f2=otherwise)
return res
alpha = np.broadcast_to(alpha, size)
beta = np.broadcast_to(beta, size)
TH = uniform.rvs(loc=-np.pi/2.0, scale=np.pi, size=size,
random_state=random_state)
W = expon.rvs(size=size, random_state=random_state)
aTH = alpha*TH
bTH = beta*TH
cosTH = np.cos(TH)
tanTH = np.tan(TH)
res = _lazywhere(alpha == 1,
(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
alpha1func, f2=alphanot1func)
return res
def _argcheck(self, alpha, beta):
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
@staticmethod
def _cf(t, alpha, beta):
Phi = lambda alpha, t: np.tan(np.pi*alpha/2) if alpha != 1 else -2.0*np.log(np.abs(t))/np.pi
return np.exp(-(np.abs(t)**alpha)*(1-1j*beta*np.sign(t)*Phi(alpha, t)))
@staticmethod
def _pdf_from_cf_with_fft(cf, h=0.01, q=9):
"""Calculates pdf from cf using fft. Using region around 0 with N=2**q points
separated by distance h. As suggested by [MS].
"""
N = 2**q
n = np.arange(1,N+1)
density = ((-1)**(n-1-N/2))*np.fft.fft(((-1)**(n-1))*cf(2*np.pi*(n-1-N/2)/h/N))/h/N
x = (n-1-N/2)*h
return (x, density)
@staticmethod
def _pdf_single_value_best(x, alpha, beta):
if alpha != 1. or (alpha == 1. and beta == 0.):
return levy_stable_gen._pdf_single_value_zolotarev(x, alpha, beta)
else:
return levy_stable_gen._pdf_single_value_cf_integrate(x, alpha, beta)
@staticmethod
def _pdf_single_value_cf_integrate(x, alpha, beta):
cf = lambda t: levy_stable_gen._cf(t, alpha, beta)
return integrate.quad(lambda t: np.real(np.exp(-1j*t*x)*cf(t)), -np.inf, np.inf, limit=1000)[0]/np.pi/2
@staticmethod
def _pdf_single_value_zolotarev(x, alpha, beta):
"""Calculate pdf using Zolotarev's methods as detailed in [BS].
"""
zeta = -beta*np.tan(np.pi*alpha/2.)
if alpha != 1:
x0 = x + zeta # convert to S_0 parameterization
xi = np.arctan(-zeta)/alpha
def V(theta):
return np.cos(alpha*xi)**(1/(alpha-1)) * \
(np.cos(theta)/np.sin(alpha*(xi+theta)))**(alpha/(alpha-1)) * \
(np.cos(alpha*xi+(alpha-1)*theta)/np.cos(theta))
if x0 > zeta:
def g(theta):
return (V(theta) *
np.real(np.complex128(x0-zeta)**(alpha/(alpha-1))))
def f(theta):
return g(theta) * np.exp(-g(theta))
# spare calculating integral on null set
# use isclose as macos has fp differences
if np.isclose(-xi, np.pi/2, rtol=1e-014, atol=1e-014):
return 0.
with np.errstate(all="ignore"):
intg_max = optimize.minimize_scalar(lambda theta: -f(theta), bounds=[-xi, np.pi/2])
intg_kwargs = {}
# windows quadpack less forgiving with points out of bounds
if intg_max.success and not np.isnan(intg_max.fun)\
and intg_max.x > -xi and intg_max.x < np.pi/2:
intg_kwargs["points"] = [intg_max.x]
intg = integrate.quad(f, -xi, np.pi/2, **intg_kwargs)[0]
return alpha * intg / np.pi / np.abs(alpha-1) / (x0-zeta)
elif x0 == zeta:
return sc.gamma(1+1/alpha)*np.cos(xi)/np.pi/((1+zeta**2)**(1/alpha/2))
else:
return levy_stable_gen._pdf_single_value_zolotarev(-x, alpha, -beta)
else:
# since location zero, no need to reposition x for S_0 parameterization
xi = np.pi/2
if beta != 0:
warnings.warn('Density calculation unstable for alpha=1 and beta!=0.' +
' Use quadrature method instead.', RuntimeWarning)
def V(theta):
expr_1 = np.pi/2+beta*theta
return 2. * expr_1 * np.exp(expr_1*np.tan(theta)/beta) / np.cos(theta) / np.pi
def g(theta):
return np.exp(-np.pi * x / 2. / beta) * V(theta)
def f(theta):
return g(theta) * np.exp(-g(theta))
with np.errstate(all="ignore"):
intg_max = optimize.minimize_scalar(lambda theta: -f(theta), bounds=[-np.pi/2, np.pi/2])
intg = integrate.fixed_quad(f, -np.pi/2, intg_max.x)[0] + integrate.fixed_quad(f, intg_max.x, np.pi/2)[0]
return intg / np.abs(beta) / 2.
else:
return 1/(1+x**2)/np.pi
@staticmethod
def _cdf_single_value_zolotarev(x, alpha, beta):
"""Calculate cdf using Zolotarev's methods as detailed in [BS].
"""
zeta = -beta*np.tan(np.pi*alpha/2.)
if alpha != 1:
x0 = x + zeta # convert to S_0 parameterization
xi = np.arctan(-zeta)/alpha
def V(theta):
return np.cos(alpha*xi)**(1/(alpha-1)) * \
(np.cos(theta)/np.sin(alpha*(xi+theta)))**(alpha/(alpha-1)) * \
(np.cos(alpha*xi+(alpha-1)*theta)/np.cos(theta))
if x0 > zeta:
c_1 = 1 if alpha > 1 else .5 - xi/np.pi
def f(theta):
z = np.complex128(x0 - zeta)
return np.exp(-V(theta) * np.real(z**(alpha/(alpha-1))))
with np.errstate(all="ignore"):
# spare calculating integral on null set
# use isclose as macos has fp differences
if np.isclose(-xi, np.pi/2, rtol=1e-014, atol=1e-014):
intg = 0
else:
intg = integrate.quad(f, -xi, np.pi/2)[0]
return c_1 + np.sign(1-alpha) * intg / np.pi
elif x0 == zeta:
return .5 - xi/np.pi
else:
return 1 - levy_stable_gen._cdf_single_value_zolotarev(-x, alpha, -beta)
else:
# since location zero, no need to reposition x for S_0 parameterization
xi = np.pi/2
if beta > 0:
def V(theta):
expr_1 = np.pi/2+beta*theta
return 2. * expr_1 * np.exp(expr_1*np.tan(theta)/beta) / np.cos(theta) / np.pi
with np.errstate(all="ignore"):
expr_1 = np.exp(-np.pi*x/beta/2.)
int_1 = integrate.quad(lambda theta: np.exp(-expr_1 * V(theta)), -np.pi/2, np.pi/2)[0]
return int_1 / np.pi
elif beta == 0:
return .5 + np.arctan(x)/np.pi
else:
return 1 - levy_stable_gen._cdf_single_value_zolotarev(-x, 1, -beta)
def _pdf(self, x, alpha, beta):
x = np.asarray(x).reshape(1, -1)[0,:]
x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
data_in = np.dstack((x, alpha, beta))[0]
data_out = np.empty(shape=(len(data_in),1))
pdf_default_method_name = getattr(self, 'pdf_default_method', 'best')
if pdf_default_method_name == 'best':
pdf_single_value_method = levy_stable_gen._pdf_single_value_best
elif pdf_default_method_name == 'zolotarev':
pdf_single_value_method = levy_stable_gen._pdf_single_value_zolotarev
else:
pdf_single_value_method = levy_stable_gen._pdf_single_value_cf_integrate
fft_min_points_threshold = getattr(self, 'pdf_fft_min_points_threshold', None)
fft_grid_spacing = getattr(self, 'pdf_fft_grid_spacing', 0.001)
fft_n_points_two_power = getattr(self, 'pdf_fft_n_points_two_power', None)
# group data in unique arrays of alpha, beta pairs
uniq_param_pairs = np.vstack(list({tuple(row) for row in
data_in[:, 1:]}))
for pair in uniq_param_pairs:
data_mask = np.all(data_in[:,1:] == pair, axis=-1)
data_subset = data_in[data_mask]
if fft_min_points_threshold is None or len(data_subset) < fft_min_points_threshold:
data_out[data_mask] = np.array([pdf_single_value_method(_x, _alpha, _beta)
for _x, _alpha, _beta in data_subset]).reshape(len(data_subset), 1)
else:
warnings.warn('Density calculations experimental for FFT method.' +
' Use combination of Zolotarev and quadrature methods instead.', RuntimeWarning)
_alpha, _beta = pair
_x = data_subset[:,(0,)]
# need enough points to "cover" _x for interpolation
h = fft_grid_spacing
q = np.ceil(np.log(2*np.max(np.abs(_x))/h)/np.log(2)) + 2 if fft_n_points_two_power is None else int(fft_n_points_two_power)
density_x, density = levy_stable_gen._pdf_from_cf_with_fft(lambda t: levy_stable_gen._cf(t, _alpha, _beta), h=h, q=q)
f = interpolate.interp1d(density_x, np.real(density))
data_out[data_mask] = f(_x)
return data_out.T[0]
def _cdf(self, x, alpha, beta):
x = np.asarray(x).reshape(1, -1)[0,:]
x, alpha, beta = np.broadcast_arrays(x, alpha, beta)
data_in = np.dstack((x, alpha, beta))[0]
data_out = np.empty(shape=(len(data_in),1))
fft_min_points_threshold = getattr(self, 'pdf_fft_min_points_threshold', None)
fft_grid_spacing = getattr(self, 'pdf_fft_grid_spacing', 0.001)
fft_n_points_two_power = getattr(self, 'pdf_fft_n_points_two_power', None)
# group data in unique arrays of alpha, beta pairs
uniq_param_pairs = np.vstack(
list({tuple(row) for row in data_in[:,1:]}))
for pair in uniq_param_pairs:
data_mask = np.all(data_in[:,1:] == pair, axis=-1)
data_subset = data_in[data_mask]
if fft_min_points_threshold is None or len(data_subset) < fft_min_points_threshold:
data_out[data_mask] = np.array([levy_stable._cdf_single_value_zolotarev(_x, _alpha, _beta)
for _x, _alpha, _beta in data_subset]).reshape(len(data_subset), 1)
else:
warnings.warn("FFT method is considered experimental for "
"cumulative distribution function "
"evaluations. Use Zolotarev's method instead.",
RuntimeWarning)
_alpha, _beta = pair
_x = data_subset[:,(0,)]
# need enough points to "cover" _x for interpolation
h = fft_grid_spacing
q = 16 if fft_n_points_two_power is None else int(fft_n_points_two_power)
density_x, density = levy_stable_gen._pdf_from_cf_with_fft(lambda t: levy_stable_gen._cf(t, _alpha, _beta), h=h, q=q)
f = interpolate.InterpolatedUnivariateSpline(density_x, np.real(density))
data_out[data_mask] = np.array([f.integral(self.a, x_1) for x_1 in _x]).reshape(data_out[data_mask].shape)
return data_out.T[0]
def _fitstart(self, data):
# We follow McCulloch's (1986) method - Simple Consistent Estimators
# of Stable Distribution Parameters
# Table III and IV
nu_alpha_range = [2.439, 2.5, 2.6, 2.7, 2.8, 3, 3.2, 3.5, 4, 5, 6, 8, 10, 15, 25]
nu_beta_range = [0, 0.1, 0.2, 0.3, 0.5, 0.7, 1]
# table III - alpha = psi_1(nu_alpha, nu_beta)
alpha_table = [
[2.000, 2.000, 2.000, 2.000, 2.000, 2.000, 2.000],
[1.916, 1.924, 1.924, 1.924, 1.924, 1.924, 1.924],
[1.808, 1.813, 1.829, 1.829, 1.829, 1.829, 1.829],
[1.729, 1.730, 1.737, 1.745, 1.745, 1.745, 1.745],
[1.664, 1.663, 1.663, 1.668, 1.676, 1.676, 1.676],
[1.563, 1.560, 1.553, 1.548, 1.547, 1.547, 1.547],
[1.484, 1.480, 1.471, 1.460, 1.448, 1.438, 1.438],
[1.391, 1.386, 1.378, 1.364, 1.337, 1.318, 1.318],
[1.279, 1.273, 1.266, 1.250, 1.210, 1.184, 1.150],
[1.128, 1.121, 1.114, 1.101, 1.067, 1.027, 0.973],
[1.029, 1.021, 1.014, 1.004, 0.974, 0.935, 0.874],
[0.896, 0.892, 0.884, 0.883, 0.855, 0.823, 0.769],
[0.818, 0.812, 0.806, 0.801, 0.780, 0.756, 0.691],
[0.698, 0.695, 0.692, 0.689, 0.676, 0.656, 0.597],
[0.593, 0.590, 0.588, 0.586, 0.579, 0.563, 0.513]]
# table IV - beta = psi_2(nu_alpha, nu_beta)
beta_table = [
[0, 2.160, 1.000, 1.000, 1.000, 1.000, 1.000],
[0, 1.592, 3.390, 1.000, 1.000, 1.000, 1.000],
[0, 0.759, 1.800, 1.000, 1.000, 1.000, 1.000],
[0, 0.482, 1.048, 1.694, 1.000, 1.000, 1.000],
[0, 0.360, 0.760, 1.232, 2.229, 1.000, 1.000],
[0, 0.253, 0.518, 0.823, 1.575, 1.000, 1.000],
[0, 0.203, 0.410, 0.632, 1.244, 1.906, 1.000],
[0, 0.165, 0.332, 0.499, 0.943, 1.560, 1.000],
[0, 0.136, 0.271, 0.404, 0.689, 1.230, 2.195],
[0, 0.109, 0.216, 0.323, 0.539, 0.827, 1.917],
[0, 0.096, 0.190, 0.284, 0.472, 0.693, 1.759],
[0, 0.082, 0.163, 0.243, 0.412, 0.601, 1.596],
[0, 0.074, 0.147, 0.220, 0.377, 0.546, 1.482],
[0, 0.064, 0.128, 0.191, 0.330, 0.478, 1.362],
[0, 0.056, 0.112, 0.167, 0.285, 0.428, 1.274]]
# Table V and VII
alpha_range = [2, 1.9, 1.8, 1.7, 1.6, 1.5, 1.4, 1.3, 1.2, 1.1, 1, 0.9, 0.8, 0.7, 0.6, 0.5]
beta_range = [0, 0.25, 0.5, 0.75, 1]
# Table V - nu_c = psi_3(alpha, beta)
nu_c_table = [
[1.908, 1.908, 1.908, 1.908, 1.908],
[1.914, 1.915, 1.916, 1.918, 1.921],
[1.921, 1.922, 1.927, 1.936, 1.947],
[1.927, 1.930, 1.943, 1.961, 1.987],
[1.933, 1.940, 1.962, 1.997, 2.043],
[1.939, 1.952, 1.988, 2.045, 2.116],
[1.946, 1.967, 2.022, 2.106, 2.211],
[1.955, 1.984, 2.067, 2.188, 2.333],
[1.965, 2.007, 2.125, 2.294, 2.491],
[1.980, 2.040, 2.205, 2.435, 2.696],
[2.000, 2.085, 2.311, 2.624, 2.973],
[2.040, 2.149, 2.461, 2.886, 3.356],
[2.098, 2.244, 2.676, 3.265, 3.912],
[2.189, 2.392, 3.004, 3.844, 4.775],
[2.337, 2.634, 3.542, 4.808, 6.247],
[2.588, 3.073, 4.534, 6.636, 9.144]]
# Table VII - nu_zeta = psi_5(alpha, beta)
nu_zeta_table = [
[0, 0.000, 0.000, 0.000, 0.000],
[0, -0.017, -0.032, -0.049, -0.064],
[0, -0.030, -0.061, -0.092, -0.123],
[0, -0.043, -0.088, -0.132, -0.179],
[0, -0.056, -0.111, -0.170, -0.232],
[0, -0.066, -0.134, -0.206, -0.283],
[0, -0.075, -0.154, -0.241, -0.335],
[0, -0.084, -0.173, -0.276, -0.390],
[0, -0.090, -0.192, -0.310, -0.447],
[0, -0.095, -0.208, -0.346, -0.508],
[0, -0.098, -0.223, -0.380, -0.576],
[0, -0.099, -0.237, -0.424, -0.652],
[0, -0.096, -0.250, -0.469, -0.742],
[0, -0.089, -0.262, -0.520, -0.853],
[0, -0.078, -0.272, -0.581, -0.997],
[0, -0.061, -0.279, -0.659, -1.198]]
psi_1 = interpolate.interp2d(nu_beta_range, nu_alpha_range, alpha_table, kind='linear')
psi_2 = interpolate.interp2d(nu_beta_range, nu_alpha_range, beta_table, kind='linear')
psi_2_1 = lambda nu_beta, nu_alpha: psi_2(nu_beta, nu_alpha) if nu_beta > 0 else -psi_2(-nu_beta, nu_alpha)
phi_3 = interpolate.interp2d(beta_range, alpha_range, nu_c_table, kind='linear')
phi_3_1 = lambda beta, alpha: phi_3(beta, alpha) if beta > 0 else phi_3(-beta, alpha)
phi_5 = interpolate.interp2d(beta_range, alpha_range, nu_zeta_table, kind='linear')
phi_5_1 = lambda beta, alpha: phi_5(beta, alpha) if beta > 0 else -phi_5(-beta, alpha)
# quantiles
p05 = np.percentile(data, 5)
p50 = np.percentile(data, 50)
p95 = np.percentile(data, 95)
p25 = np.percentile(data, 25)
p75 = np.percentile(data, 75)
nu_alpha = (p95 - p05)/(p75 - p25)
nu_beta = (p95 + p05 - 2*p50)/(p95 - p05)
if nu_alpha >= 2.439:
alpha = np.clip(psi_1(nu_beta, nu_alpha)[0], np.finfo(float).eps, 2.)
beta = np.clip(psi_2_1(nu_beta, nu_alpha)[0], -1., 1.)
else:
alpha = 2.0
beta = np.sign(nu_beta)
c = (p75 - p25) / phi_3_1(beta, alpha)[0]
zeta = p50 + c*phi_5_1(beta, alpha)[0]
delta = np.clip(zeta-beta*c*np.tan(np.pi*alpha/2.) if alpha == 1. else zeta, np.finfo(float).eps, np.inf)
return (alpha, beta, delta, c)
def _stats(self, alpha, beta):
mu = 0 if alpha > 1 else np.nan
mu2 = 2 if alpha == 2 else np.inf
g1 = 0. if alpha == 2. else np.NaN
g2 = 0. if alpha == 2. else np.NaN
return mu, mu2, g1, g2
levy_stable = levy_stable_gen(name='levy_stable')
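# Editorial sketch (illustrative only): for alpha = 2 the stable law reduces to a
# Gaussian with scale sqrt(2), which gives a convenient sanity check for the
# FFT-based helper `_pdf_from_cf_with_fft`. The helper name, grid spacing, number
# of FFT points and tolerance are assumptions, not part of the implementation.
def _levy_stable_fft_gaussian_check(h=0.01, q=12):
    cf = lambda t: levy_stable_gen._cf(t, 2.0, 0.0)
    x, density = levy_stable_gen._pdf_from_cf_with_fft(cf, h=h, q=q)
    mask = np.abs(x) < 3
    # closed-form N(0, sqrt(2)) density for comparison
    gauss_pdf = np.exp(-x[mask]**2 / 4) / (2 * np.sqrt(np.pi))
    return np.allclose(np.real(density[mask]), gauss_pdf, atol=1e-2)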
class logistic_gen(rv_continuous):
r"""A logistic (or Sech-squared) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `logistic` is:
.. math::
f(x) = \frac{\exp(-x)}
{(1+\exp(-x))^2}
`logistic` is a special case of `genlogistic` with ``c=1``.
%(after_notes)s
%(example)s
"""
def _rvs(self, size=None, random_state=None):
return random_state.logistic(size=size)
def _pdf(self, x):
# logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
return np.exp(self._logpdf(x))
def _logpdf(self, x):
y = -np.abs(x)
return y - 2. * sc.log1p(np.exp(y))
def _cdf(self, x):
return sc.expit(x)
def _ppf(self, q):
return sc.logit(q)
def _sf(self, x):
return sc.expit(-x)
def _isf(self, q):
return -sc.logit(q)
def _stats(self):
return 0, np.pi*np.pi/3.0, 0, 6.0/5.0
def _entropy(self):
# https://en.wikipedia.org/wiki/Logistic_distribution
return 2.0
def fit(self, data, *args, **kwds):
data, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
# if user has provided `floc` or `fscale`, fall back on super fit
# method. This scenario is not suitable for solving a system of
# equations
if floc is not None or fscale is not None:
return super(logistic_gen, self).fit(data, *args, **kwds)
# rv_continuous provided guesses
loc, scale = self._fitstart(data)
# account for user provided guesses
loc = kwds.pop('loc', loc)
scale = kwds.pop('scale', scale)
# the maximum likelihood estimators `a` and `b` of the location and
# scale parameters are roots of the two equations described in `func`.
# Source: Statistical Distributions, 3rd Edition. Evans, Hastings, and
# Peacock (2000), Page 130
def func(params, data):
a, b = params
n = len(data)
c = (data - a) / b
x1 = np.sum(sc.expit(c)) - n/2
x2 = np.sum(c*np.tanh(c/2)) - n
return x1, x2
return tuple(optimize.root(func, (loc, scale), args=(data,)).x)
logistic = logistic_gen(name='logistic')
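# Editorial sketch (illustrative only): the `fit` method above returns a root of
# the score equations in `func`, so plugging the fitted (loc, scale) back into
# those equations should give values near zero. The helper name, simulated data
# and seed are assumptions introduced here.
def _logistic_fit_score_check(seed=5, n=500):
    rng = np.random.RandomState(seed)
    data = rng.logistic(loc=1.0, scale=2.0, size=n)
    loc_hat, scale_hat = logistic.fit(data)
    c = (data - loc_hat) / scale_hat
    # both residuals should be close to zero at the MLE
    return np.sum(sc.expit(c)) - n / 2, np.sum(c * np.tanh(c / 2)) - n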
class loggamma_gen(rv_continuous):
r"""A log gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loggamma` is:
.. math::
f(x, c) = \frac{\exp(c x - \exp(x))}
{\Gamma(c)}
for all :math:`x, c > 0`. Here, :math:`\Gamma` is the
gamma function (`scipy.special.gamma`).
`loggamma` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _rvs(self, c, size=None, random_state=None):
return np.log(random_state.gamma(c, size=size))
def _pdf(self, x, c):
# loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
return np.exp(c*x-np.exp(x)-sc.gammaln(c))
def _cdf(self, x, c):
return sc.gammainc(c, np.exp(x))
def _ppf(self, q, c):
return np.log(sc.gammaincinv(c, q))
def _stats(self, c):
# See, for example, "A Statistical Study of Log-Gamma Distribution", by
# Ping Shing Chan (thesis, McMaster University, 1993).
mean = sc.digamma(c)
var = sc.polygamma(1, c)
skewness = sc.polygamma(2, c) / np.power(var, 1.5)
excess_kurtosis = sc.polygamma(3, c) / (var*var)
return mean, var, skewness, excess_kurtosis
loggamma = loggamma_gen(name='loggamma')
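# Editorial sketch (illustrative only): `_rvs` above draws log(G) with G ~ gamma(c),
# and `_stats` gives the mean as digamma(c); the two can be compared by Monte
# Carlo. The helper name, shape value, sample size and seed are assumptions.
def _loggamma_mean_check(c=2.5, n=200000, seed=11):
    rng = np.random.RandomState(seed)
    sample_mean = np.log(rng.gamma(c, size=n)).mean()
    return sample_mean, sc.digamma(c)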
class loglaplace_gen(rv_continuous):
r"""A log-Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loglaplace` is:
.. math::
f(x, c) = \begin{cases}\frac{c}{2} x^{ c-1} &\text{for } 0 < x < 1\\
\frac{c}{2} x^{-c-1} &\text{for } x \ge 1
\end{cases}
for :math:`c > 0`.
`loglaplace` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
References
----------
T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model",
The Mathematical Scientist, vol. 28, pp. 49-60, 2003.
%(example)s
"""
def _pdf(self, x, c):
# loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1
# = c / 2 * x**(-c-1), for x >= 1
cd2 = c/2.0
c = np.where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return np.where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _ppf(self, q, c):
return np.where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _munp(self, n, c):
return c**2 / (c**2 - n**2)
def _entropy(self, c):
return np.log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace')
def _lognorm_logpdf(x, s):
return _lazywhere(x != 0, (x, s),
lambda x, s: -np.log(x)**2 / (2*s**2) - np.log(s*x*np.sqrt(2*np.pi)),
-np.inf)
class lognorm_gen(rv_continuous):
r"""A lognormal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lognorm` is:
.. math::
f(x, s) = \frac{1}{s x \sqrt{2\pi}}
\exp\left(-\frac{\log^2(x)}{2s^2}\right)
for :math:`x > 0`, :math:`s > 0`.
`lognorm` takes ``s`` as a shape parameter for :math:`s`.
%(after_notes)s
A common parametrization for a lognormal random variable ``Y`` is in
terms of the mean, ``mu``, and standard deviation, ``sigma``, of the
unique normally distributed random variable ``X`` such that exp(X) = Y.
This parametrization corresponds to setting ``s = sigma`` and ``scale =
exp(mu)``.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, s, size=None, random_state=None):
return np.exp(s * random_state.standard_normal(size))
def _pdf(self, x, s):
# lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
return np.exp(self._logpdf(x, s))
def _logpdf(self, x, s):
return _lognorm_logpdf(x, s)
def _cdf(self, x, s):
return _norm_cdf(np.log(x) / s)
def _logcdf(self, x, s):
return _norm_logcdf(np.log(x) / s)
def _ppf(self, q, s):
return np.exp(s * _norm_ppf(q))
def _sf(self, x, s):
return _norm_sf(np.log(x) / s)
def _logsf(self, x, s):
return _norm_logsf(np.log(x) / s)
def _stats(self, s):
p = np.exp(s*s)
mu = np.sqrt(p)
mu2 = p*(p-1)
g1 = np.sqrt((p-1))*(2+p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self, s):
return 0.5 * (1 + np.log(2*np.pi) + 2 * np.log(s))
@extend_notes_in_docstring(rv_continuous, notes="""\
When the location parameter is fixed by using the `floc` argument,
this function uses explicit formulas for the maximum likelihood
estimation of the log-normal shape and scale parameters, so the
`optimizer`, `loc` and `scale` keyword arguments are ignored.\n\n""")
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
if floc is None:
# loc is not fixed. Use the default fit method.
return super(lognorm_gen, self).fit(data, *args, **kwds)
f0 = (kwds.get('f0', None) or kwds.get('fs', None) or
kwds.get('fix_s', None))
fscale = kwds.get('fscale', None)
if len(args) > 1:
raise TypeError("Too many input arguments.")
for name in ['f0', 'fs', 'fix_s', 'floc', 'fscale', 'loc', 'scale',
'optimizer']:
kwds.pop(name, None)
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
# Special case: loc is fixed. Use the maximum likelihood formulas
# instead of the numerical solver.
if f0 is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
floc = float(floc)
if floc != 0:
# Shifting the data by floc. Don't do the subtraction in-place,
# because `data` might be a view of the input array.
data = data - floc
if np.any(data <= 0):
raise FitDataError("lognorm", lower=floc, upper=np.inf)
lndata = np.log(data)
# Three cases to handle:
# * shape and scale both free
# * shape fixed, scale free
# * shape free, scale fixed
if fscale is None:
# scale is free.
scale = np.exp(lndata.mean())
if f0 is None:
# shape is free.
shape = lndata.std()
else:
# shape is fixed.
shape = float(f0)
else:
# scale is fixed, shape is free
scale = float(fscale)
shape = np.sqrt(((lndata - np.log(scale))**2).mean())
return shape, floc, scale
lognorm = lognorm_gen(a=0.0, name='lognorm')
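# Editorial sketch (illustrative only): the parametrization note above says that
# exp(X) with X ~ N(mu, sigma) corresponds to lognorm(s=sigma, scale=exp(mu)).
# The helper name, grid and the chosen mu/sigma are assumptions introduced here.
def _lognorm_parametrization_check(mu=0.7, sigma=1.3):
    x = np.linspace(0.05, 10, 50)
    # density of exp(N(mu, sigma)) by the change-of-variables formula
    direct = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2)) /
              (x * sigma * np.sqrt(2 * np.pi)))
    return np.allclose(direct, lognorm.pdf(x, s=sigma, scale=np.exp(mu)))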
class gilbrat_gen(rv_continuous):
r"""A Gilbrat continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gilbrat` is:
.. math::
f(x) = \frac{1}{x \sqrt{2\pi}} \exp(-\frac{1}{2} (\log(x))^2)
`gilbrat` is a special case of `lognorm` with ``s=1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, size=None, random_state=None):
return np.exp(random_state.standard_normal(size))
def _pdf(self, x):
# gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return _lognorm_logpdf(x, 1.0)
def _cdf(self, x):
return _norm_cdf(np.log(x))
def _ppf(self, q):
return np.exp(_norm_ppf(q))
def _stats(self):
p = np.e
mu = np.sqrt(p)
mu2 = p * (p - 1)
g1 = np.sqrt((p - 1)) * (2 + p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self):
return 0.5 * np.log(2 * np.pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
class maxwell_gen(rv_continuous):
r"""A Maxwell continuous random variable.
%(before_notes)s
Notes
-----
A special case of a `chi` distribution, with ``df=3``, ``loc=0.0``,
and given ``scale = a``, where ``a`` is the parameter used in the
Mathworld description [1]_.
The probability density function for `maxwell` is:
.. math::
f(x) = \sqrt{2/\pi}x^2 \exp(-x^2/2)
for :math:`x >= 0`.
%(after_notes)s
References
----------
.. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
%(example)s
"""
def _rvs(self, size=None, random_state=None):
return chi.rvs(3.0, size=size, random_state=random_state)
def _pdf(self, x):
# maxwell.pdf(x) = sqrt(2/pi)x**2 * exp(-x**2/2)
return _SQRT_2_OVER_PI*x*x*np.exp(-x*x/2.0)
def _logpdf(self, x):
return _LOG_SQRT_2_OVER_PI + 2*np.log(x) - 0.5*x*x
def _cdf(self, x):
return sc.gammainc(1.5, x*x/2.0)
def _ppf(self, q):
return np.sqrt(2*sc.gammaincinv(1.5, q))
def _stats(self):
val = 3*np.pi-8
return (2*np.sqrt(2.0/np.pi),
3-8/np.pi,
np.sqrt(2)*(32-10*np.pi)/val**1.5,
(-12*np.pi*np.pi + 160*np.pi - 384) / val**2.0)
def _entropy(self):
return _EULER + 0.5*np.log(2*np.pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell')
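# Editorial sketch (illustrative only): as stated in the notes, `maxwell` is a chi
# distribution with ``df=3``, which is also how `_rvs` draws samples. The helper
# name and grid are assumptions introduced here.
def _maxwell_chi_relation_check():
    x = np.linspace(0.01, 5, 50)
    return np.allclose(maxwell.pdf(x), chi.pdf(x, 3))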
class mielke_gen(rv_continuous):
r"""A Mielke Beta-Kappa / Dagum continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `mielke` is:
.. math::
f(x, k, s) = \frac{k x^{k-1}}{(1+x^s)^{1+k/s}}
for :math:`x > 0` and :math:`k, s > 0`. The distribution is sometimes
called the Dagum distribution ([2]_). It was already defined in [3]_, where it
is called a Burr Type III distribution (`burr` with parameters ``c=s`` and
``d=k/s``).
`mielke` takes ``k`` and ``s`` as shape parameters.
%(after_notes)s
References
----------
.. [1] Mielke, P.W., 1973 "Another Family of Distributions for Describing
and Analyzing Precipitation Data." J. Appl. Meteor., 12, 275-280
.. [2] Dagum, C., 1977 "A new model for personal income distribution."
Economie Appliquee, 33, 327-367.
.. [3] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
%(example)s
"""
def _argcheck(self, k, s):
return (k > 0) & (s > 0)
def _pdf(self, x, k, s):
return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
def _logpdf(self, x, k, s):
return np.log(k) + np.log(x)*(k-1.0) - np.log1p(x**s)*(1.0+k*1.0/s)
def _cdf(self, x, k, s):
return x**k / (1.0+x**s)**(k*1.0/s)
def _ppf(self, q, k, s):
qsk = pow(q, s*1.0/k)
return pow(qsk/(1.0-qsk), 1.0/s)
def _munp(self, n, k, s):
def nth_moment(n, k, s):
# n-th moment is defined for -k < n < s
return sc.gamma((k+n)/s)*sc.gamma(1-n/s)/sc.gamma(k/s)
return _lazywhere(n < s, (n, k, s), nth_moment, np.inf)
mielke = mielke_gen(a=0.0, name='mielke')
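# Editorial sketch (illustrative only): the notes describe `mielke` as a Burr
# Type III law, i.e. mielke(k, s) should match burr(c=s, d=k/s), assuming `burr`
# is defined earlier in this module. The helper name, grid and parameter values
# are assumptions introduced here.
def _mielke_burr_relation_check(k=3.0, s=2.0):
    x = np.linspace(0.1, 5, 50)
    return np.allclose(mielke.pdf(x, k, s), burr.pdf(x, c=s, d=k/s))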
class kappa4_gen(rv_continuous):
r"""Kappa 4 parameter distribution.
%(before_notes)s
Notes
-----
The probability density function for kappa4 is:
.. math::
f(x, h, k) = (1 - k x)^{1/k - 1} (1 - h (1 - k x)^{1/k})^{1/h-1}
if :math:`h` and :math:`k` are not equal to 0.
If :math:`h` or :math:`k` are zero then the pdf can be simplified:
h = 0 and k != 0::
kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
exp(-(1.0 - k*x)**(1.0/k))
h != 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*(1.0 - h*exp(-x))**(1.0/h - 1.0)
h = 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*exp(-exp(-x))
kappa4 takes :math:`h` and :math:`k` as shape parameters.
The kappa4 distribution returns other distributions when certain
:math:`h` and :math:`k` values are used.
+------+-------------+----------------+------------------+
| h | k=0.0 | k=1.0 | -inf<=k<=inf |
+======+=============+================+==================+
| -1.0 | Logistic | | Generalized |
| | | | Logistic(1) |
| | | | |
| | logistic(x) | | |
+------+-------------+----------------+------------------+
| 0.0 | Gumbel | Reverse | Generalized |
| | | Exponential(2) | Extreme Value |
| | | | |
| | gumbel_r(x) | | genextreme(x, k) |
+------+-------------+----------------+------------------+
| 1.0 | Exponential | Uniform | Generalized |
| | | | Pareto |
| | | | |
| | expon(x) | uniform(x) | genpareto(x, -k) |
+------+-------------+----------------+------------------+
(1) There are at least five generalized logistic distributions.
Four are described here:
https://en.wikipedia.org/wiki/Generalized_logistic_distribution
The "fifth" one is the one kappa4 should match which currently
isn't implemented in scipy:
https://en.wikipedia.org/wiki/Talk:Generalized_logistic_distribution
https://www.mathwave.com/help/easyfit/html/analyses/distributions/gen_logistic.html
(2) This distribution is currently not in scipy.
References
----------
J.C. Finney, "Optimization of a Skewed Logistic Distribution With Respect
to the Kolmogorov-Smirnov Test", A Dissertation Submitted to the Graduate
Faculty of the Louisiana State University and Agricultural and Mechanical
College, (August, 2004),
https://digitalcommons.lsu.edu/gradschool_dissertations/3672
J.R.M. Hosking, "The four-parameter kappa distribution". IBM J. Res.
Develop. 38 (3), 251-258 (1994).
B. Kumphon, A. Kaew-Man, P. Seenoi, "A Rainfall Distribution for the Lampao
Site in the Chi River Basin, Thailand", Journal of Water Resource and
Protection, vol. 4, 866-869, (2012).
:doi:`10.4236/jwarp.2012.410101`
C. Winchester, "On Estimation of the Four-Parameter Kappa Distribution", A
Thesis Submitted to Dalhousie University, Halifax, Nova Scotia, (March
2000).
http://www.nlc-bnc.ca/obj/s4/f2/dsk2/ftp01/MQ57336.pdf
%(after_notes)s
%(example)s
"""
def _argcheck(self, h, k):
return h == h
def _get_support(self, h, k):
condlist = [np.logical_and(h > 0, k > 0),
np.logical_and(h > 0, k == 0),
np.logical_and(h > 0, k < 0),
np.logical_and(h <= 0, k > 0),
np.logical_and(h <= 0, k == 0),
np.logical_and(h <= 0, k < 0)]
def f0(h, k):
return (1.0 - float_power(h, -k))/k
def f1(h, k):
return np.log(h)
def f3(h, k):
a = np.empty(np.shape(h))
a[:] = -np.inf
return a
def f5(h, k):
return 1.0/k
_a = _lazyselect(condlist,
[f0, f1, f0, f3, f3, f5],
[h, k],
default=np.nan)
def f0(h, k):
return 1.0/k
def f1(h, k):
a = np.empty(np.shape(h))
a[:] = np.inf
return a
_b = _lazyselect(condlist,
[f0, f1, f1, f0, f1, f1],
[h, k],
default=np.nan)
return _a, _b
def _pdf(self, x, h, k):
# kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
# (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1)
return np.exp(self._logpdf(x, h, k))
def _logpdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*(
1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1.0)
logpdf = ...
'''
return (sc.xlog1py(1.0/k - 1.0, -k*x) +
sc.xlog1py(1.0/h - 1.0, -h*(1.0 - k*x)**(1.0/k)))
def f1(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*np.exp(-(
1.0 - k*x)**(1.0/k))
logpdf = ...
'''
return sc.xlog1py(1.0/k - 1.0, -k*x) - (1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''pdf = np.exp(-x)*(1.0 - h*np.exp(-x))**(1.0/h - 1.0)
logpdf = ...
'''
return -x + sc.xlog1py(1.0/h - 1.0, -h*np.exp(-x))
def f3(x, h, k):
'''pdf = np.exp(-x-np.exp(-x))
logpdf = ...
'''
return -x - np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _cdf(self, x, h, k):
return np.exp(self._logcdf(x, h, k))
def _logcdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''cdf = (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*(1.0 - k*x)**(1.0/k))
def f1(x, h, k):
'''cdf = np.exp(-(1.0 - k*x)**(1.0/k))
logcdf = ...
'''
return -(1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''cdf = (1.0 - h*np.exp(-x))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*np.exp(-x))
def f3(x, h, k):
'''cdf = np.exp(-np.exp(-x))
logcdf = ...
'''
return -np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _ppf(self, q, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(q, h, k):
return 1.0/k*(1.0 - ((1.0 - (q**h))/h)**k)
def f1(q, h, k):
return 1.0/k*(1.0 - (-np.log(q))**k)
def f2(q, h, k):
'''ppf = -np.log((1.0 - (q**h))/h)
'''
return -sc.log1p(-(q**h)) + np.log(h)
def f3(q, h, k):
return -np.log(-np.log(q))
return _lazyselect(condlist,
[f0, f1, f2, f3],
[q, h, k],
default=np.nan)
def _stats(self, h, k):
if h >= 0 and k >= 0:
maxr = 5
elif h < 0 and k >= 0:
maxr = int(-1.0/h*k)
elif k < 0:
maxr = int(-1.0/k)
else:
maxr = 5
outputs = [None if r < maxr else np.nan for r in range(1, 5)]
return outputs[:]
kappa4 = kappa4_gen(name='kappa4')
class kappa3_gen(rv_continuous):
r"""Kappa 3 parameter distribution.
%(before_notes)s
Notes
-----
The probability density function for `kappa3` is:
.. math::
f(x, a) = a (a + x^a)^{-(a + 1)/a}
for :math:`x > 0` and :math:`a > 0`.
`kappa3` takes ``a`` as a shape parameter for :math:`a`.
References
----------
P.W. Mielke and E.S. Johnson, "Three-Parameter Kappa Distribution Maximum
Likelihood and Likelihood Ratio Tests", Methods in Weather Research,
701-707, (September, 1973),
:doi:`10.1175/1520-0493(1973)101<0701:TKDMLE>2.3.CO;2`
B. Kumphon, "Maximum Entropy and Maximum Likelihood Estimation for the
Three-Parameter Kappa Distribution", Open Journal of Statistics, vol 2,
415-419 (2012), :doi:`10.4236/ojs.2012.24050`
%(after_notes)s
%(example)s
"""
def _argcheck(self, a):
return a > 0
def _pdf(self, x, a):
# kappa3.pdf(x, a) = a*(a + x**a)**(-(a + 1)/a), for x > 0
return a*(a + x**a)**(-1.0/a-1)
def _cdf(self, x, a):
return x*(a + x**a)**(-1.0/a)
def _ppf(self, q, a):
return (a/(q**-a - 1.0))**(1.0/a)
def _stats(self, a):
outputs = [None if i < a else np.nan for i in range(1, 5)]
return outputs[:]
kappa3 = kappa3_gen(a=0.0, name='kappa3')
class moyal_gen(rv_continuous):
r"""A Moyal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `moyal` is:
.. math::
f(x) = \exp(-(x + \exp(-x))/2) / \sqrt{2\pi}
for a real number :math:`x`.
%(after_notes)s
This distribution has utility in high-energy physics and radiation
detection. It describes the energy loss of a charged relativistic
particle due to ionization of the medium [1]_. It also provides an
approximation for the Landau distribution. For an in-depth description
see [2]_; for additional background, see [3]_.
References
----------
.. [1] J.E. Moyal, "XXX. Theory of ionization fluctuations",
The London, Edinburgh, and Dublin Philosophical Magazine
and Journal of Science, vol 46, 263-280, (1955).
:doi:`10.1080/14786440308521076` (gated)
.. [2] G. Cordeiro et al., "The beta Moyal: a useful skew distribution",
International Journal of Research and Reviews in Applied Sciences,
vol 10, 171-192, (2012).
http://www.arpapress.com/Volumes/Vol10Issue2/IJRRAS_10_2_02.pdf
.. [3] C. Walck, "Handbook on Statistical Distributions for
Experimentalists; International Report SUF-PFY/96-01", Chapter 26,
University of Stockholm: Stockholm, Sweden, (2007).
http://www.stat.rice.edu/~dobelman/textfiles/DistributionsHandbook.pdf
.. versionadded:: 1.1.0
%(example)s
"""
def _rvs(self, size=None, random_state=None):
u1 = gamma.rvs(a=0.5, scale=2, size=size, random_state=random_state)
return -np.log(u1)
def _pdf(self, x):
return np.exp(-0.5 * (x + np.exp(-x))) / np.sqrt(2*np.pi)
def _cdf(self, x):
return sc.erfc(np.exp(-0.5 * x) / np.sqrt(2))
def _sf(self, x):
return sc.erf(np.exp(-0.5 * x) / np.sqrt(2))
def _ppf(self, x):
return -np.log(2 * sc.erfcinv(x)**2)
def _stats(self):
mu = np.log(2) + np.euler_gamma
mu2 = np.pi**2 / 2
g1 = 28 * np.sqrt(2) * sc.zeta(3) / np.pi**3
g2 = 4.
return mu, mu2, g1, g2
def _munp(self, n):
if n == 1.0:
return np.log(2) + np.euler_gamma
elif n == 2.0:
return np.pi**2 / 2 + (np.log(2) + np.euler_gamma)**2
elif n == 3.0:
tmp1 = 1.5 * np.pi**2 * (np.log(2)+np.euler_gamma)
tmp2 = (np.log(2)+np.euler_gamma)**3
tmp3 = 14 * sc.zeta(3)
return tmp1 + tmp2 + tmp3
elif n == 4.0:
tmp1 = 4 * 14 * sc.zeta(3) * (np.log(2) + np.euler_gamma)
tmp2 = 3 * np.pi**2 * (np.log(2) + np.euler_gamma)**2
tmp3 = (np.log(2) + np.euler_gamma)**4
tmp4 = 7 * np.pi**4 / 4
return tmp1 + tmp2 + tmp3 + tmp4
else:
# return generic for higher moments
# return rv_continuous._mom1_sc(self, n, b)
return self._mom1_sc(n)
moyal = moyal_gen(name="moyal")
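# Editorial sketch (illustrative only): `_rvs` above draws -log(G) with
# G ~ gamma(0.5, scale=2), i.e. minus the log of a chi-squared variate with one
# degree of freedom, so the cdf should satisfy moyal.cdf(x) == chi2.sf(exp(-x), 1).
# The helper name and grid are assumptions introduced here.
def _moyal_chi2_relation_check():
    x = np.linspace(-3, 10, 50)
    return np.allclose(moyal.cdf(x), chi2.sf(np.exp(-x), 1))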
class nakagami_gen(rv_continuous):
r"""A Nakagami continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nakagami` is:
.. math::
f(x, \nu) = \frac{2 \nu^\nu}{\Gamma(\nu)} x^{2\nu-1} \exp(-\nu x^2)
for :math:`x >= 0`, :math:`\nu > 0`.
`nakagami` takes ``nu`` as a shape parameter for :math:`\nu`.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, nu):
return np.exp(self._logpdf(x, nu))
def _logpdf(self, x, nu):
# nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
# x**(2*nu-1) * exp(-nu*x**2)
return (np.log(2) + sc.xlogy(nu, nu) - sc.gammaln(nu) +
sc.xlogy(2*nu - 1, x) - nu*x**2)
def _cdf(self, x, nu):
return sc.gammainc(nu, nu*x*x)
def _ppf(self, q, nu):
return np.sqrt(1.0/nu*sc.gammaincinv(nu, q))
def _sf(self, x, nu):
return sc.gammaincc(nu, nu*x*x)
def _isf(self, p, nu):
return np.sqrt(1/nu * sc.gammainccinv(nu, p))
def _stats(self, nu):
mu = sc.gamma(nu+0.5)/sc.gamma(nu)/np.sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5)
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami")
class ncx2_gen(rv_continuous):
r"""A non-central chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncx2` is:
.. math::
f(x, k, \lambda) = \frac{1}{2} \exp(-(\lambda+x)/2)
(x/\lambda)^{(k-2)/4} I_{(k-2)/2}(\sqrt{\lambda x})
for :math:`x >= 0` and :math:`k, \lambda > 0`. :math:`k` specifies the
degrees of freedom (denoted ``df`` in the implementation) and
:math:`\lambda` is the non-centrality parameter (denoted ``nc`` in the
implementation). :math:`I_\nu` denotes the modified Bessel function of
the first kind of order :math:`\nu` (`scipy.special.iv`).
`ncx2` takes ``df`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, df, nc):
return (df > 0) & (nc >= 0)
def _rvs(self, df, nc, size=None, random_state=None):
return random_state.noncentral_chisquare(df, nc, size)
def _logpdf(self, x, df, nc):
cond = np.ones_like(x, dtype=bool) & (nc != 0)
return _lazywhere(cond, (x, df, nc), f=_ncx2_log_pdf, f2=chi2.logpdf)
def _pdf(self, x, df, nc):
# ncx2.pdf(x, df, nc) = exp(-(nc+x)/2) * 1/2 * (x/nc)**((df-2)/4)
# * I[(df-2)/2](sqrt(nc*x))
cond = np.ones_like(x, dtype=bool) & (nc != 0)
return _lazywhere(cond, (x, df, nc), f=_ncx2_pdf, f2=chi2.pdf)
def _cdf(self, x, df, nc):
cond = np.ones_like(x, dtype=bool) & (nc != 0)
return _lazywhere(cond, (x, df, nc), f=_ncx2_cdf, f2=chi2.cdf)
def _ppf(self, q, df, nc):
cond = np.ones_like(q, dtype=bool) & (nc != 0)
return _lazywhere(cond, (q, df, nc), f=sc.chndtrix, f2=chi2.ppf)
def _stats(self, df, nc):
val = df + 2.0*nc
return (df + nc,
2*val,
np.sqrt(8)*(val+nc)/val**1.5,
12.0*(val+2*nc)/val**2.0)
ncx2 = ncx2_gen(a=0.0, name='ncx2')
class ncf_gen(rv_continuous):
r"""A non-central F distribution continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncf` is:
.. math::
f(x, n_1, n_2, \lambda) =
\exp\left(\frac{\lambda}{2} +
\lambda n_1 \frac{x}{2(n_1 x + n_2)}
\right)
n_1^{n_1/2} n_2^{n_2/2} x^{n_1/2 - 1} \\
(n_2 + n_1 x)^{-(n_1 + n_2)/2}
\gamma(n_1/2) \gamma(1 + n_2/2) \\
\frac{L^{\frac{n_1}{2}-1}_{n_2/2}
\left(-\lambda n_1 \frac{x}{2(n_1 x + n_2)}\right)}
{B(n_1/2, n_2/2)
\gamma\left(\frac{n_1 + n_2}{2}\right)}
for :math:`n_1, n_2 > 0`, :math:`\lambda\geq 0`. Here :math:`n_1` is the
degrees of freedom in the numerator, :math:`n_2` the degrees of freedom in
the denominator, :math:`\lambda` the non-centrality parameter,
:math:`\gamma` is the logarithm of the Gamma function, :math:`L_n^k` is a
generalized Laguerre polynomial and :math:`B` is the beta function.
`ncf` takes ``df1``, ``df2`` and ``nc`` as shape parameters. If ``nc=0``,
the distribution becomes equivalent to the Fisher distribution.
%(after_notes)s
See Also
--------
scipy.stats.f : Fisher distribution
%(example)s
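Setting ``nc=0`` recovers the central Fisher F distribution; a rough
numerical check with arbitrary points:
>>> import numpy as np
>>> from scipy import stats
>>> x = np.linspace(0.5, 4.0, 5)
>>> np.allclose(stats.ncf.cdf(x, 3, 5, 0), stats.f.cdf(x, 3, 5))
True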
"""
def _argcheck(self, df1, df2, nc):
return (df1 > 0) & (df2 > 0) & (nc >= 0)
def _rvs(self, dfn, dfd, nc, size=None, random_state=None):
return random_state.noncentral_f(dfn, dfd, nc, size)
def _pdf_skip(self, x, dfn, dfd, nc):
# ncf.pdf(x, df1, df2, nc) = exp(-nc/2 + nc*df1*x/(2*(df1*x+df2))) *
# df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) *
# (df2+df1*x)**(-(df1+df2)/2) *
# gamma(df1/2)*gamma(1+df2/2) *
# L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2))) /
# (B(v1/2, v2/2) * gamma((v1+v2)/2))
n1, n2 = dfn, dfd
term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + sc.gammaln(n1/2.)+sc.gammaln(1+n2/2.)
term -= sc.gammaln((n1+n2)/2.0)
Px = np.exp(term)
Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
Px *= (n2+n1*x)**(-(n1+n2)/2)
Px *= sc.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)), n2/2, n1/2-1)
Px /= sc.beta(n1/2, n2/2)
# This function does not have a return. Drop it for now, the generic
# function seems to work OK.
def _cdf(self, x, dfn, dfd, nc):
return sc.ncfdtr(dfn, dfd, nc, x)
def _ppf(self, q, dfn, dfd, nc):
return sc.ncfdtri(dfn, dfd, nc, q)
def _munp(self, n, dfn, dfd, nc):
val = (dfn * 1.0/dfd)**n
term = sc.gammaln(n+0.5*dfn) + sc.gammaln(0.5*dfd-n) - sc.gammaln(dfd*0.5)
val *= np.exp(-nc / 2.0+term)
val *= sc.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
return val
def _stats(self, dfn, dfd, nc):
# Note: the rv_continuous class ensures that dfn > 0 when this function
# is called, so we don't have to check for division by zero with dfn
# in the following.
mu_num = dfd * (dfn + nc)
mu_den = dfn * (dfd - 2)
mu = np.full_like(mu_num, dtype=np.float64, fill_value=np.inf)
np.true_divide(mu_num, mu_den, where=dfd > 2, out=mu)
mu2_num = 2*((dfn + nc)**2 + (dfn + 2*nc)*(dfd - 2))*(dfd/dfn)**2
mu2_den = (dfd - 2)**2 * (dfd - 4)
mu2 = np.full_like(mu2_num, dtype=np.float64, fill_value=np.inf)
np.true_divide(mu2_num, mu2_den, where=dfd > 4, out=mu2)
return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf')
class t_gen(rv_continuous):
r"""A Student's t continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `t` is:
.. math::
f(x, \nu) = \frac{\Gamma((\nu+1)/2)}
{\sqrt{\pi \nu} \Gamma(\nu/2)}
(1+x^2/\nu)^{-(\nu+1)/2}
where :math:`x` is a real number and the degrees of freedom parameter
:math:`\nu` (denoted ``df`` in the implementation) satisfies
:math:`\nu > 0`. :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
%(after_notes)s
%(example)s
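The distribution is symmetric about zero, so the survival function can be
checked against the CDF (arbitrary points):
>>> import numpy as np
>>> from scipy import stats
>>> x = np.linspace(-3, 3, 7)
>>> np.allclose(stats.t.sf(x, 4), stats.t.cdf(-x, 4))
True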
"""
def _argcheck(self, df):
return df > 0
def _rvs(self, df, size=None, random_state=None):
return random_state.standard_t(df, size=size)
def _pdf(self, x, df):
# gamma((df+1)/2)
# t.pdf(x, df) = ---------------------------------------------------
# sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
r = np.asarray(df*1.0)
Px = np.exp(sc.gammaln((r+1)/2)-sc.gammaln(r/2))
Px /= np.sqrt(r*np.pi)*(1+(x**2)/r)**((r+1)/2)
return Px
def _logpdf(self, x, df):
r = df*1.0
lPx = sc.gammaln((r+1)/2)-sc.gammaln(r/2)
lPx -= 0.5*np.log(r*np.pi) + (r+1)/2*np.log(1+(x**2)/r)
return lPx
def _cdf(self, x, df):
return sc.stdtr(df, x)
def _sf(self, x, df):
return sc.stdtr(df, -x)
def _ppf(self, q, df):
return sc.stdtrit(df, q)
def _isf(self, q, df):
return -sc.stdtrit(df, q)
def _stats(self, df):
mu = np.where(df > 1, 0.0, np.inf)
mu2 = _lazywhere(df > 2, (df,),
lambda df: df / (df-2.0),
np.inf)
mu2 = np.where(df <= 1, np.nan, mu2)
g1 = np.where(df > 3, 0.0, np.nan)
g2 = _lazywhere(df > 4, (df,),
lambda df: 6.0 / (df-4.0),
np.inf)
g2 = np.where(df <= 2, np.nan, g2)
return mu, mu2, g1, g2
t = t_gen(name='t')
class nct_gen(rv_continuous):
r"""A non-central Student's t continuous random variable.
%(before_notes)s
Notes
-----
If :math:`Y` is a standard normal random variable and :math:`V` is
an independent chi-square random variable (`chi2`) with :math:`k` degrees
of freedom, then
.. math::
X = \frac{Y + c}{\sqrt{V/k}}
has a non-central Student's t distribution on the real line.
The degrees of freedom parameter :math:`k` (denoted ``df`` in the
implementation) satisfies :math:`k > 0` and the noncentrality parameter
:math:`c` (denoted ``nc`` in the implementation) is a real number.
%(after_notes)s
%(example)s
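With ``nc=0`` the distribution reduces to the central Student's t
distribution; a rough numerical check with arbitrary points:
>>> import numpy as np
>>> from scipy import stats
>>> x = np.linspace(-3, 3, 7)
>>> np.allclose(stats.nct.cdf(x, 4, 0), stats.t.cdf(x, 4))
True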
"""
def _argcheck(self, df, nc):
return (df > 0) & (nc == nc)
def _rvs(self, df, nc, size=None, random_state=None):
n = norm.rvs(loc=nc, size=size, random_state=random_state)
c2 = chi2.rvs(df, size=size, random_state=random_state)
return n * np.sqrt(df) / np.sqrt(c2)
def _pdf(self, x, df, nc):
n = df*1.0
nc = nc*1.0
x2 = x*x
ncx2 = nc*nc*x2
fac1 = n + x2
trm1 = n/2.*np.log(n) + sc.gammaln(n+1)
trm1 -= n*np.log(2)+nc*nc/2.+(n/2.)*np.log(fac1)+sc.gammaln(n/2.)
Px = np.exp(trm1)
valF = ncx2 / (2*fac1)
trm1 = np.sqrt(2)*nc*x*sc.hyp1f1(n/2+1, 1.5, valF)
trm1 /= np.asarray(fac1*sc.gamma((n+1)/2))
trm2 = sc.hyp1f1((n+1)/2, 0.5, valF)
trm2 /= np.asarray(np.sqrt(fac1)*sc.gamma(n/2+1))
Px *= trm1+trm2
return Px
def _cdf(self, x, df, nc):
return sc.nctdtr(df, nc, x)
def _ppf(self, q, df, nc):
return sc.nctdtrit(df, nc, q)
def _stats(self, df, nc, moments='mv'):
#
# See D. Hogben, R.S. Pinkham, and M.B. Wilk,
# 'The moments of the non-central t-distribution'
# Biometrika 48, p. 465 (1961).
# e.g. https://www.jstor.org/stable/2332772 (gated)
#
mu, mu2, g1, g2 = None, None, None, None
gfac = sc.gamma(df/2.-0.5) / sc.gamma(df/2.)
c11 = np.sqrt(df/2.) * gfac
c20 = df / (df-2.)
c22 = c20 - c11*c11
mu = np.where(df > 1, nc*c11, np.inf)
mu2 = np.where(df > 2, c22*nc*nc + c20, np.inf)
if 's' in moments:
c33t = df * (7.-2.*df) / (df-2.) / (df-3.) + 2.*c11*c11
c31t = 3.*df / (df-2.) / (df-3.)
mu3 = (c33t*nc*nc + c31t) * c11*nc
g1 = np.where(df > 3, mu3 / np.power(mu2, 1.5), np.nan)
# kurtosis
if 'k' in moments:
c44 = df*df / (df-2.) / (df-4.)
c44 -= c11*c11 * 2.*df*(5.-df) / (df-2.) / (df-3.)
c44 -= 3.*c11**4
c42 = df / (df-4.) - c11*c11 * (df-1.) / (df-3.)
c42 *= 6.*df / (df-2.)
c40 = 3.*df*df / (df-2.) / (df-4.)
mu4 = c44 * nc**4 + c42*nc**2 + c40
g2 = np.where(df > 4, mu4/mu2**2 - 3., np.nan)
return mu, mu2, g1, g2
nct = nct_gen(name="nct")
class pareto_gen(rv_continuous):
r"""A Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pareto` is:
.. math::
f(x, b) = \frac{b}{x^{b+1}}
for :math:`x \ge 1`, :math:`b > 0`.
`pareto` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
%(example)s
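A rough numerical check of the closed-form survival function
:math:`S(x, b) = x^{-b}` (arbitrary points):
>>> import numpy as np
>>> from scipy import stats
>>> x = np.linspace(1.5, 10.0, 5)
>>> np.allclose(stats.pareto.sf(x, 2.5), x**(-2.5))
True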
"""
def _pdf(self, x, b):
# pareto.pdf(x, b) = b / x**(b+1)
return b * x**(-b-1)
def _cdf(self, x, b):
return 1 - x**(-b)
def _ppf(self, q, b):
return pow(1-q, -1.0/b)
def _sf(self, x, b):
return x**(-b)
def _stats(self, b, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
if 'm' in moments:
mask = b > 1
bt = np.extract(mask, b)
mu = np.full(np.shape(b), fill_value=np.inf)
np.place(mu, mask, bt / (bt-1.0))
if 'v' in moments:
mask = b > 2
bt = np.extract(mask, b)
mu2 = np.full(np.shape(b), fill_value=np.inf)
np.place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
if 's' in moments:
mask = b > 3
bt = np.extract(mask, b)
g1 = np.full(np.shape(b), fill_value=np.nan)
vals = 2 * (bt + 1.0) * np.sqrt(bt - 2.0) / ((bt - 3.0) * np.sqrt(bt))
np.place(g1, mask, vals)
if 'k' in moments:
mask = b > 4
bt = np.extract(mask, b)
g2 = np.full(np.shape(b), fill_value=np.nan)
vals = (6.0*np.polyval([1.0, 1.0, -6, -2], bt) /
np.polyval([1.0, -7.0, 12.0, 0.0], bt))
np.place(g2, mask, vals)
return mu, mu2, g1, g2
def _entropy(self, c):
return 1 + 1.0/c - np.log(c)
def fit(self, data, *args, **kwds):
parameters = _check_fit_input_parameters(self, data, args, kwds)
data, fshape, floc, fscale = parameters
if floc is None:
return super(pareto_gen, self).fit(data, **kwds)
if np.any(data - floc < (fscale if fscale else 0)):
raise FitDataError("pareto", lower=1, upper=np.inf)
data = data - floc
# Source: Evans, Hastings, and Peacock (2000), Statistical
# Distributions, 3rd. Ed., John Wiley and Sons. Page 149.
if fscale is None:
fscale = np.min(data)
if fshape is None:
fshape = 1/((1/len(data)) * np.sum(np.log(data/fscale)))
return fshape, floc, fscale
pareto = pareto_gen(a=1.0, name="pareto")
class lomax_gen(rv_continuous):
r"""A Lomax (Pareto of the second kind) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lomax` is:
.. math::
f(x, c) = \frac{c}{(1+x)^{c+1}}
for :math:`x \ge 0`, :math:`c > 0`.
`lomax` takes ``c`` as a shape parameter for :math:`c`.
`lomax` is a special case of `pareto` with ``loc=-1.0``.
%(after_notes)s
%(example)s
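A rough numerical check of the relationship to `pareto` stated above
(arbitrary points):
>>> import numpy as np
>>> from scipy import stats
>>> x = np.linspace(0.5, 5.0, 5)
>>> np.allclose(stats.lomax.pdf(x, 2.0), stats.pareto.pdf(x, 2.0, loc=-1.0))
True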
"""
def _pdf(self, x, c):
# lomax.pdf(x, c) = c / (1+x)**(c+1)
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return np.log(c) - (c+1)*sc.log1p(x)
def _cdf(self, x, c):
return -sc.expm1(-c*sc.log1p(x))
def _sf(self, x, c):
return np.exp(-c*sc.log1p(x))
def _logsf(self, x, c):
return -c*sc.log1p(x)
def _ppf(self, q, c):
return sc.expm1(-sc.log1p(-q)/c)
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-np.log(c)
lomax = lomax_gen(a=0.0, name="lomax")
class pearson3_gen(rv_continuous):
r"""A pearson type III continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pearson3` is:
.. math::
f(x, \kappa) = \frac{|\beta|}{\Gamma(\alpha)}
(\beta (x - \zeta))^{\alpha - 1}
\exp(-\beta (x - \zeta))
where:
.. math::
\beta = \frac{2}{\kappa}
\alpha = \beta^2 = \frac{4}{\kappa^2}
\zeta = -\frac{\alpha}{\beta} = -\beta
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
Pass the skew :math:`\kappa` into `pearson3` as the shape parameter
``skew``.
%(after_notes)s
%(example)s
References
----------
R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
Resources Research, Vol.27, 3149-3158 (1991).
L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
Vol.1, 191-198 (1930).
"Using Modern Computing Tools to Fit the Pearson Type III Distribution to
Aviation Loads Data", Office of Aviation Research (2003).
"""
def _preprocess(self, x, skew):
# The real 'loc' and 'scale' are handled in the calling pdf(...). The
# local variables 'loc' and 'scale' within pearson3._pdf are set to
# the defaults just to keep them as part of the equations for
# documentation.
loc = 0.0
scale = 1.0
# If skew is small, return _norm_pdf. The divide between pearson3
# and norm was found by brute force and is approximately a skew of
# 0.000016. No one, I hope, would actually use a skew value even
# close to this small.
norm2pearson_transition = 0.000016
ans, x, skew = np.broadcast_arrays([1.0], x, skew)
ans = ans.copy()
# mask is True where skew is small enough to use the normal approx.
mask = np.absolute(skew) < norm2pearson_transition
invmask = ~mask
beta = 2.0 / (skew[invmask] * scale)
alpha = (scale * beta)**2
zeta = loc - alpha / beta
transx = beta * (x[invmask] - zeta)
return ans, x, transx, mask, invmask, beta, alpha, zeta
def _argcheck(self, skew):
# The _argcheck function in rv_continuous only allows positive
# arguments. The skew argument for pearson3 can be zero (which I want
# to handle inside pearson3._pdf) or negative. So just return True
# for all skew args.
return np.ones(np.shape(skew), dtype=bool)
def _stats(self, skew):
m = 0.0
v = 1.0
s = skew
k = 1.5*skew**2
return m, v, s, k
def _pdf(self, x, skew):
# pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
# (beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
# Do the calculation in _logpdf since helps to limit
# overflow/underflow problems
ans = np.exp(self._logpdf(x, skew))
if ans.ndim == 0:
if np.isnan(ans):
return 0.0
return ans
ans[np.isnan(ans)] = 0.0
return ans
def _logpdf(self, x, skew):
# PEARSON3 logpdf GAMMA logpdf
# np.log(abs(beta))
# + (alpha - 1)*np.log(beta*(x - zeta)) + (a - 1)*np.log(x)
# - beta*(x - zeta) - x
# - sc.gammaln(alpha) - sc.gammaln(a)
ans, x, transx, mask, invmask, beta, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = np.log(_norm_pdf(x[mask]))
# use logpdf instead of _logpdf to fix issue mentioned in gh-12640
# (_logpdf does not return correct result for alpha = 1)
ans[invmask] = np.log(abs(beta)) + gamma.logpdf(transx, alpha)
return ans
def _cdf(self, x, skew):
ans, x, transx, mask, invmask, _, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = _norm_cdf(x[mask])
invmask1a = np.logical_and(invmask, skew > 0)
invmask1b = skew[invmask] > 0
# use cdf instead of _cdf to fix issue mentioned in gh-12640
# (_cdf produces NaNs for inputs outside support)
ans[invmask1a] = gamma.cdf(transx[invmask1b], alpha[invmask1b])
# The gamma._cdf approach wasn't working with negative skew.
# Note that multiplying the skew by -1 reflects about x=0.
# So instead of evaluating the CDF with negative skew at x,
# evaluate the SF with positive skew at -x.
invmask2a = np.logical_and(invmask, skew < 0)
invmask2b = skew[invmask] < 0
# gamma._sf produces NaNs when transx < 0, so use gamma.sf
ans[invmask2a] = gamma.sf(transx[invmask2b], alpha[invmask2b])
return ans
def _rvs(self, skew, size=None, random_state=None):
skew = np.broadcast_to(skew, size)
ans, _, _, mask, invmask, beta, alpha, zeta = (
self._preprocess([0], skew))
nsmall = mask.sum()
nbig = mask.size - nsmall
ans[mask] = random_state.standard_normal(nsmall)
ans[invmask] = random_state.standard_gamma(alpha, nbig)/beta + zeta
if size == ():
ans = ans[0]
return ans
def _ppf(self, q, skew):
ans, q, _, mask, invmask, beta, alpha, zeta = (
self._preprocess(q, skew))
ans[mask] = _norm_ppf(q[mask])
ans[invmask] = sc.gammaincinv(alpha, q[invmask])/beta + zeta
return ans
pearson3 = pearson3_gen(name="pearson3")
class powerlaw_gen(rv_continuous):
r"""A power-function continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlaw` is:
.. math::
f(x, a) = a x^{a-1}
for :math:`0 \le x \le 1`, :math:`a > 0`.
`powerlaw` takes ``a`` as a shape parameter for :math:`a`.
%(after_notes)s
`powerlaw` is a special case of `beta` with ``b=1``.
%(example)s
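A rough numerical check of the `beta` special case noted above
(arbitrary points):
>>> import numpy as np
>>> from scipy import stats
>>> x = np.linspace(0.1, 0.9, 5)
>>> np.allclose(stats.powerlaw.pdf(x, 1.7), stats.beta.pdf(x, 1.7, 1))
True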
"""
def _pdf(self, x, a):
# powerlaw.pdf(x, a) = a * x**(a-1)
return a*x**(a-1.0)
def _logpdf(self, x, a):
return np.log(a) + sc.xlogy(a - 1, x)
def _cdf(self, x, a):
return x**(a*1.0)
def _logcdf(self, x, a):
return a*np.log(x)
def _ppf(self, q, a):
return pow(q, 1.0/a)
def _stats(self, a):
return (a / (a + 1.0),
a / (a + 2.0) / (a + 1.0) ** 2,
-2.0 * ((a - 1.0) / (a + 3.0)) * np.sqrt((a + 2.0) / a),
6 * np.polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4)))
def _entropy(self, a):
return 1 - 1.0/a - np.log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw")
class powerlognorm_gen(rv_continuous):
r"""A power log-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlognorm` is:
.. math::
f(x, c, s) = \frac{c}{x s} \phi(\log(x)/s)
(\Phi(-\log(x)/s))^{c-1}
where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf,
and :math:`x > 0`, :math:`s, c > 0`.
`powerlognorm` takes :math:`c` and :math:`s` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, s):
# powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
# (Phi(-log(x)/s))**(c-1),
return (c/(x*s) * _norm_pdf(np.log(x)/s) *
pow(_norm_cdf(-np.log(x)/s), c*1.0-1.0))
def _cdf(self, x, c, s):
return 1.0 - pow(_norm_cdf(-np.log(x)/s), c*1.0)
def _ppf(self, q, c, s):
return np.exp(-s * _norm_ppf(pow(1.0 - q, 1.0 / c)))
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
class powernorm_gen(rv_continuous):
r"""A power normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powernorm` is:
.. math::
f(x, c) = c \phi(x) (\Phi(-x))^{c-1}
where :math:`\phi` is the normal pdf, and :math:`\Phi` is the normal cdf,
and :math:`x >= 0`, :math:`c > 0`.
`powernorm` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
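A rough numerical check of the CDF against its definition in terms of the
normal CDF (arbitrary points):
>>> import numpy as np
>>> from scipy import stats
>>> x = np.linspace(0.2, 2.0, 5)
>>> np.allclose(stats.powernorm.cdf(x, 2.5), 1 - stats.norm.cdf(-x)**2.5)
True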
"""
def _pdf(self, x, c):
# powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
return c*_norm_pdf(x) * (_norm_cdf(-x)**(c-1.0))
def _logpdf(self, x, c):
return np.log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
def _cdf(self, x, c):
return 1.0-_norm_cdf(-x)**(c*1.0)
def _ppf(self, q, c):
return -_norm_ppf(pow(1.0 - q, 1.0 / c))
powernorm = powernorm_gen(name='powernorm')
class rdist_gen(rv_continuous):
r"""An R-distributed (symmetric beta) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rdist` is:
.. math::
f(x, c) = \frac{(1-x^2)^{c/2-1}}{B(1/2, c/2)}
for :math:`-1 \le x \le 1`, :math:`c > 0`. `rdist` is also called the
symmetric beta distribution: if B has a `beta` distribution with
parameters (c/2, c/2), then X = 2*B - 1 follows an R-distribution with
parameter c.
`rdist` takes ``c`` as a shape parameter for :math:`c`.
This distribution includes the following distribution kernels as
special cases::
c = 2: uniform
c = 3: `semicircular`
c = 4: Epanechnikov (parabolic)
c = 6: quartic (biweight)
c = 8: triweight
%(after_notes)s
%(example)s
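A rough numerical check of the relationship to `beta` described above
(arbitrary points):
>>> import numpy as np
>>> from scipy import stats
>>> x = np.linspace(-0.9, 0.9, 5)
>>> np.allclose(stats.rdist.pdf(x, 3.5),
...             0.5 * stats.beta.pdf((x + 1) / 2, 1.75, 1.75))
True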
"""
# use relation to the beta distribution for pdf, cdf, etc
def _pdf(self, x, c):
return 0.5*beta._pdf((x + 1)/2, c/2, c/2)
def _logpdf(self, x, c):
return -np.log(2) + beta._logpdf((x + 1)/2, c/2, c/2)
def _cdf(self, x, c):
return beta._cdf((x + 1)/2, c/2, c/2)
def _ppf(self, q, c):
return 2*beta._ppf(q, c/2, c/2) - 1
def _rvs(self, c, size=None, random_state=None):
return 2 * random_state.beta(c/2, c/2, size) - 1
def _munp(self, n, c):
numerator = (1 - (n % 2)) * sc.beta((n + 1.0) / 2, c / 2.0)
return numerator / sc.beta(1. / 2, c / 2.)
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist")
class rayleigh_gen(rv_continuous):
r"""A Rayleigh continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rayleigh` is:
.. math::
f(x) = x \exp(-x^2/2)
for :math:`x \ge 0`.
`rayleigh` is a special case of `chi` with ``df=2``.
%(after_notes)s
%(example)s
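A rough numerical check of the `chi` special case noted above
(arbitrary points):
>>> import numpy as np
>>> from scipy import stats
>>> x = np.linspace(0.5, 4.0, 5)
>>> np.allclose(stats.rayleigh.pdf(x), stats.chi.pdf(x, 2))
True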
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, size=None, random_state=None):
return chi.rvs(2, size=size, random_state=random_state)
def _pdf(self, r):
# rayleigh.pdf(r) = r * exp(-r**2/2)
return np.exp(self._logpdf(r))
def _logpdf(self, r):
return np.log(r) - 0.5 * r * r
def _cdf(self, r):
return -sc.expm1(-0.5 * r**2)
def _ppf(self, q):
return np.sqrt(-2 * sc.log1p(-q))
def _sf(self, r):
return np.exp(self._logsf(r))
def _logsf(self, r):
return -0.5 * r * r
def _isf(self, q):
return np.sqrt(-2 * np.log(q))
def _stats(self):
val = 4 - np.pi
return (np.sqrt(np.pi/2),
val/2,
2*(np.pi-3)*np.sqrt(np.pi)/val**1.5,
6*np.pi/val-16/val**2)
def _entropy(self):
return _EULER/2.0 + 1 - 0.5*np.log(2)
@extend_notes_in_docstring(rv_continuous, notes="""\
When the scale parameter is fixed by using the `fscale` argument,
this function uses the default optimization method to determine
the MLE. If the location parameter is fixed by using the `floc`
argument, the analytical formula for the estimate of the scale
is used. If neither parameter is fixed, the analytical MLE for
`scale` is used as a function of `loc` in numerical optimization
of `loc`, injecting corresponding analytical optimum for
`scale`.\n\n""")
def fit(self, data, *args, **kwds):
data, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
def scale_mle(loc, data):
# Source: Statistical Distributions, 3rd Edition. Evans, Hastings,
# and Peacock (2000), Page 175
return (np.sum((data - loc) ** 2) / (2 * len(data))) ** .5
if floc is not None:
# `loc` is fixed, analytically determine `scale`.
if np.any(data - floc <= 0):
raise FitDataError("rayleigh", lower=1, upper=np.inf)
else:
return floc, scale_mle(floc, data)
if fscale is not None:
# `scale` is fixed, but we cannot analytically determine `loc`, so
# we use the superclass fit method.
return super(rayleigh_gen,
self).fit(data, *args, **kwds)
else:
# `floc` and `fscale` are not fixed. Use the analytical MLE for
# `scale` as a function of `loc` in numerical optimization of
# `loc`, injecting corresponding analytical optimum for `scale`.
# account for user provided guesses
loc, scale = self._fitstart(data) # rv_continuous provided guesses
loc = kwds.pop('loc', loc) # only `loc` user guesses are relevant
# the second argument rv_continuous._reduce_func returns is the
# log-likelihood function. Its inputs are the initial guesses for
# `loc` and `scale`, but these are not modified in the scenario
# where neither `floc` or `fscale` is provided in kwds.
_, ll, _, _ = self._reduce_func((loc, scale), kwds)
optimizer = _fit_determine_optimizer(kwds.pop('optimizer',
optimize.fmin))
# wrap log-likelihood function to optimize only over `loc`
def func(loc, data):
return ll([loc, scale_mle(loc, data)], data)
loc = optimizer(func, loc, args=(data,), disp=0)
return loc[0], scale_mle(loc, data)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
class reciprocal_gen(rv_continuous):
r"""A loguniform or reciprocal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for this class is:
.. math::
f(x, a, b) = \frac{1}{x \log(b/a)}
for :math:`a \le x \le b`, :math:`b > a > 0`. This class takes
:math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
This does not show the equal probability of ``0.01``, ``0.1`` and
``1``. This is best seen when the x-axis is log-scaled:
>>> import numpy as np
>>> fig, ax = plt.subplots(1, 1)
>>> ax.hist(np.log10(r))
>>> ax.set_ylabel("Frequency")
>>> ax.set_xlabel("Value of random variable")
>>> ax.xaxis.set_major_locator(plt.FixedLocator([-2, -1, 0]))
>>> ticks = ["$10^{{ {} }}$".format(i) for i in [-2, -1, 0]]
>>> ax.set_xticklabels(ticks) # doctest: +SKIP
>>> plt.show()
This random variable will be log-uniform regardless of the base chosen for
``a`` and ``b``. Let's specify with base ``2`` instead:
>>> rvs = %(name)s(2**-2, 2**0).rvs(size=1000)
Values of ``1/4``, ``1/2`` and ``1`` are equally likely with this random
variable. Here's the histogram:
>>> fig, ax = plt.subplots(1, 1)
>>> ax.hist(np.log2(rvs))
>>> ax.set_ylabel("Frequency")
>>> ax.set_xlabel("Value of random variable")
>>> ax.xaxis.set_major_locator(plt.FixedLocator([-2, -1, 0]))
>>> ticks = ["$2^{{ {} }}$".format(i) for i in [-2, -1, 0]]
>>> ax.set_xticklabels(ticks) # doctest: +SKIP
>>> plt.show()
"""
def _argcheck(self, a, b):
return (a > 0) & (b > a)
def _get_support(self, a, b):
return a, b
def _pdf(self, x, a, b):
# reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
return 1.0 / (x * np.log(b * 1.0 / a))
def _logpdf(self, x, a, b):
return -np.log(x) - np.log(np.log(b * 1.0 / a))
def _cdf(self, x, a, b):
return (np.log(x)-np.log(a)) / np.log(b * 1.0 / a)
def _ppf(self, q, a, b):
return a*pow(b*1.0/a, q)
def _munp(self, n, a, b):
return 1.0/np.log(b*1.0/a) / n * (pow(b*1.0, n) - pow(a*1.0, n))
def _entropy(self, a, b):
return 0.5*np.log(a*b)+np.log(np.log(b*1.0/a))
loguniform = reciprocal_gen(name="loguniform")
reciprocal = reciprocal_gen(name="reciprocal")
class rice_gen(rv_continuous):
r"""A Rice continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rice` is:
.. math::
f(x, b) = x \exp(- \frac{x^2 + b^2}{2}) I_0(x b)
for :math:`x >= 0`, :math:`b > 0`. :math:`I_0` is the modified Bessel
function of order zero (`scipy.special.i0`).
`rice` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
The Rice distribution describes the length, :math:`r`, of a 2-D vector with
components :math:`(U+u, V+v)`, where :math:`U, V` are constant, :math:`u,
v` are independent Gaussian random variables with standard deviation
:math:`s`. Let :math:`R = \sqrt{U^2 + V^2}`. Then the pdf of :math:`r` is
``rice.pdf(x, R/s, scale=s)``.
%(example)s
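Because :math:`R^2` has a non-central chi-squared distribution with two
degrees of freedom and non-centrality :math:`b^2`, the CDF can be checked
against `ncx2` (a rough check; arbitrary points):
>>> import numpy as np
>>> from scipy import stats
>>> x = np.linspace(0.5, 4.0, 5)
>>> np.allclose(stats.rice.cdf(x, 2.0), stats.ncx2.cdf(x**2, 2, 4.0))
True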
"""
def _argcheck(self, b):
return b >= 0
def _rvs(self, b, size=None, random_state=None):
# https://en.wikipedia.org/wiki/Rice_distribution
t = b/np.sqrt(2) + random_state.standard_normal(size=(2,) + size)
return np.sqrt((t*t).sum(axis=0))
def _cdf(self, x, b):
return sc.chndtr(np.square(x), 2, np.square(b))
def _ppf(self, q, b):
return np.sqrt(sc.chndtrix(q, 2, np.square(b)))
def _pdf(self, x, b):
# rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
#
# We use (x**2 + b**2)/2 = ((x-b)**2)/2 + xb.
# The factor of np.exp(-xb) is then included in the i0e function
# in place of the modified Bessel function, i0, improving
# numerical stability for large values of xb.
return x * np.exp(-(x-b)*(x-b)/2.0) * sc.i0e(x*b)
def _munp(self, n, b):
nd2 = n/2.0
n1 = 1 + nd2
b2 = b*b/2.0
return (2.0**(nd2) * np.exp(-b2) * sc.gamma(n1) *
sc.hyp1f1(n1, 1, b2))
rice = rice_gen(a=0.0, name="rice")
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
r"""A reciprocal inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `recipinvgauss` is:
.. math::
f(x, \mu) = \frac{1}{\sqrt{2\pi x}}
\exp\left(\frac{-(1-\mu x)^2}{2\mu^2x}\right)
for :math:`x \ge 0`.
`recipinvgauss` takes ``mu`` as a shape parameter for :math:`\mu`.
%(after_notes)s
%(example)s
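As implemented, samples are drawn as the reciprocal of inverse Gaussian
(Wald) variates, so the CDF should agree with the corresponding `invgauss`
survival function (a rough check; arbitrary points):
>>> import numpy as np
>>> from scipy import stats
>>> x = np.linspace(0.5, 4.0, 5)
>>> np.allclose(stats.recipinvgauss.cdf(x, 0.5), stats.invgauss.sf(1/x, 0.5))
True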
"""
def _pdf(self, x, mu):
# recipinvgauss.pdf(x, mu) =
# 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
return 1.0/np.sqrt(2*np.pi*x)*np.exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
def _logpdf(self, x, mu):
return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*np.log(2*np.pi*x)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/np.sqrt(x)
return 1.0-_norm_cdf(isqx*trm1)-np.exp(2.0/mu)*_norm_cdf(-isqx*trm2)
def _rvs(self, mu, size=None, random_state=None):
return 1.0/random_state.wald(mu, 1.0, size=size)
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss')
class semicircular_gen(rv_continuous):
r"""A semicircular continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `semicircular` is:
.. math::
f(x) = \frac{2}{\pi} \sqrt{1-x^2}
for :math:`-1 \le x \le 1`.
The distribution is a special case of `rdist` with `c = 3`.
%(after_notes)s
See Also
--------
rdist
References
----------
.. [1] "Wigner semicircle distribution",
https://en.wikipedia.org/wiki/Wigner_semicircle_distribution
%(example)s
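A rough numerical check of the `rdist` special case noted above
(arbitrary points):
>>> import numpy as np
>>> from scipy import stats
>>> x = np.linspace(-0.9, 0.9, 5)
>>> np.allclose(stats.semicircular.pdf(x), stats.rdist.pdf(x, 3))
True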
"""
def _pdf(self, x):
return 2.0/np.pi*np.sqrt(1-x*x)
def _logpdf(self, x):
return np.log(2/np.pi) + 0.5*np.log1p(-x*x)
def _cdf(self, x):
return 0.5+1.0/np.pi*(x*np.sqrt(1-x*x) + np.arcsin(x))
def _ppf(self, q):
return rdist._ppf(q, 3)
def _rvs(self, size=None, random_state=None):
# generate values uniformly distributed on the area under the pdf
# (semi-circle) by randomly generating the radius and angle
r = np.sqrt(random_state.uniform(size=size))
a = np.cos(np.pi * random_state.uniform(size=size))
return r * a
def _stats(self):
return 0, 0.25, 0, -1.0
def _entropy(self):
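# differential entropy of the standard semicircular law: log(pi) - 1/2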
return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
class skew_norm_gen(rv_continuous):
r"""A skew-normal random variable.
%(before_notes)s
Notes
-----
The pdf is::
skewnorm.pdf(x, a) = 2 * norm.pdf(x) * norm.cdf(a*x)
`skewnorm` takes a real number :math:`a` as a skewness parameter.
When ``a = 0`` the distribution is identical to a normal distribution
(`norm`). `rvs` implements the method of [1]_.
%(after_notes)s
%(example)s
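A rough numerical check that ``a = 0`` recovers the standard normal density
(arbitrary points):
>>> import numpy as np
>>> from scipy import stats
>>> x = np.linspace(-3, 3, 7)
>>> np.allclose(stats.skewnorm.pdf(x, 0), stats.norm.pdf(x))
True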
References
----------
.. [1] A. Azzalini and A. Capitanio (1999). Statistical applications of the
multivariate skew-normal distribution. J. Roy. Statist. Soc., B 61, 579-602.
:arxiv:`0911.2093`
"""
def _argcheck(self, a):
return np.isfinite(a)
def _pdf(self, x, a):
return 2.*_norm_pdf(x)*_norm_cdf(a*x)
def _cdf_single(self, x, *args):
_a, _b = self._get_support(*args)
if x <= 0:
cdf = integrate.quad(self._pdf, _a, x, args=args)[0]
else:
t1 = integrate.quad(self._pdf, _a, 0, args=args)[0]
t2 = integrate.quad(self._pdf, 0, x, args=args)[0]
cdf = t1 + t2
if cdf > 1:
# Presumably numerical noise, e.g. 1.0000000000000002
cdf = 1.0
return cdf
def _sf(self, x, a):
return self._cdf(-x, -a)
def _rvs(self, a, size=None, random_state=None):
u0 = random_state.normal(size=size)
v = random_state.normal(size=size)
d = a/np.sqrt(1 + a**2)
u1 = d*u0 + v*np.sqrt(1 - d**2)
return np.where(u0 >= 0, u1, -u1)
def _stats(self, a, moments='mvsk'):
output = [None, None, None, None]
const = np.sqrt(2/np.pi) * a/np.sqrt(1 + a**2)
if 'm' in moments:
output[0] = const
if 'v' in moments:
output[1] = 1 - const**2
if 's' in moments:
output[2] = ((4 - np.pi)/2) * (const/np.sqrt(1 - const**2))**3
if 'k' in moments:
output[3] = (2*(np.pi - 3)) * (const**4/(1 - const**2)**2)
return output
skewnorm = skew_norm_gen(name='skewnorm')
class trapezoid_gen(rv_continuous):
r"""A trapezoidal continuous random variable.
%(before_notes)s
Notes
-----
The trapezoidal distribution can be represented with an up-sloping line
from ``loc`` to ``(loc + c*scale)``, then constant to ``(loc + d*scale)``
and then downsloping from ``(loc + d*scale)`` to ``(loc+scale)``. This
defines the trapezoid base from ``loc`` to ``(loc+scale)`` and the flat
top from ``c`` to ``d`` proportional to the position along the base
with ``0 <= c <= d <= 1``. When ``c=d``, this is equivalent to `triang`
with the same values for `loc`, `scale` and `c`.
The method of [1]_ is used for computing moments.
`trapezoid` takes :math:`c` and :math:`d` as shape parameters.
%(after_notes)s
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
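A rough numerical check of the ``c == d`` special case noted above
(arbitrary points):
>>> import numpy as np
>>> from scipy import stats
>>> x = np.linspace(0.05, 0.95, 7)
>>> np.allclose(stats.trapezoid.pdf(x, 0.3, 0.3), stats.triang.pdf(x, 0.3))
True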
References
----------
.. [1] Kacker, R.N. and Lawrence, J.F. (2007). Trapezoidal and triangular
distributions for Type B evaluation of standard uncertainty.
Metrologia 44, 117-127. :doi:`10.1088/0026-1394/44/2/003`
"""
def _argcheck(self, c, d):
return (c >= 0) & (c <= 1) & (d >= 0) & (d <= 1) & (d >= c)
def _pdf(self, x, c, d):
u = 2 / (d-c+1)
return _lazyselect([x < c,
(c <= x) & (x <= d),
x > d],
[lambda x, c, d, u: u * x / c,
lambda x, c, d, u: u,
lambda x, c, d, u: u * (1-x) / (1-d)],
(x, c, d, u))
def _cdf(self, x, c, d):
return _lazyselect([x < c,
(c <= x) & (x <= d),
x > d],
[lambda x, c, d: x**2 / c / (d-c+1),
lambda x, c, d: (c + 2 * (x-c)) / (d-c+1),
lambda x, c, d: 1-((1-x) ** 2
/ (d-c+1) / (1-d))],
(x, c, d))
def _ppf(self, q, c, d):
qc, qd = self._cdf(c, c, d), self._cdf(d, c, d)
condlist = [q < qc, q <= qd, q > qd]
choicelist = [np.sqrt(q * c * (1 + d - c)),
0.5 * q * (1 + d - c) + 0.5 * c,
1 - np.sqrt((1 - q) * (d - c + 1) * (1 - d))]
return np.select(condlist, choicelist)
def _munp(self, n, c, d):
# Using the parameterization from Kacker, 2007, with
# a=bottom left, c=top left, d=top right, b=bottom right, then
# E[X^n] = h/(n+1)/(n+2) [(b^{n+2}-d^{n+2})/(b-d)
# - (c^{n+2} - a^{n+2})/(c-a)]
# with h = 2/((b-a) - (d-c)). The corresponding parameterization
# in scipy, has a'=loc, c'=loc+c*scale, d'=loc+d*scale, b'=loc+scale,
# which for standard form reduces to a'=0, b'=1, c'=c, d'=d.
# Substituting into E[X^n] gives the bd' term as (1 - d^{n+2})/(1 - d)
# and the ac' term as c^{n-1} for the standard form. The bd' term has
# numerical difficulties near d=1, so replace (1 - d^{n+2})/(1-d)
# with expm1((n+2)*log(d))/(d-1).
# Testing with n=18 for c=(1e-30,1-eps) shows that this is stable.
# We still require an explicit test for d=1 to prevent divide by zero,
# and now a test for d=0 to prevent log(0).
ab_term = c**(n+1)
dc_term = _lazyselect(
[d == 0.0, (0.0 < d) & (d < 1.0), d == 1.0],
[lambda d: 1.0,
lambda d: np.expm1((n+2) * np.log(d)) / (d-1.0),
lambda d: n+2],
[d])
val = 2.0 / (1.0+d-c) * (dc_term - ab_term) / ((n+1) * (n+2))
return val
def _entropy(self, c, d):
# Using the parameterization from Wikipedia (van Dorp, 2003)
# with a=bottom left, c=top left, d=top right, b=bottom right
# gives a'=loc, b'=loc+c*scale, c'=loc+d*scale, d'=loc+scale,
# which for loc=0, scale=1 is a'=0, b'=c, c'=d, d'=1.
# Substituting into the entropy formula from Wikipedia gives
# the following result.
return 0.5 * (1.0-d+c) / (1.0+d-c) + np.log(0.5 * (1.0+d-c))
trapezoid = trapezoid_gen(a=0.0, b=1.0, name="trapezoid")
# Note: alias kept for backwards compatibility. Rename was done
# because trapz is a slur in colloquial English (see gh-12924).
trapz = trapezoid_gen(a=0.0, b=1.0, name="trapz")
if trapz.__doc__:
trapz.__doc__ = "trapz is an alias for `trapezoid`"
class triang_gen(rv_continuous):
r"""A triangular continuous random variable.
%(before_notes)s
Notes
-----
The triangular distribution can be represented with an up-sloping line from
``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)``
to ``(loc + scale)``.
`triang` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _rvs(self, c, size=None, random_state=None):
return random_state.triangular(0, c, 1, size)
def _argcheck(self, c):
return (c >= 0) & (c <= 1)
def _pdf(self, x, c):
# 0: edge case where c=0
# 1: generalised case for x < c, don't use x <= c, as it doesn't cope
# with c = 0.
# 2: generalised case for x >= c, but doesn't cope with c = 1
# 3: edge case where c=1
r = _lazyselect([c == 0,
x < c,
(x >= c) & (c != 1),
c == 1],
[lambda x, c: 2 - 2 * x,
lambda x, c: 2 * x / c,
lambda x, c: 2 * (1 - x) / (1 - c),
lambda x, c: 2 * x],
(x, c))
return r
def _cdf(self, x, c):
r = _lazyselect([c == 0,
x < c,
(x >= c) & (c != 1),
c == 1],
[lambda x, c: 2*x - x*x,
lambda x, c: x * x / c,
lambda x, c: (x*x - 2*x + c) / (c-1),
lambda x, c: x * x],
(x, c))
return r
def _ppf(self, q, c):
return np.where(q < c, np.sqrt(c * q), 1-np.sqrt((1-c) * (1-q)))
def _stats(self, c):
return ((c+1.0)/3.0,
(1.0-c+c*c)/18,
np.sqrt(2)*(2*c-1)*(c+1)*(c-2) / (5*np.power((1.0-c+c*c), 1.5)),
-3.0/5.0)
def _entropy(self, c):
return 0.5-np.log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang")
class truncexpon_gen(rv_continuous):
r"""A truncated exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `truncexpon` is:
.. math::
f(x, b) = \frac{\exp(-x)}{1 - \exp(-b)}
for :math:`0 <= x <= b`.
`truncexpon` takes ``b`` as a shape parameter for :math:`b`.
%(after_notes)s
%(example)s
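A rough numerical check against the renormalized exponential CDF
(arbitrary points):
>>> import numpy as np
>>> from scipy import stats
>>> x = np.linspace(0.2, 2.8, 5)
>>> np.allclose(stats.truncexpon.cdf(x, 3.0),
...             stats.expon.cdf(x) / stats.expon.cdf(3.0))
True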
"""
def _argcheck(self, b):
return b > 0
def _get_support(self, b):
return self.a, b
def _pdf(self, x, b):
# truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
return np.exp(-x)/(-sc.expm1(-b))
def _logpdf(self, x, b):
return -x - np.log(-sc.expm1(-b))
def _cdf(self, x, b):
return sc.expm1(-x)/sc.expm1(-b)
def _ppf(self, q, b):
return -sc.log1p(q*sc.expm1(-b))
def _munp(self, n, b):
# wrong answer with formula, same as in continuous.pdf
# return sc.gamma(n+1) - sc.gammainc(1+n, b)
if n == 1:
return (1-(b+1)*np.exp(-b))/(-sc.expm1(-b))
elif n == 2:
return 2*(1-0.5*(b*b+2*b+2)*np.exp(-b))/(-sc.expm1(-b))
else:
# return generic for higher moments
# return rv_continuous._mom1_sc(self, n, b)
return self._mom1_sc(n, b)
def _entropy(self, b):
eB = np.exp(b)
return np.log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon')
TRUNCNORM_TAIL_X = 30
TRUNCNORM_MAX_BRENT_ITERS = 40
def _truncnorm_get_delta_scalar(a, b):
if (a > TRUNCNORM_TAIL_X) or (b < -TRUNCNORM_TAIL_X):
return 0
if a > 0:
delta = _norm_sf(a) - _norm_sf(b)
else:
delta = _norm_cdf(b) - _norm_cdf(a)
delta = max(delta, 0)
return delta
def _truncnorm_get_delta(a, b):
if np.isscalar(a) and np.isscalar(b):
return _truncnorm_get_delta_scalar(a, b)
a, b = np.atleast_1d(a), np.atleast_1d(b)
if a.size == 1 and b.size == 1:
return _truncnorm_get_delta_scalar(a.item(), b.item())
delta = np.zeros(np.shape(a))
condinner = (a <= TRUNCNORM_TAIL_X) & (b >= -TRUNCNORM_TAIL_X)
conda = (a > 0) & condinner
condb = (a <= 0) & condinner
if np.any(conda):
np.place(delta, conda, _norm_sf(a[conda]) - _norm_sf(b[conda]))
if np.any(condb):
np.place(delta, condb, _norm_cdf(b[condb]) - _norm_cdf(a[condb]))
delta[delta < 0] = 0
return delta
def _truncnorm_get_logdelta_scalar(a, b):
if (a <= TRUNCNORM_TAIL_X) and (b >= -TRUNCNORM_TAIL_X):
if a > 0:
delta = _norm_sf(a) - _norm_sf(b)
else:
delta = _norm_cdf(b) - _norm_cdf(a)
delta = max(delta, 0)
if delta > 0:
return np.log(delta)
if b < 0 or (np.abs(a) >= np.abs(b)):
nla, nlb = _norm_logcdf(a), _norm_logcdf(b)
logdelta = nlb + np.log1p(-np.exp(nla - nlb))
else:
sla, slb = _norm_logsf(a), _norm_logsf(b)
logdelta = sla + np.log1p(-np.exp(slb - sla))
return logdelta
def _truncnorm_logpdf_scalar(x, a, b):
with np.errstate(invalid='ignore'):
if np.isscalar(x):
if x < a:
return -np.inf
if x > b:
return -np.inf
shp = np.shape(x)
x = np.atleast_1d(x)
out = np.full_like(x, np.nan, dtype=np.double)
condlta, condgtb = (x < a), (x > b)
if np.any(condlta):
np.place(out, condlta, -np.inf)
if np.any(condgtb):
np.place(out, condgtb, -np.inf)
cond_inner = ~condlta & ~condgtb
if np.any(cond_inner):
_logdelta = _truncnorm_get_logdelta_scalar(a, b)
np.place(out, cond_inner, _norm_logpdf(x[cond_inner]) - _logdelta)
return (out[0] if (shp == ()) else out)
def _truncnorm_pdf_scalar(x, a, b):
with np.errstate(invalid='ignore'):
if np.isscalar(x):
if x < a:
return 0.0
if x > b:
return 0.0
shp = np.shape(x)
x = np.atleast_1d(x)
out = np.full_like(x, np.nan, dtype=np.double)
condlta, condgtb = (x < a), (x > b)
if np.any(condlta):
np.place(out, condlta, 0.0)
if np.any(condgtb):
np.place(out, condgtb, 0.0)
cond_inner = ~condlta & ~condgtb
if np.any(cond_inner):
delta = _truncnorm_get_delta_scalar(a, b)
if delta > 0:
np.place(out, cond_inner, _norm_pdf(x[cond_inner]) / delta)
else:
np.place(out, cond_inner,
np.exp(_truncnorm_logpdf_scalar(x[cond_inner], a, b)))
return (out[0] if (shp == ()) else out)
def _truncnorm_logcdf_scalar(x, a, b):
with np.errstate(invalid='ignore'):
if np.isscalar(x):
if x <= a:
return -np.inf
if x >= b:
return 0
shp = np.shape(x)
x = np.atleast_1d(x)
out = np.full_like(x, np.nan, dtype=np.double)
condlea, condgeb = (x <= a), (x >= b)
if np.any(condlea):
np.place(out, condlea, -np.inf)
if np.any(condgeb):
np.place(out, condgeb, 0.0)
cond_inner = ~condlea & ~condgeb
if np.any(cond_inner):
delta = _truncnorm_get_delta_scalar(a, b)
if delta > 0:
np.place(out, cond_inner,
np.log((_norm_cdf(x[cond_inner]) - _norm_cdf(a)) / delta))
else:
with np.errstate(divide='ignore'):
if a < 0:
nla, nlb = _norm_logcdf(a), _norm_logcdf(b)
tab = np.log1p(-np.exp(nla - nlb))
nlx = _norm_logcdf(x[cond_inner])
tax = np.log1p(-np.exp(nla - nlx))
np.place(out, cond_inner, nlx + tax - (nlb + tab))
else:
sla = _norm_logsf(a)
slb = _norm_logsf(b)
np.place(out, cond_inner,
np.log1p(-np.exp(_norm_logsf(x[cond_inner]) - sla))
- np.log1p(-np.exp(slb - sla)))
return (out[0] if (shp == ()) else out)
def _truncnorm_cdf_scalar(x, a, b):
with np.errstate(invalid='ignore'):
if np.isscalar(x):
if x <= a:
return 0.0
if x >= b:
return 1
shp = np.shape(x)
x = np.atleast_1d(x)
out = np.full_like(x, np.nan, dtype=np.double)
condlea, condgeb = (x <= a), (x >= b)
if np.any(condlea):
np.place(out, condlea, 0)
if np.any(condgeb):
np.place(out, condgeb, 1.0)
cond_inner = ~condlea & ~condgeb
if np.any(cond_inner):
delta = _truncnorm_get_delta_scalar(a, b)
if delta > 0:
np.place(out, cond_inner,
(_norm_cdf(x[cond_inner]) - _norm_cdf(a)) / delta)
else:
with np.errstate(divide='ignore'):
np.place(out, cond_inner,
np.exp(_truncnorm_logcdf_scalar(x[cond_inner], a, b)))
return (out[0] if (shp == ()) else out)
def _truncnorm_logsf_scalar(x, a, b):
with np.errstate(invalid='ignore'):
if np.isscalar(x):
if x <= a:
return 0.0
if x >= b:
return -np.inf
shp = np.shape(x)
x = np.atleast_1d(x)
out = np.full_like(x, np.nan, dtype=np.double)
condlea, condgeb = (x <= a), (x >= b)
if np.any(condlea):
np.place(out, condlea, 0)
if np.any(condgeb):
np.place(out, condgeb, -np.inf)
cond_inner = ~condlea & ~condgeb
if np.any(cond_inner):
delta = _truncnorm_get_delta_scalar(a, b)
if delta > 0:
np.place(out, cond_inner, np.log((_norm_sf(x[cond_inner]) - _norm_sf(b)) / delta))
else:
with np.errstate(divide='ignore'):
if b < 0:
nla, nlb = _norm_logcdf(a), _norm_logcdf(b)
np.place(out, cond_inner,
np.log1p(-np.exp(_norm_logcdf(x[cond_inner]) - nlb))
- np.log1p(-np.exp(nla - nlb)))
else:
sla, slb = _norm_logsf(a), _norm_logsf(b)
tab = np.log1p(-np.exp(slb - sla))
slx = _norm_logsf(x[cond_inner])
tax = np.log1p(-np.exp(slb - slx))
np.place(out, cond_inner, slx + tax - (sla + tab))
return (out[0] if (shp == ()) else out)
def _truncnorm_sf_scalar(x, a, b):
with np.errstate(invalid='ignore'):
if np.isscalar(x):
if x <= a:
return 1.0
if x >= b:
return 0.0
shp = np.shape(x)
x = np.atleast_1d(x)
out = np.full_like(x, np.nan, dtype=np.double)
condlea, condgeb = (x <= a), (x >= b)
if np.any(condlea):
np.place(out, condlea, 1.0)
if np.any(condgeb):
np.place(out, condgeb, 0.0)
cond_inner = ~condlea & ~condgeb
if np.any(cond_inner):
delta = _truncnorm_get_delta_scalar(a, b)
if delta > 0:
np.place(out, cond_inner, (_norm_sf(x[cond_inner]) - _norm_sf(b)) / delta)
else:
np.place(out, cond_inner, np.exp(_truncnorm_logsf_scalar(x[cond_inner], a, b)))
return (out[0] if (shp == ()) else out)
def _norm_logcdfprime(z):
# derivative of special.log_ndtr (See special/cephes/ndtr.c)
# Differentiate formula for log Phi(z)
# log Phi(z) = -z^2/2 - log(-z) - log(2pi)/2 + log(1 + sum (-1)^n (2n-1)!! / z^(2n))
# Convergence of series is slow for |z| < 10, but can use d(log Phi(z))/dz = dPhi(z)/dz / Phi(z)
# Just take the first 10 terms because that is sufficient for use in _norm_ilogcdf
assert np.all(z <= -10)
lhs = -z - 1/z
denom_cons = 1/z**2
numerator = 1
pwr = 1.0
denom_total, numerator_total = 0, 0
sign = -1
for i in range(1, 11):
pwr *= denom_cons
numerator *= 2 * i - 1
term = sign * numerator * pwr
denom_total += term
numerator_total += term * (2 * i) / z
sign = -sign
return lhs - numerator_total / (1 + denom_total)
def _norm_ilogcdf(y):
"""Inverse function to _norm_logcdf==sc.log_ndtr."""
# Apply approximate Newton-Raphson
# Only use for very negative values of y.
# At minimum requires y <= -(log(2pi)+2^2)/2 ~= -2.9
# Much better convergence for y <= -10
z = -np.sqrt(-2 * (y + np.log(2*np.pi)/2))
for _ in range(4):
z = z - (_norm_logcdf(z) - y) / _norm_logcdfprime(z)
return z
def _truncnorm_ppf_scalar(q, a, b):
shp = np.shape(q)
q = np.atleast_1d(q)
out = np.zeros(np.shape(q))
condle0, condge1 = (q <= 0), (q >= 1)
if np.any(condle0):
out[condle0] = a
if np.any(condge1):
out[condge1] = b
delta = _truncnorm_get_delta_scalar(a, b)
cond_inner = ~condle0 & ~condge1
if np.any(cond_inner):
qinner = q[cond_inner]
if delta > 0:
if a > 0:
sa, sb = _norm_sf(a), _norm_sf(b)
np.place(out, cond_inner,
_norm_isf(qinner * sb + sa * (1.0 - qinner)))
else:
na, nb = _norm_cdf(a), _norm_cdf(b)
np.place(out, cond_inner, _norm_ppf(qinner * nb + na * (1.0 - qinner)))
elif np.isinf(b):
np.place(out, cond_inner,
-_norm_ilogcdf(np.log1p(-qinner) + _norm_logsf(a)))
elif np.isinf(a):
np.place(out, cond_inner,
_norm_ilogcdf(np.log(q) + _norm_logcdf(b)))
else:
if b < 0:
# Solve norm_logcdf(x) = norm_logcdf(a) + log1p(q * (expm1(norm_logcdf(b) - norm_logcdf(a)))
# = nla + log1p(q * expm1(nlb - nla))
# = nlb + log(q) + log1p((1-q) * exp(nla - nlb)/q)
def _f_cdf(x, c):
return _norm_logcdf(x) - c
nla, nlb = _norm_logcdf(a), _norm_logcdf(b)
values = nlb + np.log(q[cond_inner])
C = np.exp(nla - nlb)
if C:
one_minus_q = (1 - q)[cond_inner]
values += np.log1p(one_minus_q * C / q[cond_inner])
x = [optimize.zeros.brentq(_f_cdf, a, b, args=(c,),
maxiter=TRUNCNORM_MAX_BRENT_ITERS)for c in values]
np.place(out, cond_inner, x)
else:
# Solve norm_logsf(x) = norm_logsf(b) + log1p((1-q) * (expm1(norm_logsf(a) - norm_logsf(b)))
# = slb + log1p((1-q)[cond_inner] * expm1(sla - slb))
# = sla + log(1-q) + log1p(q * np.exp(slb - sla)/(1-q))
def _f_sf(x, c):
return _norm_logsf(x) - c
sla, slb = _norm_logsf(a), _norm_logsf(b)
one_minus_q = (1-q)[cond_inner]
values = sla + np.log(one_minus_q)
C = np.exp(slb - sla)
if C:
values += np.log1p(q[cond_inner] * C / one_minus_q)
x = [optimize.zeros.brentq(_f_sf, a, b, args=(c,),
maxiter=TRUNCNORM_MAX_BRENT_ITERS) for c in values]
np.place(out, cond_inner, x)
out[out < a] = a
out[out > b] = b
return (out[0] if (shp == ()) else out)
class truncnorm_gen(rv_continuous):
r"""A truncated normal continuous random variable.
%(before_notes)s
Notes
-----
The standard form of this distribution is a standard normal truncated to
the range [a, b] --- notice that a and b are defined over the domain of the
standard normal. To convert clip values for a specific mean and standard
deviation, use::
a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
`truncnorm` takes :math:`a` and :math:`b` as shape parameters.
%(after_notes)s
%(example)s
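A rough numerical check against the renormalized normal CDF (the truncation
points and evaluation points below are arbitrary):
>>> import numpy as np
>>> from scipy import stats
>>> a, b = -1.0, 2.0
>>> x = np.linspace(-0.5, 1.5, 5)
>>> delta = stats.norm.cdf(b) - stats.norm.cdf(a)
>>> np.allclose(stats.truncnorm.cdf(x, a, b),
...             (stats.norm.cdf(x) - stats.norm.cdf(a)) / delta)
True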
"""
def _argcheck(self, a, b):
return a < b
def _get_support(self, a, b):
return a, b
def _pdf(self, x, a, b):
if np.isscalar(a) and np.isscalar(b):
return _truncnorm_pdf_scalar(x, a, b)
a, b = np.atleast_1d(a), np.atleast_1d(b)
if a.size == 1 and b.size == 1:
return _truncnorm_pdf_scalar(x, a.item(), b.item())
it = np.nditer([x, a, b, None], [],
[['readonly'], ['readonly'], ['readonly'], ['writeonly','allocate']])
for (_x, _a, _b, _ld) in it:
_ld[...] = _truncnorm_pdf_scalar(_x, _a, _b)
return it.operands[3]
def _logpdf(self, x, a, b):
if np.isscalar(a) and np.isscalar(b):
return _truncnorm_logpdf_scalar(x, a, b)
a, b = np.atleast_1d(a), np.atleast_1d(b)
if a.size == 1 and b.size == 1:
return _truncnorm_logpdf_scalar(x, a.item(), b.item())
it = np.nditer([x, a, b, None], [],
[['readonly'], ['readonly'], ['readonly'], ['writeonly','allocate']])
for (_x, _a, _b, _ld) in it:
_ld[...] = _truncnorm_logpdf_scalar(_x, _a, _b)
return it.operands[3]
def _cdf(self, x, a, b):
if np.isscalar(a) and np.isscalar(b):
return _truncnorm_cdf_scalar(x, a, b)
a, b = np.atleast_1d(a), np.atleast_1d(b)
if a.size == 1 and b.size == 1:
return _truncnorm_cdf_scalar(x, a.item(), b.item())
out = None
it = np.nditer([x, a, b, out], [],
[['readonly'], ['readonly'], ['readonly'], ['writeonly', 'allocate']])
for (_x, _a, _b, _p) in it:
_p[...] = _truncnorm_cdf_scalar(_x, _a, _b)
return it.operands[3]
def _logcdf(self, x, a, b):
if np.isscalar(a) and np.isscalar(b):
return _truncnorm_logcdf_scalar(x, a, b)
a, b = np.atleast_1d(a), np.atleast_1d(b)
if a.size == 1 and b.size == 1:
return _truncnorm_logcdf_scalar(x, a.item(), b.item())
it = np.nditer([x, a, b, None], [],
[['readonly'], ['readonly'], ['readonly'], ['writeonly', 'allocate']])
for (_x, _a, _b, _p) in it:
_p[...] = _truncnorm_logcdf_scalar(_x, _a, _b)
return it.operands[3]
def _sf(self, x, a, b):
if np.isscalar(a) and np.isscalar(b):
return _truncnorm_sf_scalar(x, a, b)
a, b = np.atleast_1d(a), np.atleast_1d(b)
if a.size == 1 and b.size == 1:
return _truncnorm_sf_scalar(x, a.item(), b.item())
out = None
it = np.nditer([x, a, b, out], [],
[['readonly'], ['readonly'], ['readonly'], ['writeonly', 'allocate']])
for (_x, _a, _b, _p) in it:
_p[...] = _truncnorm_sf_scalar(_x, _a, _b)
return it.operands[3]
def _logsf(self, x, a, b):
if np.isscalar(a) and np.isscalar(b):
return _truncnorm_logsf_scalar(x, a, b)
a, b = np.atleast_1d(a), np.atleast_1d(b)
if a.size == 1 and b.size == 1:
return _truncnorm_logsf_scalar(x, a.item(), b.item())
out = None
it = np.nditer([x, a, b, out], [],
[['readonly'], ['readonly'], ['readonly'], ['writeonly', 'allocate']])
for (_x, _a, _b, _p) in it:
_p[...] = _truncnorm_logsf_scalar(_x, _a, _b)
return it.operands[3]
def _ppf(self, q, a, b):
if np.isscalar(a) and np.isscalar(b):
return _truncnorm_ppf_scalar(q, a, b)
a, b = np.atleast_1d(a), np.atleast_1d(b)
if a.size == 1 and b.size == 1:
return _truncnorm_ppf_scalar(q, a.item(), b.item())
out = None
it = np.nditer([q, a, b, out], [],
[['readonly'], ['readonly'], ['readonly'], ['writeonly', 'allocate']])
for (_q, _a, _b, _x) in it:
_x[...] = _truncnorm_ppf_scalar(_q, _a, _b)
return it.operands[3]
def _munp(self, n, a, b):
def n_th_moment(n, a, b):
"""
Returns n-th moment. Defined only if n >= 0.
Function cannot broadcast due to the loop over n
"""
pA, pB = self._pdf([a, b], a, b)
probs = [pA, -pB]
moments = [0, 1]
for k in range(1, n+1):
# a or b might be infinite, and the corresponding pdf value
# is 0 in that case, but nan is returned for the
# multiplication. However, as b->infinity, pdf(b)*b**k -> 0.
# So it is safe to use _lazywhere to avoid the nan.
vals = _lazywhere(probs, [probs, [a, b]],
lambda x, y: x * y**(k-1), fillvalue=0)
mk = np.sum(vals) + (k-1) * moments[-2]
moments.append(mk)
return moments[-1]
return _lazywhere((n >= 0) & (a == a) & (b == b), (n, a, b),
np.vectorize(n_th_moment, otypes=[np.float64]),
np.nan)
def _stats(self, a, b, moments='mv'):
pA, pB = self._pdf(np.array([a, b]), a, b)
m1 = pA - pB
mu = m1
# use _lazywhere to avoid nan (See detailed comment in _munp)
probs = [pA, -pB]
vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y,
fillvalue=0)
m2 = 1 + np.sum(vals)
vals = _lazywhere(probs, [probs, [a-mu, b-mu]], lambda x, y: x*y,
fillvalue=0)
# mu2 = m2 - mu**2, but not as numerically stable as:
# mu2 = (a-mu)*pA - (b-mu)*pB + 1
mu2 = 1 + np.sum(vals)
vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y**2,
fillvalue=0)
m3 = 2*m1 + np.sum(vals)
vals = _lazywhere(probs, [probs, [a, b]], lambda x, y: x*y**3,
fillvalue=0)
m4 = 3*m2 + np.sum(vals)
mu3 = m3 + m1 * (-3*m2 + 2*m1**2)
g1 = mu3 / np.power(mu2, 1.5)
mu4 = m4 + m1*(-4*m3 + 3*m1*(2*m2 - m1**2))
g2 = mu4 / mu2**2 - 3
return mu, mu2, g1, g2
def _rvs(self, a, b, size=None, random_state=None):
# if a and b are scalar, use _rvs_scalar, otherwise need to create
# output by iterating over parameters
if np.isscalar(a) and np.isscalar(b):
out = self._rvs_scalar(a, b, size, random_state=random_state)
elif a.size == 1 and b.size == 1:
out = self._rvs_scalar(a.item(), b.item(), size, random_state=random_state)
else:
# When this method is called, size will be a (possibly empty)
# tuple of integers. It will not be None; if `size=None` is passed
# to `rvs()`, size will be the empty tuple ().
a, b = np.broadcast_arrays(a, b)
# a and b now have the same shape.
# `shp` is the shape of the blocks of random variates that are
# generated for each combination of parameters associated with
# broadcasting a and b.
# bc is a tuple the same length as size. The values
# in bc are bools. If bc[j] is True, it means that
# entire axis is filled in for a given combination of the
# broadcast arguments.
shp, bc = _check_shape(a.shape, size)
# `numsamples` is the total number of variates to be generated
# for each combination of the input arguments.
numsamples = int(np.prod(shp))
# `out` is the array to be returned. It is filled in in the
# loop below.
out = np.empty(size)
it = np.nditer([a, b],
flags=['multi_index'],
op_flags=[['readonly'], ['readonly']])
while not it.finished:
# Convert the iterator's multi_index into an index into the
# `out` array where the call to _rvs_scalar() will be stored.
# Where bc is True, we use a full slice; otherwise we use the
# index value from it.multi_index. len(it.multi_index) might
# be less than len(bc), and in that case we want to align these
# two sequences to the right, so the loop variable j runs from
# -len(size) to 0. This doesn't cause an IndexError, as
# bc[j] will be True in those cases where it.multi_index[j]
# would cause an IndexError.
idx = tuple((it.multi_index[j] if not bc[j] else slice(None))
for j in range(-len(size), 0))
out[idx] = self._rvs_scalar(it[0], it[1], numsamples, random_state).reshape(shp)
it.iternext()
if size == ():
out = out.item()
return out
def _rvs_scalar(self, a, b, numsamples=None, random_state=None):
if not numsamples:
numsamples = 1
# prepare sampling of rvs
size1d = tuple(np.atleast_1d(numsamples))
N = np.prod(size1d) # number of rvs needed, reshape upon return
# Calculate some rvs
U = random_state.uniform(low=0, high=1, size=N)
x = self._ppf(U, a, b)
rvs = np.reshape(x, size1d)
return rvs
truncnorm = truncnorm_gen(name='truncnorm', momtype=1)
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
r"""A Tukey-Lamdba continuous random variable.
%(before_notes)s
Notes
-----
A flexible distribution, able to represent and interpolate between the
following distributions:
- Cauchy (:math:`lambda = -1`)
- logistic (:math:`lambda = 0`)
- approx Normal (:math:`lambda = 0.14`)
- uniform from -1 to 1 (:math:`lambda = 1`)
`tukeylambda` takes a real number :math:`lambda` (denoted ``lam``
in the implementation) as a shape parameter.
%(after_notes)s
%(example)s
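A rough numerical check of the logistic special case (``lam = 0``;
arbitrary points):
>>> import numpy as np
>>> from scipy import stats
>>> x = np.linspace(-3, 3, 7)
>>> np.allclose(stats.tukeylambda.cdf(x, 0), stats.logistic.cdf(x))
True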
"""
def _argcheck(self, lam):
return np.ones(np.shape(lam), dtype=bool)
def _pdf(self, x, lam):
Fx = np.asarray(sc.tklmbda(x, lam))
Px = Fx**(lam-1.0) + (np.asarray(1-Fx))**(lam-1.0)
Px = 1.0/np.asarray(Px)
return np.where((lam <= 0) | (abs(x) < 1.0/np.asarray(lam)), Px, 0.0)
def _cdf(self, x, lam):
return sc.tklmbda(x, lam)
def _ppf(self, q, lam):
return sc.boxcox(q, lam) - sc.boxcox1p(-q, lam)
def _stats(self, lam):
return 0, _tlvar(lam), 0, _tlkurt(lam)
def _entropy(self, lam):
def integ(p):
return np.log(pow(p, lam-1)+pow(1-p, lam-1))
return integrate.quad(integ, 0, 1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda')
class FitUniformFixedScaleDataError(FitDataError):
def __init__(self, ptp, fscale):
self.args = (
"Invalid values in `data`. Maximum likelihood estimation with "
"the uniform distribution and fixed scale requires that "
"data.ptp() <= fscale, but data.ptp() = %r and fscale = %r." %
(ptp, fscale),
)
class uniform_gen(rv_continuous):
r"""A uniform continuous random variable.
In the standard form, the distribution is uniform on ``[0, 1]``. Using
the parameters ``loc`` and ``scale``, one obtains the uniform distribution
on ``[loc, loc + scale]``.
%(before_notes)s
%(example)s
"""
def _rvs(self, size=None, random_state=None):
return random_state.uniform(0.0, 1.0, size)
def _pdf(self, x):
return 1.0*(x == x)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
def fit(self, data, *args, **kwds):
"""
Maximum likelihood estimate for the location and scale parameters.
`uniform.fit` uses only the following parameters. Because exact
formulas are used, the parameters related to optimization that are
available in the `fit` method of other distributions are ignored
here. The only positional argument accepted is `data`.
Parameters
----------
data : array_like
Data to use in calculating the maximum likelihood estimate.
floc : float, optional
Hold the location parameter fixed to the specified value.
fscale : float, optional
Hold the scale parameter fixed to the specified value.
Returns
-------
loc, scale : float
Maximum likelihood estimates for the location and scale.
Notes
-----
An error is raised if `floc` is given and any values in `data` are
less than `floc`, or if `fscale` is given and `fscale` is less
than ``data.max() - data.min()``. An error is also raised if both
`floc` and `fscale` are given.
Examples
--------
>>> from scipy.stats import uniform
We'll fit the uniform distribution to `x`:
>>> x = np.array([2, 2.5, 3.1, 9.5, 13.0])
For a uniform distribution MLE, the location is the minimum of the
data, and the scale is the maximum minus the minimum.
>>> loc, scale = uniform.fit(x)
>>> loc
2.0
>>> scale
11.0
If we know the data comes from a uniform distribution where the support
starts at 0, we can use `floc=0`:
>>> loc, scale = uniform.fit(x, floc=0)
>>> loc
0.0
>>> scale
13.0
Alternatively, if we know the length of the support is 12, we can use
`fscale=12`:
>>> loc, scale = uniform.fit(x, fscale=12)
>>> loc
1.5
>>> scale
12.0
In that last example, the support interval is [1.5, 13.5]. This
solution is not unique. For example, the distribution with ``loc=2``
and ``scale=12`` has the same likelihood as the one above. When
`fscale` is given and it is larger than ``data.max() - data.min()``,
the parameters returned by the `fit` method center the support over
the interval ``[data.min(), data.max()]``.
"""
if len(args) > 0:
raise TypeError("Too many arguments.")
floc = kwds.pop('floc', None)
fscale = kwds.pop('fscale', None)
_remove_optimizer_parameters(kwds)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
# MLE for the uniform distribution
# --------------------------------
# The PDF is
#
# f(x, loc, scale) = {1/scale for loc <= x <= loc + scale
# {0 otherwise}
#
# The likelihood function is
# L(x, loc, scale) = (1/scale)**n
# where n is len(x), assuming loc <= x <= loc + scale for all x.
# The log-likelihood is
# l(x, loc, scale) = -n*log(scale)
# The log-likelihood is maximized by making scale as small as possible,
# while keeping loc <= x <= loc + scale. So if neither loc nor scale
# are fixed, the log-likelihood is maximized by choosing
# loc = x.min()
# scale = x.ptp()
# If loc is fixed, it must be less than or equal to x.min(), and then
# the scale is
# scale = x.max() - loc
# If scale is fixed, it must not be less than x.ptp(). If scale is
# greater than x.ptp(), the solution is not unique. Note that the
# likelihood does not depend on loc, except for the requirement that
# loc <= x <= loc + scale. All choices of loc for which
# x.max() - scale <= loc <= x.min()
# have the same log-likelihood. In this case, we choose loc such that
# the support is centered over the interval [data.min(), data.max()]:
        # loc = x.min() - 0.5*(scale - x.ptp())
if fscale is None:
# scale is not fixed.
if floc is None:
# loc is not fixed, scale is not fixed.
loc = data.min()
scale = data.ptp()
else:
# loc is fixed, scale is not fixed.
loc = floc
scale = data.max() - loc
if data.min() < loc:
raise FitDataError("uniform", lower=loc, upper=loc + scale)
else:
# loc is not fixed, scale is fixed.
ptp = data.ptp()
if ptp > fscale:
raise FitUniformFixedScaleDataError(ptp=ptp, fscale=fscale)
# If ptp < fscale, the ML estimate is not unique; see the comments
# above. We choose the distribution for which the support is
# centered over the interval [data.min(), data.max()].
loc = data.min() - 0.5*(fscale - ptp)
scale = fscale
# We expect the return values to be floating point, so ensure it
# by explicitly converting to float.
return float(loc), float(scale)
uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
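# Editor's note (illustrative sketch, not part of the original source): the
# centering rule used in ``uniform_gen.fit`` when only ``fscale`` is fixed can
# be checked against the docstring example above: with
# data = [2, 2.5, 3.1, 9.5, 13.0], data.min() = 2.0 and data.ptp() = 11.0,
# so for fscale = 12 the fitted location is loc = 2.0 - 0.5*(12 - 11) = 1.5,
# matching the doctest output of ``uniform.fit(x, fscale=12)``.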
class vonmises_gen(rv_continuous):
r"""A Von Mises continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `vonmises` and `vonmises_line` is:
.. math::
f(x, \kappa) = \frac{ \exp(\kappa \cos(x)) }{ 2 \pi I_0(\kappa) }
for :math:`-\pi \le x \le \pi`, :math:`\kappa > 0`. :math:`I_0` is the
modified Bessel function of order zero (`scipy.special.i0`).
`vonmises` is a circular distribution which does not restrict the
distribution to a fixed interval. Currently, there is no circular
distribution framework in scipy. The ``cdf`` is implemented such that
``cdf(x + 2*np.pi) == cdf(x) + 1``.
`vonmises_line` is the same distribution, defined on :math:`[-\pi, \pi]`
on the real line. This is a regular (i.e. non-circular) distribution.
`vonmises` and `vonmises_line` take ``kappa`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, kappa, size=None, random_state=None):
return random_state.vonmises(0.0, kappa, size=size)
def _pdf(self, x, kappa):
# vonmises.pdf(x, kappa) = exp(kappa * cos(x)) / (2*pi*I[0](kappa))
# = exp(kappa * (cos(x) - 1)) /
# (2*pi*exp(-kappa)*I[0](kappa))
# = exp(kappa * cosm1(x)) / (2*pi*i0e(kappa))
return np.exp(kappa*sc.cosm1(x)) / (2*np.pi*sc.i0e(kappa))
def _cdf(self, x, kappa):
return _stats.von_mises_cdf(kappa, x)
def _stats_skip(self, kappa):
return 0, None, 0, None
def _entropy(self, kappa):
return (-kappa * sc.i1(kappa) / sc.i0(kappa) +
np.log(2 * np.pi * sc.i0(kappa)))
vonmises = vonmises_gen(name='vonmises')
vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line')
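# Editor's note (illustrative, not part of the original source): the circular
# convention documented above means the CDF increases by exactly one over each
# period, i.e. vonmises.cdf(x + 2*np.pi, kappa) == vonmises.cdf(x, kappa) + 1
# (up to floating point), while ``vonmises_line`` restricts the support to
# [-pi, pi] and behaves like an ordinary, non-circular distribution.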
class wald_gen(invgauss_gen):
r"""A Wald continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wald` is:
.. math::
f(x) = \frac{1}{\sqrt{2\pi x^3}} \exp(- \frac{ (x-1)^2 }{ 2x })
for :math:`x >= 0`.
`wald` is a special case of `invgauss` with ``mu=1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, size=None, random_state=None):
return random_state.wald(1.0, 1.0, size=size)
def _pdf(self, x):
# wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
return invgauss._pdf(x, 1.0)
def _logpdf(self, x):
return invgauss._logpdf(x, 1.0)
def _cdf(self, x):
return invgauss._cdf(x, 1.0)
def _stats(self):
return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald")
class wrapcauchy_gen(rv_continuous):
r"""A wrapped Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wrapcauchy` is:
.. math::
f(x, c) = \frac{1-c^2}{2\pi (1+c^2 - 2c \cos(x))}
for :math:`0 \le x \le 2\pi`, :math:`0 < c < 1`.
`wrapcauchy` takes ``c`` as a shape parameter for :math:`c`.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _pdf(self, x, c):
# wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
return (1.0-c*c)/(2*np.pi*(1+c*c-2*c*np.cos(x)))
def _cdf(self, x, c):
output = np.zeros(x.shape, dtype=x.dtype)
val = (1.0+c)/(1.0-c)
c1 = x < np.pi
c2 = 1-c1
xp = np.extract(c1, x)
xn = np.extract(c2, x)
if np.any(xn):
valn = np.extract(c2, np.ones_like(x)*val)
xn = 2*np.pi - xn
yn = np.tan(xn/2.0)
on = 1.0-1.0/np.pi*np.arctan(valn*yn)
np.place(output, c2, on)
if np.any(xp):
valp = np.extract(c1, np.ones_like(x)*val)
yp = np.tan(xp/2.0)
op = 1.0/np.pi*np.arctan(valp*yp)
np.place(output, c1, op)
return output
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*np.arctan(val*np.tan(np.pi*q))
rcmq = 2*np.pi-2*np.arctan(val*np.tan(np.pi*(1-q)))
return np.where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return np.log(2*np.pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*np.pi, name='wrapcauchy')
class gennorm_gen(rv_continuous):
r"""A generalized normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gennorm` is [1]_:
.. math::
f(x, \beta) = \frac{\beta}{2 \Gamma(1/\beta)} \exp(-|x|^\beta)
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
`gennorm` takes ``beta`` as a shape parameter for :math:`\beta`.
For :math:`\beta = 1`, it is identical to a Laplace distribution.
For :math:`\beta = 2`, it is identical to a normal distribution
(with ``scale=1/sqrt(2)``).
See Also
--------
laplace : Laplace distribution
norm : normal distribution
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
%(example)s
"""
def _pdf(self, x, beta):
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(0.5*beta) - sc.gammaln(1.0/beta) - abs(x)**beta
def _cdf(self, x, beta):
c = 0.5 * np.sign(x)
# evaluating (.5 + c) first prevents numerical cancellation
return (0.5 + c) - c * sc.gammaincc(1.0/beta, abs(x)**beta)
def _ppf(self, x, beta):
c = np.sign(x - 0.5)
# evaluating (1. + c) first prevents numerical cancellation
return c * sc.gammainccinv(1.0/beta, (1.0 + c) - 2.0*c*x)**(1.0/beta)
def _sf(self, x, beta):
return self._cdf(-x, beta)
def _isf(self, x, beta):
return -self._ppf(x, beta)
def _stats(self, beta):
c1, c3, c5 = sc.gammaln([1.0/beta, 3.0/beta, 5.0/beta])
return 0., np.exp(c3 - c1), 0., np.exp(c5 + c1 - 2.0*c3) - 3.
def _entropy(self, beta):
return 1. / beta - np.log(.5 * beta) + sc.gammaln(1. / beta)
gennorm = gennorm_gen(name='gennorm')
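# Editor's note (illustrative, not part of the original source): the special
# cases quoted in the docstring follow directly from the density
#     f(x, beta) = beta / (2*Gamma(1/beta)) * exp(-|x|**beta)
# For beta = 1, f(x, 1) = 0.5*exp(-|x|), the standard Laplace density; for
# beta = 2, Gamma(1/2) = sqrt(pi) gives f(x, 2) = exp(-x**2)/sqrt(pi), which
# is the normal density with scale = 1/sqrt(2).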
class halfgennorm_gen(rv_continuous):
r"""The upper half of a generalized normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfgennorm` is:
.. math::
f(x, \beta) = \frac{\beta}{\Gamma(1/\beta)} \exp(-|x|^\beta)
for :math:`x > 0`. :math:`\Gamma` is the gamma function
(`scipy.special.gamma`).
    `halfgennorm` takes ``beta`` as a shape parameter for :math:`\beta`.
For :math:`\beta = 1`, it is identical to an exponential distribution.
For :math:`\beta = 2`, it is identical to a half normal distribution
(with ``scale=1/sqrt(2)``).
See Also
--------
gennorm : generalized normal distribution
expon : exponential distribution
halfnorm : half normal distribution
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
%(example)s
"""
def _pdf(self, x, beta):
# beta
# halfgennorm.pdf(x, beta) = ------------- exp(-|x|**beta)
# gamma(1/beta)
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(beta) - sc.gammaln(1.0/beta) - x**beta
def _cdf(self, x, beta):
return sc.gammainc(1.0/beta, x**beta)
def _ppf(self, x, beta):
return sc.gammaincinv(1.0/beta, x)**(1.0/beta)
def _sf(self, x, beta):
return sc.gammaincc(1.0/beta, x**beta)
def _isf(self, x, beta):
return sc.gammainccinv(1.0/beta, x)**(1.0/beta)
def _entropy(self, beta):
return 1.0/beta - np.log(beta) + sc.gammaln(1.0/beta)
halfgennorm = halfgennorm_gen(a=0, name='halfgennorm')
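# Editor's note (illustrative, not part of the original source): with
#     f(x, beta) = beta / Gamma(1/beta) * exp(-x**beta),  x > 0,
# beta = 1 gives f(x, 1) = exp(-x), the standard exponential density, and
# beta = 2 gives f(x, 2) = 2/sqrt(pi) * exp(-x**2), the half-normal density
# with scale = 1/sqrt(2), as stated in the docstring above.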
class crystalball_gen(rv_continuous):
r"""
Crystalball distribution
%(before_notes)s
Notes
-----
The probability density function for `crystalball` is:
.. math::
f(x, \beta, m) = \begin{cases}
N \exp(-x^2 / 2), &\text{for } x > -\beta\\
N A (B - x)^{-m} &\text{for } x \le -\beta
\end{cases}
    where :math:`A = (m / |\beta|)^m \exp(-\beta^2 / 2)`,
:math:`B = m/|\beta| - |\beta|` and :math:`N` is a normalisation constant.
`crystalball` takes :math:`\beta > 0` and :math:`m > 1` as shape
parameters. :math:`\beta` defines the point where the pdf changes
from a power-law to a Gaussian distribution. :math:`m` is the power
of the power-law tail.
References
----------
.. [1] "Crystal Ball Function",
https://en.wikipedia.org/wiki/Crystal_Ball_function
%(after_notes)s
.. versionadded:: 0.19.0
%(example)s
"""
def _pdf(self, x, beta, m):
"""
Return PDF of the crystalball function.
--
| exp(-x**2 / 2), for x > -beta
crystalball.pdf(x, beta, m) = N * |
| A * (B - x)**(-m), for x <= -beta
--
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def rhs(x, beta, m):
return np.exp(-x**2 / 2)
def lhs(x, beta, m):
return ((m/beta)**m * np.exp(-beta**2 / 2.0) *
(m/beta - beta - x)**(-m))
return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
def _logpdf(self, x, beta, m):
"""
Return the log of the PDF of the crystalball function.
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def rhs(x, beta, m):
return -x**2/2
def lhs(x, beta, m):
return m*np.log(m/beta) - beta**2/2 - m*np.log(m/beta - beta - x)
return np.log(N) + _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
def _cdf(self, x, beta, m):
"""
Return CDF of the crystalball function
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def rhs(x, beta, m):
return ((m/beta) * np.exp(-beta**2 / 2.0) / (m-1) +
_norm_pdf_C * (_norm_cdf(x) - _norm_cdf(-beta)))
def lhs(x, beta, m):
return ((m/beta)**m * np.exp(-beta**2 / 2.0) *
(m/beta - beta - x)**(-m+1) / (m-1))
return N * _lazywhere(x > -beta, (x, beta, m), f=rhs, f2=lhs)
def _ppf(self, p, beta, m):
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
pbeta = N * (m/beta) * np.exp(-beta**2/2) / (m - 1)
def ppf_less(p, beta, m):
eb2 = np.exp(-beta**2/2)
C = (m/beta) * eb2 / (m-1)
N = 1/(C + _norm_pdf_C * _norm_cdf(beta))
return (m/beta - beta -
((m - 1)*(m/beta)**(-m)/eb2*p/N)**(1/(1-m)))
def ppf_greater(p, beta, m):
eb2 = np.exp(-beta**2/2)
C = (m/beta) * eb2 / (m-1)
N = 1/(C + _norm_pdf_C * _norm_cdf(beta))
return _norm_ppf(_norm_cdf(-beta) + (1/_norm_pdf_C)*(p/N - C))
return _lazywhere(p < pbeta, (p, beta, m), f=ppf_less, f2=ppf_greater)
def _munp(self, n, beta, m):
"""
Returns the n-th non-central moment of the crystalball function.
"""
N = 1.0 / (m/beta / (m-1) * np.exp(-beta**2 / 2.0) +
_norm_pdf_C * _norm_cdf(beta))
def n_th_moment(n, beta, m):
"""
            Returns the n-th moment. Defined only if n + 1 < m.
            Function cannot broadcast due to the loop over n.
"""
A = (m/beta)**m * np.exp(-beta**2 / 2.0)
B = m/beta - beta
rhs = (2**((n-1)/2.0) * sc.gamma((n+1)/2) *
(1.0 + (-1)**n * sc.gammainc((n+1)/2, beta**2 / 2)))
lhs = np.zeros(rhs.shape)
for k in range(n + 1):
lhs += (sc.binom(n, k) * B**(n-k) * (-1)**k / (m - k - 1) *
(m/beta)**(-m + k + 1))
return A * lhs + rhs
return N * _lazywhere(n + 1 < m, (n, beta, m),
np.vectorize(n_th_moment, otypes=[np.float64]),
np.inf)
def _argcheck(self, beta, m):
"""
Shape parameter bounds are m > 1 and beta > 0.
"""
return (m > 1) & (beta > 0)
crystalball = crystalball_gen(name='crystalball', longname="A Crystalball Function")
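# Editor's note (illustrative, not part of the original source): the two
# branches of the crystalball density meet continuously at the crossover
# x = -beta.  With A = (m/beta)**m * exp(-beta**2/2) and B = m/beta - beta,
# the power-law branch evaluated at x = -beta is
#     A * (B + beta)**(-m) = A * (m/beta)**(-m) = exp(-beta**2/2),
# which equals the Gaussian branch exp(-x**2/2) at x = -beta.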
def _argus_phi(chi):
"""
Utility function for the argus distribution
    used in the CDF and the normalization constant of the ARGUS distribution
"""
return _norm_cdf(chi) - chi * _norm_pdf(chi) - 0.5
class argus_gen(rv_continuous):
r"""
Argus distribution
%(before_notes)s
Notes
-----
The probability density function for `argus` is:
.. math::
f(x, \chi) = \frac{\chi^3}{\sqrt{2\pi} \Psi(\chi)} x \sqrt{1-x^2}
\exp(-\chi^2 (1 - x^2)/2)
for :math:`0 < x < 1` and :math:`\chi > 0`, where
.. math::
\Psi(\chi) = \Phi(\chi) - \chi \phi(\chi) - 1/2
with :math:`\Phi` and :math:`\phi` being the CDF and PDF of a standard
normal distribution, respectively.
    `argus` takes :math:`\chi` as a shape parameter.
%(after_notes)s
.. versionadded:: 0.19.0
References
----------
.. [1] "ARGUS distribution",
https://en.wikipedia.org/wiki/ARGUS_distribution
%(example)s
"""
def _pdf(self, x, chi):
y = 1.0 - x**2
A = chi**3 / (_norm_pdf_C * _argus_phi(chi))
return A * x * np.sqrt(y) * np.exp(-chi**2 * y / 2)
def _cdf(self, x, chi):
return 1.0 - self._sf(x, chi)
def _sf(self, x, chi):
return _argus_phi(chi * np.sqrt(1 - x**2)) / _argus_phi(chi)
def _rvs(self, chi, size=None, random_state=None):
chi = np.asarray(chi)
if chi.size == 1:
out = self._rvs_scalar(chi, numsamples=size,
random_state=random_state)
else:
shp, bc = _check_shape(chi.shape, size)
numsamples = int(np.prod(shp))
out = np.empty(size)
it = np.nditer([chi],
flags=['multi_index'],
op_flags=[['readonly']])
while not it.finished:
idx = tuple((it.multi_index[j] if not bc[j] else slice(None))
for j in range(-len(size), 0))
r = self._rvs_scalar(it[0], numsamples=numsamples,
random_state=random_state)
out[idx] = r.reshape(shp)
it.iternext()
if size == ():
out = out[()]
return out
def _rvs_scalar(self, chi, numsamples=None, random_state=None):
# if chi <= 2.611:
# use rejection method, see Devroye:
# Non-Uniform Random Variate Generation, 1986, section II.3.2.
# write: self.pdf = c * g(x) * h(x), where
# h is [0,1]-valued and g is a density
# g(x) = d1 * chi**2 * x * exp(-chi**2 * (1 - x**2) / 2), 0 <= x <= 1
# h(x) = sqrt(1 - x**2), 0 <= x <= 1
# Integrating g, we get:
# G(x) = d1 * exp(-chi**2 * (1 - x**2) / 2) - d2
# d1 and d2 are determined by G(0) = 0 and G(1) = 1
# d1 = 1 / (1 - exp(-0.5 * chi**2))
# d2 = 1 / (exp(0.5 * chi**2) - 1)
# => G(x) = (exp(chi**2 * x**2 /2) - 1) / (exp(chi**2 / 2) - 1)
# expected number of iterations is c with
# c = -np.expm1(-0.5 * chi**2) * chi / (_norm_pdf_C * _argus_phi(chi))
# note that G can be inverted easily, so we can sample
# rvs from this distribution
# G_inv(y) = sqrt(2 * log(1 + (exp(chi**2 / 2) - 1) * y) / chi**2)
# to avoid an overflow of exp(chi**2 / 2), it is convenient to write
# G_inv(y) = sqrt(1 + 2 * log(exp(-chi**2 / 2) * (1-y) + y) / chi**2)
#
# if chi > 2.611:
# use ratio of uniforms method applied to a transformed variable of X
# (X is ARGUS with parameter chi):
# Y = chi * sqrt(1 - X**2) has density proportional to
# u**2 * exp(-u**2 / 2) on [0, chi] (Maxwell distribution conditioned
# on [0, chi]). Apply ratio of uniforms to this density to generate
# samples of Y and convert back to X
#
# The expected number of iterations using the rejection method
# increases with increasing chi, whereas the expected number of
# iterations using the ratio of uniforms method decreases with
# increasing chi. The crossover occurs where
# chi*(1 - exp(-0.5*chi**2)) = 8*sqrt(2)*exp(-1.5) => chi ~ 2.611
# Switching algorithms at chi=2.611 means that the expected number of
# iterations is always below 2.2.
if chi <= 2.611:
# use rejection method
size1d = tuple(np.atleast_1d(numsamples))
N = int(np.prod(size1d))
x = np.zeros(N)
echi = np.exp(-chi**2 / 2)
simulated = 0
while simulated < N:
k = N - simulated
u = random_state.uniform(size=k)
v = random_state.uniform(size=k)
# acceptance condition: u <= h(G_inv(v)). This simplifies to
z = 2 * np.log(echi * (1 - v) + v) / chi**2
accept = (u**2 + z <= 0)
num_accept = np.sum(accept)
if num_accept > 0:
# rvs follow a distribution with density g: rvs = G_inv(v)
rvs = np.sqrt(1 + z[accept])
x[simulated:(simulated + num_accept)] = rvs
simulated += num_accept
return np.reshape(x, size1d)
else:
# use ratio of uniforms method
def f(x):
return np.where((x >= 0) & (x <= chi),
np.exp(2*np.log(x) - x**2/2), 0)
umax = np.sqrt(2) / np.exp(0.5)
vmax = 4 / np.exp(1)
z = rvs_ratio_uniforms(f, umax, 0, vmax, size=numsamples,
random_state=random_state)
return np.sqrt(1 - z*z / chi**2)
def _stats(self, chi):
chi2 = chi**2
phi = _argus_phi(chi)
m = np.sqrt(np.pi/8) * chi * sc.ive(1, chi2/4) / phi
v = (1 - 3 / chi2 + chi * _norm_pdf(chi) / phi) - m**2
return m, v, None, None
argus = argus_gen(name='argus', longname="An Argus Function", a=0.0, b=1.0)
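# Editor's note (illustrative usage sketch, not part of the original source):
# sampling dispatches on chi as described in ``_rvs_scalar`` above, e.g.
#     from scipy import stats
#     stats.argus.rvs(1.0, size=5, random_state=0)   # chi <= 2.611: rejection
#     stats.argus.rvs(3.5, size=5, random_state=0)   # chi > 2.611: ratio of uniforms
# Both branches return samples inside the support (0, 1) of the distribution.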
class rv_histogram(rv_continuous):
"""
Generates a distribution given by a histogram.
This is useful to generate a template distribution from a binned
    data sample.
As a subclass of the `rv_continuous` class, `rv_histogram` inherits from it
a collection of generic methods (see `rv_continuous` for the full list),
and implements them based on the properties of the provided binned
    data sample.
Parameters
----------
histogram : tuple of array_like
        Tuple containing two array_like objects:
        the first containing the content of n bins,
        the second containing the (n+1) bin boundaries.
        In particular, the return value of np.histogram is accepted.
Notes
-----
There are no additional shape parameters except for the loc and scale.
    The pdf is defined as a stepwise function from the provided histogram.
The cdf is a linear interpolation of the pdf.
.. versionadded:: 0.19.0
Examples
--------
Create a scipy.stats distribution from a numpy histogram
>>> import scipy.stats
>>> import numpy as np
>>> data = scipy.stats.norm.rvs(size=100000, loc=0, scale=1.5, random_state=123)
>>> hist = np.histogram(data, bins=100)
>>> hist_dist = scipy.stats.rv_histogram(hist)
Behaves like an ordinary scipy rv_continuous distribution
>>> hist_dist.pdf(1.0)
0.20538577847618705
>>> hist_dist.cdf(2.0)
0.90818568543056499
PDF is zero above (below) the highest (lowest) bin of the histogram,
defined by the max (min) of the original dataset
>>> hist_dist.pdf(np.max(data))
0.0
>>> hist_dist.cdf(np.max(data))
1.0
>>> hist_dist.pdf(np.min(data))
7.7591907244498314e-05
>>> hist_dist.cdf(np.min(data))
0.0
PDF and CDF follow the histogram
>>> import matplotlib.pyplot as plt
>>> X = np.linspace(-5.0, 5.0, 100)
>>> plt.title("PDF from Template")
>>> plt.hist(data, density=True, bins=100)
>>> plt.plot(X, hist_dist.pdf(X), label='PDF')
>>> plt.plot(X, hist_dist.cdf(X), label='CDF')
>>> plt.show()
"""
_support_mask = rv_continuous._support_mask
def __init__(self, histogram, *args, **kwargs):
"""
Create a new distribution using the given histogram
Parameters
----------
histogram : tuple of array_like
            Tuple containing two array_like objects:
            the first containing the content of n bins,
            the second containing the (n+1) bin boundaries.
            In particular, the return value of np.histogram is accepted.
"""
self._histogram = histogram
if len(histogram) != 2:
raise ValueError("Expected length 2 for parameter histogram")
self._hpdf = np.asarray(histogram[0])
self._hbins = np.asarray(histogram[1])
if len(self._hpdf) + 1 != len(self._hbins):
raise ValueError("Number of elements in histogram content "
"and histogram boundaries do not match, "
"expected n and n+1.")
self._hbin_widths = self._hbins[1:] - self._hbins[:-1]
self._hpdf = self._hpdf / float(np.sum(self._hpdf * self._hbin_widths))
self._hcdf = np.cumsum(self._hpdf * self._hbin_widths)
self._hpdf = np.hstack([0.0, self._hpdf, 0.0])
self._hcdf = np.hstack([0.0, self._hcdf])
# Set support
kwargs['a'] = self.a = self._hbins[0]
kwargs['b'] = self.b = self._hbins[-1]
super(rv_histogram, self).__init__(*args, **kwargs)
def _pdf(self, x):
"""
PDF of the histogram
"""
return self._hpdf[np.searchsorted(self._hbins, x, side='right')]
def _cdf(self, x):
"""
CDF calculated from the histogram
"""
return np.interp(x, self._hbins, self._hcdf)
def _ppf(self, x):
"""
Percentile function calculated from the histogram
"""
return np.interp(x, self._hcdf, self._hbins)
def _munp(self, n):
"""Compute the n-th non-central moment."""
integrals = (self._hbins[1:]**(n+1) - self._hbins[:-1]**(n+1)) / (n+1)
return np.sum(self._hpdf[1:-1] * integrals)
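    # Editor's note (illustrative, not part of the original source): because
    # the density is piecewise constant, the integral of x**n over a bin
    # [b_i, b_{i+1}] is (b_{i+1}**(n+1) - b_i**(n+1)) / (n + 1).  For n = 0
    # the integrals reduce to the bin widths, so _munp(0) returns
    # sum(hpdf * widths) = 1 by the normalization in __init__; for n = 1 it
    # returns the mean of the histogram distribution.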
def _entropy(self):
"""Compute entropy of distribution"""
res = _lazywhere(self._hpdf[1:-1] > 0.0,
(self._hpdf[1:-1],),
np.log,
0.0)
return -np.sum(self._hpdf[1:-1] * res * self._hbin_widths)
def _updated_ctor_param(self):
"""
Set the histogram as additional constructor argument
"""
dct = super(rv_histogram, self)._updated_ctor_param()
dct['histogram'] = self._histogram
return dct
# Collect names of classes and objects in this module.
pairs = list(globals().copy().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)
__all__ = _distn_names + _distn_gen_names + ['rv_histogram']
| bsd-3-clause |
jesusfcr/airflow | airflow/www/views.py | 2 | 94324 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from past.builtins import basestring, unicode
import ast
import os
import pkg_resources
import socket
from functools import wraps
from datetime import datetime, timedelta
import dateutil.parser
import copy
import json
import bleach
from collections import defaultdict
import inspect
from textwrap import dedent
import traceback
import sqlalchemy as sqla
from sqlalchemy import or_, desc, and_, union_all
from flask import (
redirect, url_for, request, Markup, Response, current_app, render_template, make_response)
from flask_admin import BaseView, expose, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.actions import action
from flask_admin.babel import lazy_gettext
from flask_admin.tools import iterdecode
from flask_login import flash
from flask._compat import PY2
from jinja2.sandbox import ImmutableSandboxedEnvironment
import markdown
import nvd3
from wtforms import (
Form, SelectField, TextAreaField, PasswordField, StringField, validators)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import airflow
from airflow import configuration as conf
from airflow import models
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.settings import Session
from airflow.models import XCom, DagRun
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS
from airflow.models import BaseOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.utils.logging import LoggingMixin
from airflow.utils.json import json_ser
from airflow.utils.state import State
from airflow.utils.db import provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils import logging as log_utils
from airflow.utils.dates import infer_time_unit, scale_time_units
from airflow.www import utils as wwwutils
from airflow.www.forms import DateTimeForm, DateTimeWithNumRunsForm
from airflow.configuration import AirflowConfigException
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
dagbag = models.DagBag(settings.DAGS_FOLDER)
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user
FILTER_BY_OWNER = False
DEFAULT_SENSITIVE_VARIABLE_FIELDS = (
'password',
'secret',
'passwd',
'authorization',
'api_key',
'apikey',
'access_token',
)
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
# filter_by_owner if authentication is enabled and filter_by_owner is true
FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
def dag_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
url = url_for(
'airflow.graph',
dag_id=dag_id)
return Markup(
'<a href="{}">{}</a>'.format(url, dag_id))
def log_url_formatter(v, c, m, p):
return Markup(
'<a href="{m.log_url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def task_instance_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
task_id = bleach.clean(m.task_id)
url = url_for(
'airflow.task',
dag_id=dag_id,
task_id=task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=dag_id,
root=task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_token(state):
color = State.color(state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{state}</span>'.format(**locals()))
def state_f(v, c, m, p):
return state_token(m.state)
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if datetime.now().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
def label_link(v, c, m, p):
try:
default_params = ast.literal_eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def render(obj, lexer):
out = ""
if isinstance(obj, basestring):
out += pygment_html_render(obj, lexer)
elif isinstance(obj, (tuple, list)):
for i, s in enumerate(obj):
out += "<div>List item #{}</div>".format(i)
out += "<div>" + pygment_html_render(s, lexer) + "</div>"
elif isinstance(obj, dict):
for k, v in obj.items():
out += '<div>Dict item "{}"</div>'.format(k)
out += "<div>" + pygment_html_render(v, lexer) + "</div>"
return out
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: render(x, lexers.BashLexer),
'hql': lambda x: render(x, lexers.SqlLexer),
'sql': lambda x: render(x, lexers.SqlLexer),
'doc': lambda x: render(x, lexers.TextLexer),
'doc_json': lambda x: render(x, lexers.JsonLexer),
'doc_rst': lambda x: render(x, lexers.RstLexer),
'doc_yaml': lambda x: render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: render(
inspect.getsource(x), lexers.PythonLexer),
}
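# Editor's note (illustrative, not part of the original source): attr_renderer
# maps operator attribute names to HTML renderers used by the task and
# rendered-template views below.  For example, a hypothetical call
#     attr_renderer['bash_command']('echo hello')
# returns Pygments-highlighted HTML (with line numbers, via
# HtmlFormatter(linenos=True)), while 'doc_md' content is rendered as Markdown
# wrapped in a <div class="rich_doc">.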
def data_profiling_required(f):
'''
Decorator for views requiring data profiling access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
current_app.config['LOGIN_DISABLED'] or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):
if isinstance(tasks, list):
for task in tasks:
recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
return
if isinstance(tasks, SubDagOperator):
subtasks = tasks.subdag.tasks
dag_ids.append(tasks.subdag.dag_id)
for subtask in subtasks:
if subtask.task_id not in task_ids:
task_ids.append(subtask.task_id)
task_id_to_dag[subtask.task_id] = tasks.subdag
recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)
if isinstance(tasks, BaseOperator):
task_id_to_dag[tasks.task_id] = tasks.dag
def should_hide_value_for_key(key_name):
return any(s in key_name for s in DEFAULT_SENSITIVE_VARIABLE_FIELDS) \
and conf.getboolean('admin', 'hide_sensitive_variable_fields')
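# Editor's note (illustrative, not part of the original source): the check is
# a plain substring match, so with 'hide_sensitive_variable_fields' enabled a
# hypothetical variable key such as 'slack_api_key' or 'db_password' would be
# masked (it contains 'api_key' / 'password'), while something like
# 'schedule_interval' would not.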
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
from airflow import macros
import pandas as pd
session = settings.Session()
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).first()
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).first()
session.expunge_all()
session.commit()
session.close()
payload = {}
payload['state'] = 'ERROR'
payload['error'] = ''
# Processing templated fields
try:
args = ast.literal_eval(chart.default_params)
if type(args) is not type(dict()):
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
args.update(request_dict)
args['macros'] = macros
sandbox = ImmutableSandboxedEnvironment()
sql = sandbox.from_string(chart.sql).render(**args)
label = sandbox.from_string(chart.label).render(**args)
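        # Editor's note (illustrative, not part of the original source): the
        # chart's SQL and label are Jinja templates rendered in a sandbox with
        # the chart's default params, the request args and the ``macros``
        # module.  A hypothetical chart.sql such as
        #     SELECT state, COUNT(*) FROM task_instance WHERE ds = '{{ ds }}'
        # would have '{{ ds }}' substituted from those parameters before the
        # query is sent to the database below.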
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(
wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
                chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart.show_datatable or chart_type == "datatable":
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
payload['data'] = data
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
except Exception as e:
payload['error'] = "Time conversion failed"
if chart_type == 'datatable':
payload['state'] = 'SUCCESS'
return wwwutils.json_response(payload)
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
df = df.fillna(0)
NVd3ChartClass = chart_mapping.get(chart.chart_type)
NVd3ChartClass = getattr(nvd3, NVd3ChartClass)
nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)
for col in df.columns:
nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())
try:
nvd3_chart.buildcontent()
payload['chart_type'] = nvd3_chart.__class__.__name__
payload['htmlcontent'] = nvd3_chart.htmlcontent
except Exception as e:
payload['error'] = str(e)
payload['state'] = 'SUCCESS'
payload['request_dict'] = request_dict
return wwwutils.json_response(payload)
@expose('/chart')
@data_profiling_required
def chart(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
session.expunge_all()
session.commit()
session.close()
NVd3ChartClass = chart_mapping.get(chart.chart_type)
if not NVd3ChartClass:
flash(
"Not supported anymore as the license was incompatible, "
"sorry",
"danger")
redirect('/admin/chart/')
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/nvd3.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
def dag_stats(self):
ds = models.DagStat
session = Session()
ds.update()
qry = (
session.query(ds.dag_id, ds.state, ds.count)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.dag_states:
try:
count = data[dag.dag_id][state]
except Exception:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/task_stats')
def task_stats(self):
TI = models.TaskInstance
DagRun = models.DagRun
Dag = models.DagModel
session = Session()
LastDagRun = (
session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state != State.RUNNING)
.filter(Dag.is_active == True)
.group_by(DagRun.dag_id)
.subquery('last_dag_run')
)
RunningDagRun = (
session.query(DagRun.dag_id, DagRun.execution_date)
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state == State.RUNNING)
.filter(Dag.is_active == True)
.subquery('running_dag_run')
)
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
LastTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(LastDagRun, and_(
LastDagRun.c.dag_id == TI.dag_id,
LastDagRun.c.execution_date == TI.execution_date))
)
RunningTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(RunningDagRun, and_(
RunningDagRun.c.dag_id == TI.dag_id,
RunningDagRun.c.execution_date == TI.execution_date))
)
UnionTI = union_all(LastTI, RunningTI).alias('union_ti')
qry = (
session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())
.group_by(UnionTI.c.dag_id, UnionTI.c.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
session.close()
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.task_states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = dag_id
try:
with open(dag.fileloc, 'r') as f:
code = f.read()
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except IOError as e:
html_code = str(e)
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@expose('/dag_details')
@login_required
def dag_details(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
session = settings.Session()
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
return self.render(
'airflow/dag_details.html',
dag=dag, title=title, states=states, State=State)
@current_app.errorhandler(404)
def circles(self):
return render_template(
'airflow/circles.html', hostname=socket.getfqdn()), 404
@current_app.errorhandler(500)
def show_traceback(self):
from airflow.utils import asciiart as ascii_
return render_template(
'airflow/traceback.html',
hostname=socket.getfqdn(),
nukular=ascii_.nukular,
info=traceback.format_exc()), 500
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/headers')
def headers(self):
d = {
'headers': {k: v for k, v in request.headers},
}
if hasattr(current_user, 'is_superuser'):
d['is_superuser'] = current_user.is_superuser()
d['data_profiling'] = current_user.data_profiling()
d['is_anonymous'] = current_user.is_anonymous()
d['is_authenticated'] = current_user.is_authenticated()
if hasattr(current_user, 'username'):
d['username'] = current_user.username
return wwwutils.json_response(d)
@expose('/pickle_info')
@login_required
def pickle_info(self):
d = {}
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
if not dag.is_subdag:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
@expose('/login', methods=['GET', 'POST'])
def login(self):
return airflow.login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
flash('You have been logged out.')
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
@wwwutils.action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title,)
@expose('/log')
@login_required
@wwwutils.action_logging
def log(self):
BASE_LOG_FOLDER = os.path.expanduser(
conf.get('core', 'BASE_LOG_FOLDER'))
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dag = dagbag.get_dag(dag_id)
log_relative = "{dag_id}/{task_id}/{execution_date}".format(
**locals())
loc = os.path.join(BASE_LOG_FOLDER, log_relative)
loc = loc.format(**locals())
log = ""
TI = models.TaskInstance
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
session = Session()
ti = session.query(TI).filter(
TI.dag_id == dag_id, TI.task_id == task_id,
TI.execution_date == dttm).first()
if ti is None:
log = "*** Task instance did not exist in the DB\n"
else:
# load remote logs
remote_log_base = conf.get('core', 'REMOTE_BASE_LOG_FOLDER')
remote_log_loaded = False
if remote_log_base:
remote_log_path = os.path.join(remote_log_base, log_relative)
remote_log = ""
# Only display errors reading the log if the task completed or ran at least
# once before (otherwise there won't be any remote log stored).
ti_execution_completed = ti.state in {State.SUCCESS, State.FAILED}
ti_ran_more_than_once = ti.try_number > 1
surface_log_retrieval_errors = (
ti_execution_completed or ti_ran_more_than_once)
# S3
if remote_log_path.startswith('s3:/'):
remote_log += log_utils.S3Log().read(
remote_log_path, return_error=surface_log_retrieval_errors)
remote_log_loaded = True
# GCS
elif remote_log_path.startswith('gs:/'):
remote_log += log_utils.GCSLog().read(
remote_log_path, return_error=surface_log_retrieval_errors)
remote_log_loaded = True
# unsupported
else:
remote_log += '*** Unsupported remote log location.'
if remote_log:
log += ('*** Reading remote log from {}.\n{}\n'.format(
remote_log_path, remote_log))
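                # Editor's note (illustrative, not part of the original
                # source): with a hypothetical REMOTE_BASE_LOG_FOLDER of
                # 's3://my-bucket/airflow-logs', the remote path built above
                # looks like
                #     s3://my-bucket/airflow-logs/<dag_id>/<task_id>/<execution_date>
                # and is read via S3Log or GCSLog depending on the scheme.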
# We only want to display the
# local logs while the task is running if a remote log configuration is set up
            # since the logs will be transferred there after the run completes.
# TODO(aoen): One problem here is that if a task is running on a worker it
# already ran on, then duplicate logs will be printed for all of the previous
# runs of the task that already completed since they will have been printed as
# part of the remote log section above. This can be fixed either by streaming
# logs to the log servers as tasks are running, or by creating a proper
# abstraction for multiple task instance runs).
if not remote_log_loaded or ti.state == State.RUNNING:
if os.path.exists(loc):
try:
f = open(loc)
log += "*** Reading local log.\n" + "".join(f.readlines())
f.close()
except:
log = "*** Failed to load local log file: {0}.\n".format(loc)
else:
WORKER_LOG_SERVER_PORT = \
conf.get('celery', 'WORKER_LOG_SERVER_PORT')
url = os.path.join(
"http://{ti.hostname}:{WORKER_LOG_SERVER_PORT}/log", log_relative
).format(**locals())
log += "*** Log file isn't local.\n"
log += "*** Fetching here: {url}\n".format(**locals())
try:
import requests
timeout = None # No timeout
try:
timeout = conf.getint('webserver', 'log_fetch_timeout_sec')
except (AirflowConfigException, ValueError):
pass
response = requests.get(url, timeout=timeout)
response.raise_for_status()
log += '\n' + response.text
except:
log += "*** Failed to fetch log file from worker.\n".format(
**locals())
if PY2 and not isinstance(log, unicode):
log = log.decode('utf-8')
return self.render(
'airflow/ti_code.html',
code=log, dag=dag, title="Log", task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
@wwwutils.action_logging
def task(self):
TI = models.TaskInstance
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = copy.copy(dag.get_task(task_id))
task.resolve_template_files()
ti = TI(task=task, execution_date=dttm)
ti.refresh_from_db()
ti_attrs = []
for attr_name in dir(ti):
if not attr_name.startswith('_'):
attr = getattr(ti, attr_name)
if type(attr) != type(self.task):
ti_attrs.append((attr_name, str(attr)))
task_attrs = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer:
task_attrs.append((attr_name, str(attr)))
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
no_failed_deps_result = [(
"Unknown",
dedent("""\
All dependencies are met but the task instance is not running. In most cases this just means that the task will probably be scheduled soon unless:<br/>
- The scheduler is down or under heavy load<br/>
{}
<br/>
If this task instance does not start soon please contact your Airflow administrator for assistance."""
            .format(
                "- This task instance already ran and had its state changed manually (e.g. cleared in the UI)<br/>"
if ti.state == State.NONE else "")))]
# Use the scheduler's context to figure out which dependencies are not met
dep_context = DepContext(SCHEDULER_DEPS)
failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
ti.get_failed_dep_statuses(
dep_context=dep_context)]
title = "Task Instance Details"
return self.render(
'airflow/task.html',
task_attrs=task_attrs,
ti_attrs=ti_attrs,
failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/xcom')
@login_required
@wwwutils.action_logging
def xcom(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
session = Session()
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
dag=dag, title=title)
@expose('/run')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def run(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
ignore_all_deps = request.args.get('ignore_all_deps') == "true"
ignore_task_deps = request.args.get('ignore_task_deps') == "true"
ignore_ti_state = request.args.get('ignore_ti_state') == "true"
try:
from airflow.executors import GetDefaultExecutor
from airflow.executors import CeleryExecutor
executor = GetDefaultExecutor()
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
except ImportError:
# in case CeleryExecutor cannot be imported it is not active either
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be queued
dep_context = DepContext(
deps=QUEUE_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join(
["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
flash("Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error")
return redirect(origin)
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/trigger')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def trigger(self):
dag_id = request.args.get('dag_id')
origin = request.args.get('origin') or "/admin/"
dag = dagbag.get_dag(dag_id)
if not dag:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = datetime.now()
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True
)
flash(
"Triggered {}, "
"it should start any moment now.".format(dag_id))
return redirect(origin)
@expose('/clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
recursive = request.args.get('recursive') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
else:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to clear:"),
details=details,)
return response
@expose('/blocked')
@login_required
def blocked(self):
session = settings.Session()
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
@expose('/success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def success(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
task.dag = dag
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
if not dag:
flash("Cannot find DAG: {}".format(dag_id))
return redirect(origin)
if not task:
flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id))
return redirect(origin)
from airflow.api.common.experimental.mark_tasks import set_state
if confirmed:
altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=State.SUCCESS,
commit=True)
flash("Marked success on {} task instances".format(len(altered)))
return redirect(origin)
to_be_altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=State.SUCCESS,
commit=False)
details = "\n".join([str(t) for t in to_be_altered])
response = self.render("airflow/confirm.html",
message=("Here's the list of task instances you are "
"about to mark as successful:"),
details=details)
return response
@expose('/tree')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def tree(self):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
session = settings.Session()
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
DR = models.DagRun
dag_runs = (
session.query(DR)
.filter(
DR.dag_id==dag.dag_id,
DR.execution_date<=base_date,
DR.execution_date>=min_date)
.all()
)
dag_runs = {
dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
dates = sorted(list(dag_runs.keys()))
max_date = max(dates) if dates else None
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
task_instances = {}
for ti in tis:
tid = alchemy_to_dict(ti)
dr = dag_runs.get(ti.execution_date)
tid['external_trigger'] = dr['external_trigger'] if dr else False
task_instances[(ti.task_id, ti.execution_date)] = tid
expanded = []
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / max(1, len(dag.roots))
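        # Editor's note (illustrative): e.g. a DAG with 4 root tasks gets a
        # per-root budget of 5000 / 4 = 1250 nodes before the recursion stops
        # tracing every path and falls back on the quick DFS described above.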
def recurse_nodes(task, visited):
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
def set_duration(tid):
if (isinstance(tid, dict) and tid.get("state") == State.RUNNING and
tid["start_date"] is not None):
d = datetime.now() - dateutil.parser.parse(tid["start_date"])
tid["duration"] = d.total_seconds()
return tid
return {
'name': task.task_id,
'instances': [
set_duration(task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates],
}
data = json.dumps(data, indent=4, default=json_ser)
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur)
@expose('/graph')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def graph(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
arrange = request.args.get('arrange', dag.orientation)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
DR = models.DagRun
drs = (
session.query(DR)
.filter_by(dag_id=dag_id)
.order_by(desc(DR.execution_date)).all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
class GraphForm(Form):
execution_date = SelectField("DAG run", choices=dr_choices)
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(
data={'execution_date': dttm.isoformat(), 'arrange': arrange})
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
session.close()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') and dag.doc_md else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=state_token(dr_state),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2),)
@expose('/duration')
@login_required
@wwwutils.action_logging
def duration(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=600, width="1200")
cum_chart = nvd3.lineChart(
name="cumLineChart", x_is_date=True, height=600, width="1200")
y = defaultdict(list)
x = defaultdict(list)
cum_y = defaultdict(list)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
TF = models.TaskFail
ti_fails = (
session
.query(TF)
.filter(
TF.dag_id == dag.dag_id,
TF.execution_date >= min_date,
TF.execution_date <= base_date,
TF.task_id.in_([t.task_id for t in dag.tasks]))
.all()
)
fails_totals = defaultdict(int)
for tf in ti_fails:
dict_key = (tf.dag_id, tf.task_id, tf.execution_date)
fails_totals[dict_key] += tf.duration
for ti in tis:
if ti.duration:
dttm = wwwutils.epoch(ti.execution_date)
x[ti.task_id].append(dttm)
y[ti.task_id].append(float(ti.duration))
fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)
fails_total = fails_totals[fails_dict_key]
cum_y[ti.task_id].append(float(ti.duration + fails_total))
# determine the most relevant time unit for the set of task instance
# durations for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])
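        # Assumption about infer_time_unit: it picks a readable unit (e.g.
        # 'seconds', 'minutes', 'hours' or 'days') from the magnitude of the
        # values, and scale_time_units below rescales each series to match it.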
# update the y Axis on both charts to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(y_unit))
cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(cum_y_unit))
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
cum_chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(cum_y[task.task_id],
cum_y_unit))
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
cum_chart.buildcontent()
s_index = cum_chart.htmlcontent.rfind('});')
cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +
"$( document ).trigger('chartload')" +
cum_chart.htmlcontent[s_index:])
return self.render(
'airflow/duration_chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent,
cum_chart=cum_chart.htmlcontent
)
@expose('/tries')
@login_required
@wwwutils.action_logging
def tries(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, y_axis_format='d', height=600, width="1200")
for task in dag.tasks:
y = []
x = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
dttm = wwwutils.epoch(ti.execution_date)
x.append(dttm)
y.append(ti.try_number)
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
tries = sorted(list({ti.try_number for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if tries else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent
)
@expose('/landing_times')
@login_required
@wwwutils.action_logging
def landing_times(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=600, width="1200")
y = {}
x = {}
for task in dag.tasks:
y[task.task_id] = []
x[task.task_id] = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
ts = ti.execution_date
if dag.schedule_interval and dag.following_schedule(ts):
ts = dag.following_schedule(ts)
if ti.end_date:
dttm = wwwutils.epoch(ti.execution_date)
secs = (ti.end_date - ts).total_seconds()
x[ti.task_id].append(dttm)
y[ti.task_id].append(secs)
# determine the most relevant time unit for the set of landing times
# for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
# update the y Axis to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Landing Time ({})'.format(y_unit))
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
chart=chart.htmlcontent,
height="700px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
)
@expose('/paused', methods=['POST'])
@login_required
@wwwutils.action_logging
def paused(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
@wwwutils.action_logging
def refresh(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = datetime.now()
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect(request.referrer)
@expose('/refresh_all')
@login_required
@wwwutils.action_logging
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
@wwwutils.action_logging
def gantt(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = DateTimeForm(data={'execution_date': dttm})
tis = [
ti for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
tasks = []
for ti in tis:
end_date = ti.end_date if ti.end_date else datetime.now()
tasks.append({
'startDate': wwwutils.epoch(ti.start_date),
'endDate': wwwutils.epoch(end_date),
'isoStart': ti.start_date.isoformat()[:-4],
'isoEnd': end_date.isoformat()[:-4],
'taskName': ti.task_id,
'duration': "{}".format(end_date - ti.start_date)[:-4],
'status': ti.state,
'executionDate': ti.execution_date.isoformat(),
})
states = {ti.state: ti.state for ti in tis}
data = {
'taskNames': [ti.task_id for ti in tis],
'tasks': tasks,
'taskStatus': states,
'height': len(tis) * 25,
}
session.commit()
session.close()
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
data=json.dumps(data, indent=2),
base_date='',
demo_mode=demo_mode,
root=root,
)
@expose('/object/task_instances')
@login_required
@wwwutils.action_logging
def task_instances(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
return ("Error: Invalid execution_date")
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
return json.dumps(task_instances)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
@expose('/varimport', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def varimport(self):
try:
out = str(request.files['file'].read())
d = json.loads(out)
except Exception:
flash("Missing file or syntax error.")
else:
for k, v in d.items():
models.Variable.set(k, v, serialize_json=isinstance(v, dict))
flash("{} variable(s) successfully updated.".format(len(d)))
return redirect('/admin/variable')
class HomeView(AdminIndexView):
@expose("/")
@login_required
def index(self):
session = Session()
DM = models.DagModel
qry = None
# restrict the dags shown if filter_by_owner and current user is not superuser
do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()
hide_paused_dags_by_default = conf.getboolean('webserver',
'hide_paused_dags_by_default')
show_paused_arg = request.args.get('showPaused', 'None')
if show_paused_arg.strip().lower() == 'false':
hide_paused = True
elif show_paused_arg.strip().lower() == 'true':
hide_paused = False
else:
hide_paused = hide_paused_dags_by_default
# read orm_dags from the db
qry = session.query(DM)
qry_fltr = []
if do_filter and owner_mode == 'ldapgroup':
qry_fltr = qry.filter(
~DM.is_subdag, DM.is_active,
DM.owners.in_(current_user.ldap_groups)
).all()
elif do_filter and owner_mode == 'user':
qry_fltr = qry.filter(
~DM.is_subdag, DM.is_active,
DM.owners == current_user.user.username
).all()
else:
qry_fltr = qry.filter(
~DM.is_subdag, DM.is_active
).all()
# optionally filter out "paused" dags
if hide_paused:
orm_dags = {dag.dag_id: dag for dag in qry_fltr if not dag.is_paused}
else:
orm_dags = {dag.dag_id: dag for dag in qry_fltr}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
session.expunge_all()
session.commit()
session.close()
# get a list of all non-subdag dags visible to everyone
# optionally filter out "paused" dags
if hide_paused:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag and not dag.is_paused]
else:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag]
# optionally filter to get only dags that the user should see
if do_filter and owner_mode == 'ldapgroup':
# only show dags owned by someone in @current_user.ldap_groups
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner in current_user.ldap_groups
}
elif do_filter and owner_mode == 'user':
# only show dags owned by @current_user.user.username
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner == current_user.user.username
}
else:
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
}
all_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))
return self.render(
'airflow/dags.html',
webserver_dags=webserver_dags,
orm_dags=orm_dags,
hide_paused=hide_paused,
all_dag_ids=all_dag_ids)
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/', methods=['POST', 'GET'])
@wwwutils.gzipped
def query(self):
session = settings.Session()
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.form.get('conn_id')
csv = request.form.get('csv') == "true"
sql = request.form.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes=[
'table', 'table-bordered', 'table-striped', 'no-wrap'],
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
session.close()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
column_display_actions = True
page_size = 500
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
    Modifying the base ModelView class for non-edit, browse-only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',)
column_list = (
'label', 'conn_id', 'chart_type', 'owner', 'last_modified',)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
'x_is_date': (
"Whether the X axis should be casted as a date field. Expect most "
"intelligible date formats to get casted properly."
),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
'A dictionary of {"key": "values",} that define what the '
'templated fields (parameters) values should be by default. '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': [
(c.conn_id, c.conn_id)
for c in (
Session().query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if not model.user_id and current_user and hasattr(current_user, 'id'):
model.user_id = current_user.id
model.last_modified = datetime.now()
chart_mapping = (
('line', 'lineChart'),
('spline', 'lineChart'),
('bar', 'multiBarChart'),
('column', 'multiBarChart'),
('area', 'stackedAreaChart'),
('stacked_area', 'stackedAreaChart'),
('percent_area', 'stackedAreaChart'),
('datatable', 'datatable'),
)
chart_mapping = dict(chart_mapping)
class KnowEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description')
column_list = (
'label', 'event_type', 'start_date', 'end_date', 'reported_by')
column_default_sort = ("start_date", True)
class KnowEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
# NOTE: For debugging / troubleshooting
# mv = KnowEventTypeView(
# models.KnownEventType,
# Session, name="Known Event Types", category="Manage")
# admin.add_view(mv)
# class DagPickleView(SuperUserMixin, ModelView):
# pass
# mv = DagPickleView(
# models.DagPickle,
# Session, name="Pickles", category="Manage")
# admin.add_view(mv)
class VariableView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
list_template = 'airflow/variable_list.html'
def hidden_field_formatter(view, context, model, name):
if should_hide_value_for_key(model.key):
return Markup('*' * 8)
return getattr(model, name)
form_columns = (
'key',
'val',
)
column_list = ('key', 'val', 'is_encrypted',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val')
form_widget_args = {
'is_encrypted': {'disabled': True},
'val': {
'rows': 20,
}
}
column_sortable_list = (
'key',
'val',
'is_encrypted',
)
column_formatters = {
'val': hidden_field_formatter
}
# Default flask-admin export functionality doesn't handle serialized json
@action('varexport', 'Export', None)
def action_varexport(self, ids):
V = models.Variable
session = settings.Session()
qry = session.query(V).filter(V.id.in_(ids)).all()
session.close()
var_dict = {}
d = json.JSONDecoder()
for var in qry:
val = None
try:
val = d.decode(var.val)
except:
val = var.val
var_dict[var.key] = val
response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
response.headers["Content-Disposition"] = "attachment; filename=variables.json"
return response
def on_form_prefill(self, form, id):
if should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
class XComView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "XCom"
verbose_name_plural = "XComs"
page_size = 20
form_columns = (
'key',
'value',
'execution_date',
'task_id',
'dag_id',
)
form_extra_fields = {
'value': StringField('Value'),
}
column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
class DagRunModelView(ModelViewOnly):
verbose_name_plural = "DAG Runs"
can_edit = True
can_create = True
column_editable_list = ('state',)
verbose_name = "dag run"
column_default_sort = ('execution_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
form_args = dict(
dag_id=dict(validators=[validators.DataRequired()])
)
column_list = (
'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
column_filters = column_list
column_searchable_list = ('dag_id', 'state', 'run_id')
column_formatters = dict(
execution_date=datetime_f,
state=state_f,
start_date=datetime_f,
dag_id=dag_link)
@action('new_delete', "Delete", "Are you sure you want to delete selected records?")
def action_new_delete(self, ids):
session = settings.Session()
deleted = set(session.query(models.DagRun)
.filter(models.DagRun.id.in_(ids))
.all())
session.query(models.DagRun)\
.filter(models.DagRun.id.in_(ids))\
.delete(synchronize_session='fetch')
session.commit()
dirty_ids = []
for row in deleted:
dirty_ids.append(row.dag_id)
models.DagStat.update(dirty_ids, dirty_only=False, session=session)
session.close()
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_dagrun_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_dagrun_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_dagrun_state(ids, State.SUCCESS)
@provide_session
def set_dagrun_state(self, ids, target_state, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
dirty_ids.append(dr.dag_id)
count += 1
dr.state = target_state
if target_state == State.RUNNING:
dr.start_date = datetime.now()
else:
dr.end_date = datetime.now()
session.commit()
models.DagStat.update(dirty_ids, session=session)
flash(
"{count} dag runs were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date')
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date')
named_filter_urls = True
column_formatters = dict(
log_url=log_url_formatter,
task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
queued_dttm=datetime_f,
dag_id=dag_link, duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('job_id', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
column_list = (
'state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
'pool', 'log_url')
can_delete = True
page_size = 500
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_task_instance_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_task_instance_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_task_instance_state(ids, State.SUCCESS)
@action('set_retry', "Set state to 'up_for_retry'", None)
def action_set_retry(self, ids):
self.set_task_instance_state(ids, State.UP_FOR_RETRY)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected records?'))
def action_delete(self, ids):
"""
As a workaround for AIRFLOW-277, this method overrides Flask-Admin's ModelView.action_delete().
TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
https://github.com/flask-admin/flask-admin/issues/1226
"""
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
self.delete_task_instances(ids)
else:
super(TaskInstanceModelView, self).action_delete(ids)
@provide_session
def set_task_instance_state(self, ids, target_state, session=None):
try:
TI = models.TaskInstance
count = len(ids)
for id in ids:
task_id, dag_id, execution_date = id.split(',')
execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).one()
ti.state = target_state
session.commit()
flash(
"{count} task instances were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
@provide_session
def delete_task_instances(self, ids, session=None):
try:
TI = models.TaskInstance
count = 0
for id in ids:
task_id, dag_id, execution_date = id.split(',')
execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')
count += session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).delete()
session.commit()
flash("{count} task instances were deleted".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to delete', 'error')
def get_one(self, id):
"""
As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one().
TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
https://github.com/flask-admin/flask-admin/issues/1226
"""
task_id, dag_id, execution_date = iterdecode(id)
execution_date = dateutil.parser.parse(execution_date)
return self.session.query(self.model).get((task_id, dag_id, execution_date))
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
'extra__jdbc__drv_path',
'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__scope',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)
form_overrides = dict(_password=PasswordField)
form_widget_args = {
'is_extra_encrypted': {'disabled': True},
'is_encrypted': {'disabled': True},
}
    # Used to customize the form: these form elements get rendered
    # and their results are stored in the extra field as JSON. All of them
    # need to be prefixed with extra__ followed by the conn_type, as in
    # extra__{conn_type}__name. You can also hide form elements and rename
    # others from the connection_form.js file.
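    # Hypothetical example (not a field defined here): adding
    #     'extra__mysql__charset': StringField('Charset')
    # would render a "Charset" input for MySQL connections; per on_model_change
    # below, its value is stored in the connection's JSON `extra` blob under the
    # full prefixed key, provided the conn_type is included in that check.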
form_extra_fields = {
'extra__jdbc__drv_path': StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
'extra__google_cloud_platform__project': StringField('Project Id'),
'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),
        'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),
}
form_choices = {
'conn_type': models.Connection._types
}
def on_model_change(self, form, model, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:
extra = {
key: formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
@classmethod
def alert_fernet_key(cls):
fk = None
try:
fk = conf.get('core', 'fernet_key')
except:
pass
return fk is None
@classmethod
def is_secure(cls):
"""
Used to display a message in the Connection list view making it clear
that the passwords and `extra` field can't be encrypted.
"""
is_secure = False
try:
import cryptography
conf.get('core', 'fernet_key')
is_secure = True
except:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
class VersionView(wwwutils.SuperUserMixin, LoggingMixin, BaseView):
@expose('/')
def version(self):
# Look at the version from setup.py
try:
airflow_version = pkg_resources.require("apache-airflow")[0].version
except Exception as e:
airflow_version = None
self.logger.error(e)
# Get the Git repo and git hash
git_version = None
try:
with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f:
git_version = f.readline()
except Exception as e:
self.logger.error(e)
# Render information
title = "Version Info"
return self.render('airflow/version.html',
title=title,
airflow_version=airflow_version,
git_version=git_version)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = conf.AIRFLOW_CONFIG
if conf.getboolean("webserver", "expose_config"):
with open(conf.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
table = [(section, key, value, source)
for section, parameters in conf.as_dict(True, True).items()
for key, (value, source) in parameters.items()]
else:
config = (
"# You Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
table = None
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/config.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle,
table=table)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = 50
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
| apache-2.0 |
qiime2/q2-diversity | q2_diversity/tests/test_beta_rarefaction.py | 2 | 14933 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import functools
import tempfile
import os
import skbio
import qiime2
from biom import Table
import numpy as np
import numpy.testing as npt
import pandas as pd
import scipy
from qiime2.plugin.testing import TestPluginBase
from q2_diversity import beta_rarefaction
from q2_diversity._beta._beta_rarefaction import (
_get_multiple_rarefaction, _upgma, _cluster_samples, _add_support_count,
_jackknifed_emperor)
class SharedSetup:
def setUp(self):
self.table = Table(np.array([[0, 1, 3],
[1, 1, 2],
[2, 1, 0]]),
['O1', 'O2', 'O3'], ['S1', 'S2', 'S3'])
self.tree = skbio.TreeNode.read([
'((O1:0.25, O2:0.50):0.25, O3:0.75)root;'])
self.md = qiime2.Metadata(
pd.DataFrame({'a': ['1', '2', '3']},
index=pd.Index(['S1', 'S2', 'S3'], name='id')))
self.output_dir_obj = tempfile.TemporaryDirectory(
prefix='q2-diversity-test-temp-')
self.output_dir = self.output_dir_obj.name
def tearDown(self):
self.output_dir_obj.cleanup()
class BetaRarefactionTests(SharedSetup, TestPluginBase):
package = 'q2_diversity.tests'
def check_heatmap(self, viz_dir, iterations, correlation_method):
heatmap_fp = os.path.join(viz_dir, 'heatmap.html')
self.assertTrue(os.path.exists(heatmap_fp))
with open(heatmap_fp, 'r') as fh:
heatmap_contents = fh.read()
self.assertIn('Heatmap -', heatmap_contents)
svg_fp = os.path.join(viz_dir, 'heatmap.svg')
self.assertTrue(os.path.exists(svg_fp))
with open(svg_fp, 'r') as fh:
svg_contents = fh.read()
self.assertIn('Mantel correlation', svg_contents)
self.assertIn('Iteration', svg_contents)
test_statistics = {'spearman': "Spearman's rho",
'pearson': "Pearson's r"}
self.assertIn(test_statistics[correlation_method], svg_contents)
tsv_fp = os.path.join(viz_dir, 'rarefaction-iteration-correlation.tsv')
self.assertTrue(os.path.exists(tsv_fp))
with open(tsv_fp, 'r') as fh:
tsv_contents = fh.read()
self.assertIn(correlation_method, tsv_contents)
        # TSV has a header line and trailing newline, subtract 2 to get the
# number of data lines in the file. Each data line represents a
# pairwise comparison; assert the number of comparisons is equal to
# nCr (n Choose r), where n=`iterations` and r=2.
self.assertEqual(len(tsv_contents.split('\n')) - 2,
scipy.special.comb(iterations, 2))
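        # e.g. iterations=7 (as in the Pearson test below) gives
        # C(7, 2) = 7 * 6 / 2 = 21 pairwise comparisons, i.e. 21 data lines.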
def check_clustering(self, viz_dir, clustering_method):
cluster_fp = os.path.join(viz_dir, 'tree.html')
self.assertTrue(os.path.exists(cluster_fp))
newick_fp = os.path.join(
viz_dir, 'sample-clustering-%s.tre' % clustering_method)
self.assertTrue(os.path.exists(newick_fp))
tree = skbio.TreeNode.read(newick_fp)
self.assertEqual({t.name for t in tree.tips()}, {'S1', 'S2', 'S3'})
def check_emperor(self, viz_dir):
emperor_tab_fp = os.path.join(viz_dir, 'emperor.html')
self.assertTrue(os.path.exists(emperor_tab_fp))
emperor_dir = os.path.join(viz_dir, 'emperor')
self.assertTrue(os.path.isdir(emperor_dir))
def assertBetaRarefactionValidity(self, viz_dir, iterations,
correlation_method, clustering_method):
self.check_heatmap(viz_dir, iterations, correlation_method)
self.check_clustering(viz_dir, clustering_method)
self.check_emperor(viz_dir)
def test_beta_rarefaction_with_phylogeny(self):
beta_rarefaction(self.output_dir, self.table,
'weighted_unifrac',
'upgma', self.md, 2, phylogeny=self.tree)
self.assertBetaRarefactionValidity(
self.output_dir, 10, 'spearman', 'upgma')
def test_beta_rarefaction_without_phylogeny(self):
beta_rarefaction(self.output_dir, self.table, 'braycurtis', 'upgma',
self.md, 2)
self.assertBetaRarefactionValidity(
self.output_dir, 10, 'spearman', 'upgma')
def test_beta_rarefaction_minimum_iterations(self):
beta_rarefaction(self.output_dir, self.table, 'braycurtis', 'upgma',
self.md, 2, iterations=2)
self.assertBetaRarefactionValidity(
self.output_dir, 2, 'spearman', 'upgma')
def test_beta_rarefaction_pearson_correlation(self):
beta_rarefaction(self.output_dir, self.table, 'jaccard', 'upgma',
self.md, 2, iterations=7, correlation_method='pearson'
)
self.assertBetaRarefactionValidity(
self.output_dir, 7, 'pearson', 'upgma')
def test_beta_rarefaction_non_default_color_scheme(self):
beta_rarefaction(self.output_dir, self.table, 'euclidean', 'upgma',
self.md, 3, iterations=5, color_scheme='PiYG')
self.assertBetaRarefactionValidity(
self.output_dir, 5, 'spearman', 'upgma')
def test_beta_rarefaction_neighbor_joining(self):
beta_rarefaction(self.output_dir, self.table, 'euclidean', 'nj',
self.md, 3, iterations=5, color_scheme='PiYG')
self.assertBetaRarefactionValidity(
self.output_dir, 5, 'spearman', 'nj')
def test_beta_rarefaction_empty_table(self):
table = Table(np.array([[]]), [], [])
with self.assertRaisesRegex(ValueError, 'feature table is empty'):
beta_rarefaction(self.output_dir, table, 'braycurtis', 'upgma',
self.md, 1)
def test_beta_rarefaction_all_samples_dropped(self):
with self.assertRaisesRegex(ValueError,
'shallow enough sampling depth'):
beta_rarefaction(self.output_dir, self.table, 'braycurtis',
'upgma', self.md, 100)
def test_beta_rarefaction_too_many_samples_dropped(self):
# mantel needs 3x3 or larger distance matrix
table = Table(np.array([[0, 1, 3], [1, 1, 2]]),
['O1', 'O2'], ['S1', 'S2', 'S3'])
with self.assertRaisesRegex(ValueError, '3x3 in size'):
beta_rarefaction(self.output_dir, table, 'braycurtis', 'upgma',
self.md, 2)
def test_beta_rarefaction_missing_phylogeny(self):
with self.assertRaisesRegex(ValueError, 'Phylogeny must be provided'):
beta_rarefaction(self.output_dir, self.table,
'weighted_unifrac',
'upgma', self.md, 2)
class GetMultipleRarefactionTests(SharedSetup, TestPluginBase):
package = 'q2_diversity.tests'
def test_with_phylogeny(self):
with qiime2.sdk.Context() as scope:
table = qiime2.Artifact.import_data('FeatureTable[Frequency]',
self.table)
tree = qiime2.Artifact.import_data('Phylogeny[Rooted]',
self.tree)
api_method = scope.ctx.get_action('diversity', 'beta_phylogenetic')
beta_func = functools.partial(api_method, phylogeny=tree)
rare_func = scope.ctx.get_action('feature-table', 'rarefy')
for iterations in range(1, 4):
obs_dms = _get_multiple_rarefaction(
beta_func, rare_func, 'weighted_unifrac',
iterations, table, 2)
self.assertEqual(len(obs_dms), iterations)
for obs in obs_dms:
self.assertEqual(obs.shape, (3, 3))
self.assertEqual(set(obs.ids), set(['S1', 'S2', 'S3']))
def test_without_phylogeny(self):
with qiime2.sdk.Context() as scope:
table = qiime2.Artifact.import_data('FeatureTable[Frequency]',
self.table)
beta_func = scope.ctx.get_action('diversity', 'beta')
rare_func = scope.ctx.get_action('feature-table', 'rarefy')
for iterations in range(1, 4):
obs_dms = _get_multiple_rarefaction(beta_func, rare_func,
'braycurtis', iterations,
table, 2)
self.assertEqual(len(obs_dms), iterations)
for obs in obs_dms:
self.assertEqual(obs.shape, (3, 3))
self.assertEqual(set(obs.ids), set(['S1', 'S2', 'S3']))
class UPGMATests(unittest.TestCase):
# The translation between skbio and scipy is a little spooky, so these
# tests just confirm that the ids don't get jumbled along the way
def test_ids_retained(self):
# This makes a very simple (and comb-like) UPGMA tree
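        # (roughly (((a,b),c),d) in Newick form -- see the parent-chain
        # assertions below)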
dm = skbio.DistanceMatrix(
[[0, 1, 3, 5],
[1, 0, 7, 9],
[3, 7, 0, 11],
[5, 9, 11, 0]],
ids=['a', 'b', 'c', 'd'])
tree = _upgma(dm)
# Nodes exist
a = tree.find('a')
b = tree.find('b')
c = tree.find('c')
d = tree.find('d')
# Check topology quickly. If the IDs were flipped or wrong, these
# checks would fail, as we're starting from the tips and working to the
# root.
self.assertIs(a.parent, b.parent)
self.assertIs(b.parent.parent, c.parent)
self.assertIs(c.parent.parent, d.parent)
self.assertIs(d.parent.parent, None)
class ClusterSamplesTests(unittest.TestCase):
def setUp(self):
self.dm = skbio.DistanceMatrix([[0, 1, 2.1], [1, 0, 3], [2.1, 3, 0]],
ids=['S1', 'S2', 'S3'])
# Since support is traditionally held as the name, we'll use only two
# trees since 1/2 has an exact floating point representation and will
# look like `"0.5"` on any machine.
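        # (With three or more trees, a 1/3 agreement would stringify as
        # '0.3333333333333333' or similar, making exact name checks fragile.)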
self.support = [
skbio.DistanceMatrix([[0, 1.1, 2], [1.1, 0, 3], [2, 3, 0]],
ids=['S1', 'S2', 'S3']),
skbio.DistanceMatrix([[0, 2, 3.1], [2, 0, 1], [3.1, 1, 0]],
ids=['S1', 'S2', 'S3'])
]
def test_nj_support(self):
result = _cluster_samples(self.dm, self.support, 'nj')
s1 = result.find('S1')
s2 = result.find('S2')
s3 = result.find('S3')
npt.assert_almost_equal(s1.length, 0.05)
npt.assert_almost_equal(s2.length, 0.95)
self.assertIs(s1.parent, s2.parent)
s1_s2 = s1.parent
self.assertEqual(s1_s2.name, '0.5') # half of the support agrees
npt.assert_almost_equal(s1_s2.length, 1)
npt.assert_almost_equal(s3.length, 1.05)
self.assertIs(s3.parent, s1_s2.parent)
self.assertIs(s3.parent.name, 'root') # root support is pointless
def test_upgma_support(self):
result = _cluster_samples(self.dm, self.support, 'upgma')
s1 = result.find('S1')
s2 = result.find('S2')
s3 = result.find('S3')
npt.assert_almost_equal(s1.length, 0.5)
        npt.assert_almost_equal(s2.length, 0.5)  # ultrametric tree!
self.assertIs(s1.parent, s2.parent)
s1_s2 = s1.parent
self.assertEqual(s1_s2.name, '0.5') # half of the support agrees
npt.assert_almost_equal(s1_s2.length, 0.775)
npt.assert_almost_equal(s3.length, 1.275)
self.assertIs(s3.parent, s1_s2.parent)
self.assertIs(s3.parent.name, 'root') # root support is pointless
class AddSupportCountTests(unittest.TestCase):
def test_same_topology(self):
p = skbio.TreeNode.read(['((a,b),c);'])
s = skbio.TreeNode.read(['((a,b),c);'])
internal = list(p.non_tips())
for n in internal:
n.support_count = 0
_add_support_count(internal, s)
for n in internal:
self.assertEqual(n.support_count, 1)
def test_differing_topology(self):
p = skbio.TreeNode.read(['(((a,b),c),d);'])
s = skbio.TreeNode.read(['(((a,c),b),d);'])
internal = list(p.non_tips())
for n in internal:
n.support_count = 0
_add_support_count(internal, s)
a_b = p.find('a').parent
a_b_c = a_b.parent
self.assertEqual(a_b.support_count, 0)
self.assertEqual(a_b_c.support_count, 1)
def test_extra_node(self):
p = skbio.TreeNode.read(['(((a,b),c),d);'])
s = skbio.TreeNode.read(['((a,b),(c,d));'])
# The first node that has a, b, and c, also has d as a descendant
internal = list(p.non_tips())
for n in internal:
n.support_count = 0
_add_support_count(internal, s)
a_b = p.find('a').parent
a_b_c = a_b.parent
self.assertEqual(a_b.support_count, 1)
self.assertEqual(a_b_c.support_count, 0)
def test_multiple_calls_with_outgroup(self):
p = skbio.TreeNode.read(['((((a,b),c),d),e);'])
s1 = skbio.TreeNode.read(['(((c,b),(a,d)),e);'])
s2 = skbio.TreeNode.read(['((((a,c),b),d),e);'])
# e is the outgroup here
internal = list(p.non_tips())
for n in internal:
n.support_count = 0
_add_support_count(internal, s1)
_add_support_count(internal, s2)
a_b = p.find('a').parent
a_b_c = a_b.parent
a_b_c_d = a_b_c.parent
self.assertEqual(a_b.support_count, 0)
self.assertEqual(a_b_c.support_count, 1)
self.assertEqual(a_b_c_d.support_count, 2)
class JackknifedEmperorTests(SharedSetup, unittest.TestCase):
def test_simple(self):
dm = skbio.DistanceMatrix([[0, 1, 2], [1, 0, 3], [2, 3, 0]],
ids=['S1', 'S2', 'S3'])
j1 = skbio.DistanceMatrix([[0, 1.1, 2], [1.1, 0, 3], [2, 3, 0]],
ids=['S1', 'S2', 'S3'])
j2 = skbio.DistanceMatrix([[0, 1, 2], [1, 0, 3.1], [2, 3.1, 0]],
ids=['S1', 'S2', 'S3'])
j3 = skbio.DistanceMatrix([[0, 1.1, 1.9], [1.1, 0, 3], [1.9, 3, 0]],
ids=['S1', 'S2', 'S3'])
e = _jackknifed_emperor(dm, [j1, j2, j3], self.md)
self.assertEqual(len(e.jackknifed), 3)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
karoldvl/echonet | echonet/datasets/esc_original.py | 1 | 6114 | # -*- coding: utf-8 -*-
"""Dataset wrapper for the ESC dataset.
This wrapper tries to mostly replicate the exact setup in the original paper.
Notable exceptions when compared to the original:
- training batches are provided by a perpetual generator which augments the data by randomly
time-shifting segments on-the-fly instead of a limited number of pre-generated augmentations
- silent segments are not discarded in training/testing
"""
import os
import librosa
import numpy as np
import pandas as pd
import scipy.signal
from tqdm import tqdm
from echonet.datasets.dataset import Dataset
from echonet.utils.generics import generate_delta, load_audio, to_one_hot
class OriginalESC(Dataset):
"""
"""
def __init__(self, data_dir, work_dir, train_folds, validation_folds, test_folds, esc10=False):
super().__init__(data_dir, work_dir)
self.meta = pd.read_csv(data_dir + 'esc50.csv')
self.train_folds = train_folds
self.validation_folds = validation_folds
self.test_folds = test_folds
self.class_count = 50
self.bands = 60
self.segment_length = 101
self.esc10 = esc10
if self.esc10:
self.class_count = 10
self.meta = self.meta[self.meta['esc10']]
self.categories = pd.unique(self.meta.sort_values('target')['category'])
self.meta['target'] = self.to_targets(self.meta['category'])
else:
self.categories = pd.unique(self.meta.sort_values('target')['category'])
self.train_meta = self.meta[self.meta['fold'].isin(self.train_folds)]
self.validation_data.meta = self.meta[self.meta['fold'].isin(self.validation_folds)]
self.test_data.meta = self.meta[self.meta['fold'].isin(self.test_folds)]
self._validation_size = len(self.validation_data.meta)
self._test_size = len(self.test_data.meta)
self._generate_spectrograms()
self._populate(self.validation_data)
self._populate(self.test_data)
def _generate_spectrograms(self):
for row in tqdm(self.meta.itertuples(), total=len(self.meta)):
specfile = self.work_dir + row.filename + '.orig.spec.npy'
if os.path.exists(specfile):
continue
audio = load_audio(self.data_dir + 'audio/' + row.filename, 22050)
audio *= 1.0 / np.max(np.abs(audio))
spec = librosa.feature.melspectrogram(audio, sr=22050, n_fft=1024,
hop_length=512, n_mels=self.bands)
spec = librosa.logamplitude(spec)
np.save(specfile, spec, allow_pickle=False)
def _populate(self, data):
X, y, meta = [], [], []
for row in data.meta.itertuples():
segments = self._extract_all_segments(row.filename)
X.extend(segments)
y.extend(np.repeat(row.target, len(segments)))
values = dict(zip(row._fields[1:], row[1:]))
columns = row._fields[1:]
rows = [pd.DataFrame(values, columns=columns, index=[0]) for _ in range(len(segments))]
meta.extend(rows)
X = np.stack(X)
y = to_one_hot(np.array(y), self.class_count)
meta = pd.concat(meta, ignore_index=True)
if self.data_mean is None:
self.data_mean = np.mean(X)
self.data_std = np.std(X)
X -= self.data_mean
X /= self.data_std
data.X = X
data.y = y
data.meta = meta
def _extract_all_segments(self, filename):
spec = np.load(self.work_dir + filename + '.orig.spec.npy')
segments = []
hop_length = self.segment_length // 5
offset = 0
while offset < np.shape(spec)[1] - self.segment_length:
segment = spec[:, offset:offset + self.segment_length]
delta = generate_delta(segment)
offset += hop_length
segments.append(np.stack([segment, delta]))
return segments
@property
def input_shape(self):
return 2, self.bands, self.segment_length
@property
def train_size(self):
return len(self.train_meta)
@property
def validation_size(self):
return self._validation_size
@property
def validation_segments(self):
return len(self.validation_data.meta)
@property
def test_size(self):
return self._test_size
@property
def test_segments(self):
return len(self.test_data.meta)
def to_categories(self, targets):
return self.categories[targets]
def to_targets(self, categories):
return [np.argmax(self.categories == name) for name in categories]
def test(self, model):
return self._score(model, self.test_data)
def validate(self, model):
return self._score(model, self.validation_data)
def iterbatches(self, batch_size):
itrain = super()._iterrows(self.train_meta)
while True:
X, y = [], []
for i in range(batch_size):
row = next(itrain)
X.append(self._extract_segment(row.filename))
y.append(row.target)
X = np.stack(X)
y = to_one_hot(np.array(y), self.class_count)
X -= self.data_mean
X /= self.data_std
yield X, y
def _extract_segment(self, filename):
spec = np.load(self.work_dir + filename + '.orig.spec.npy')
offset = self.RandomState.randint(0, np.shape(spec)[1] - self.segment_length + 1)
spec = spec[:, offset:offset + self.segment_length]
delta = generate_delta(spec)
return np.stack([spec, delta])
def _score(self, model, data):
predictions = pd.DataFrame(model.predict(data.X))
results = pd.concat([data.meta[['filename', 'target']], predictions], axis=1)
results = results.groupby('filename').aggregate('mean').reset_index()
results['predicted'] = np.argmax(results.iloc[:, 2:].values, axis=1)
return np.sum(results['predicted'] == results['target']) / len(results)
| mit |
robin-lai/scikit-learn | examples/svm/plot_svm_nonlinear.py | 268 | 1091 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is an XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
JasonKessler/scattertext | scattertext/test/test_termDocMatrixFromScikit.py | 1 | 1126 | from unittest import TestCase
import numpy as np
from scattertext import TermDocMatrixFromScikit
from scattertext.indexstore import IndexStore
from scattertext.test.test_semioticSquare import get_docs_categories_semiotic
class TestTermDocMatrixFromScikit(TestCase):
def test_build(self):
from sklearn.feature_extraction.text import CountVectorizer
categories, docs = get_docs_categories_semiotic()
idx_store = IndexStore()
y = np.array([idx_store.getidx(c) for c in categories])
count_vectorizer = CountVectorizer()
X_counts = count_vectorizer.fit_transform(docs)
term_doc_mat = TermDocMatrixFromScikit(
X=X_counts,
y=y,
feature_vocabulary=count_vectorizer.vocabulary_,
category_names=idx_store.values()).build()
self.assertEqual(term_doc_mat.get_categories()[:2], ['hamlet', 'jay-z/r. kelly'])
self.assertEqual(term_doc_mat
.get_term_freq_df()
.assign(score=term_doc_mat.get_scaled_f_scores('hamlet'))
.sort_values(by='score', ascending=False).index.tolist()[:5],
['that', 'march', 'did', 'majesty', 'sometimes'])
| apache-2.0 |
huobaowangxi/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 85 | 6377 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed up of LSHForest queries compared to the brute force
method of exact nearest neighbors is measured for the
aforementioned settings. In general, the speed up increases as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
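    # e.g. n_samples=1000, n_queries=100 -> 1100 blob points are generated;
    # the first 1000 form the index and the last 100 are the held-out queries.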
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
p1 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[0])
p2 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[1])
p3 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[2])
labels = ['n_estimators=' + str(params_list[0]['n_estimators']) +
', n_candidates=' + str(params_list[0]['n_candidates']),
'n_estimators=' + str(params_list[1]['n_estimators']) +
', n_candidates=' + str(params_list[1]['n_candidates']),
'n_estimators=' + str(params_list[2]['n_estimators']) +
', n_candidates=' + str(params_list[2]['n_candidates'])]
# Plot precision
plt.figure()
plt.legend((p1, p2, p3), (labels[0], labels[1], labels[2]),
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend((p1, p2, p3), (labels[0], labels[1], labels[2]),
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
| bsd-3-clause |
robbymeals/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 244 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
useful in the case of class imbalance, as it gives a more
direct visual interpretation of which classes are being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e., by the number of samples
# in each class)
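# e.g. a row of raw counts [13, 2, 0] with 15 samples in that class becomes
# [0.87, 0.13, 0.00] after normalization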
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
pwcazenave/PySeidon | pyseidon/tidegaugeClass/plotsTidegauge.py | 2 | 1096 | #!/usr/bin/python2.7
# encoding: utf-8
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as Tri
import matplotlib.ticker as ticker
import seaborn
class PlotsTidegauge:
"""'Plots' subset of Tidegauge class gathers plotting functions"""
def __init__(self, variable, debug=False):
self._var = variable
def plot_xy(self, x, y, title=' ', xLabel=' ', yLabel=' '):
"""
Simple X vs Y plot
Inputs:
------
- x = 1D array
- y = 1D array
"""
fig = plt.figure(figsize=(18,10))
plt.rc('font',size='22')
self._fig = plt.plot(x, y, label=title)
scale = 1
ticks = ticker.FuncFormatter(lambda lon, pos: '{0:g}'.format(lon/scale))
plt.ylabel(yLabel)
plt.xlabel(xLabel)
#plt.legend()
plt.show()
#TR_comments: templates
# def whatever(self, debug=False):
# if debug or self._debug:
# print 'Start whatever...'
#
# if debug or self._debug:
# print '...Passed'
| agpl-3.0 |
petosegan/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using an information-theoretic criterion (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In this case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
Here, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
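# BIC = -2 * log-likelihood + n_parameters * log(n_samples); the model with
# the lowest BIC is retained below.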
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
rupakc/Kaggle-Compendium | ICFHR 2012 - Arabic Writer Identification/arabic-baseline.py | 1 | 2763 | import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
import numpy as np
def get_naive_bayes_models():
gnb = GaussianNB()
mnb = MultinomialNB()
bnb = BernoulliNB()
classifier_list = [gnb,mnb,bnb]
classifier_name_list = ['Gaussian NB','Multinomial NB','Bernoulli NB']
return classifier_list,classifier_name_list
def get_neural_network(hidden_layer_size=50):
mlp = MLPClassifier(hidden_layer_sizes=hidden_layer_size)
return [mlp], ['MultiLayer Perceptron']
def get_ensemble_models():
rf = RandomForestClassifier(n_estimators=51,min_samples_leaf=5,min_samples_split=3)
bagg = BaggingClassifier(n_estimators=71,random_state=42)
extra = ExtraTreesClassifier(n_estimators=57,random_state=42)
ada = AdaBoostClassifier(n_estimators=51,random_state=42)
grad = GradientBoostingClassifier(n_estimators=101,random_state=42)
classifier_list = [rf,bagg,extra,ada,grad]
classifier_name_list = ['Random Forests','Bagging','Extra Trees','AdaBoost','Gradient Boost']
return classifier_list,classifier_name_list
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
print '--------- For Model : ', trained_model_name, ' -----------'
predicted_values = trained_model.predict(X_test)
print metrics.classification_report(y_test,predicted_values)
print "Accuracy Score : ",metrics.accuracy_score(y_test,predicted_values)
print "---------------------------------------\n"
strat_fold = StratifiedKFold(n_splits=2,random_state=42)
filename = 'features_train.csv'
train_frame = pd.read_csv(filename)
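# The class label is taken from the 'File' column: the prefix before the
# first underscore (presumably the writer id for this competition).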
class_labels = list(train_frame['File'].values)
class_labels = np.array(map(lambda s:s[:s.find('_')],class_labels))
del train_frame['File']
X,y = train_frame.values,class_labels
classifier_list, classifier_name_list = get_ensemble_models()
for train_index,test_index in strat_fold.split(X,y):
X_train,X_test = X[train_index], X[test_index]
y_train,y_test = y[train_index], y[test_index]
for classifier, classifier_name in zip(classifier_list,classifier_name_list):
classifier.fit(X_train,y_train)
print_evaluation_metrics(classifier,classifier_name,X_test,y_test) | mit |
sbussmann/sensor-fusion | Code/learnvehicle.py | 1 | 4452 | """
Take a processed dataframe, generate features, add classification label, make
validation set, predict class.
"""
import pandas as pd
from scipy.ndimage import gaussian_filter
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.metrics import accuracy_score
import numpy as np
dfcar = pd.read_csv('shaneiphone_exp2_processed.csv', index_col='DateTime')
dfbus = pd.read_csv('shanebus20150827_processed.csv', index_col='DateTime')
df = pd.concat([dfcar, dfbus])
# We are allowed to use only userAcceleration and gyroscope data
xyz = ['X', 'Y', 'Z']
measures = ['userAcceleration', 'gyroscope']
features = [i + j for i in measures for j in xyz]
# Generate Gaussian smoothed features
smoothfeatures = []
for i in features:
df[i + 'sm'] = gaussian_filter(df[i], 3)
df[i + '2sm'] = gaussian_filter(df[i], 100)
smoothfeatures.append(i + 'sm')
smoothfeatures.append(i + '2sm')
features.extend(smoothfeatures)
# Generate Jerk signal
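# (jerk is approximated as the first difference of each signal, i.e. a
#  discrete time-derivative, with a leading zero appended to keep the length)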
jerkfeatures = []
for i in features:
diffsignal = np.diff(df[i])
df[i + 'jerk'] = np.append(0, diffsignal)
jerkfeatures.append(i + 'jerk')
features.extend(jerkfeatures)
# Generate Fourier Transform of features
fftfeatures = []
for i in features:
reals = np.real(np.fft.rfft(df[i]))
imags = np.imag(np.fft.rfft(df[i]))
    # interleave the real and imaginary rfft coefficients into a real-valued
    # feature vector with the same length as the (even-length) input signal
    complexs = [reals[0]]
    for j in range(1, reals.size - 1):
        complexs.append(reals[j])
        complexs.append(imags[j])
    complexs.append(reals[-1])
    df['f' + i] = complexs
fftfeatures.append('f' + i)
features.extend(fftfeatures)
print(features)
df = df[features]
# assign class labels
car0 = (df.index > '2015-08-25 14:35:00') & \
(df.index <= '2015-08-25 14:42:00')
car1 = (df.index > '2015-08-25 14:43:00') & \
(df.index <= '2015-08-25 14:48:00')
bus0 = (df.index > '2015-08-27 10:10:00') & \
(df.index <= '2015-08-27 10:15:00')
bus1 = (df.index > '2015-08-27 10:15:00') & \
(df.index <= '2015-08-27 10:20:00')
nc = len(df)
df['class'] = np.zeros(nc) - 1
df['class'][car0] = 0
df['class'][car1] = 0
df['class'][bus0] = 1
df['class'][bus1] = 1
## remove the unclassified portion of the data
#classed = df['class'] != -1
#df = df[classed]
# separate into quarters for train and validation
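# (q2 and q4, the second car and bus segments, are used for training;
#  q1 and q3 are held out for validation)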
q1 = df[car0]
q2 = df[car1]
q3 = df[bus0]
q4 = df[bus1]
traindf = pd.concat([q2, q4])
validationdf = pd.concat([q1, q3])
# drop NaNs
traindf = traindf.dropna()
validationdf = validationdf.dropna()
X_train = traindf[traindf.columns[0:-1]].values
Y_train = traindf[traindf.columns[-1]].values
X_test = validationdf[validationdf.columns[0:-1]].values
Y_test = validationdf[validationdf.columns[-1]].values
# train a random forest
print("Beginning random forest classification")
clf = RandomForestClassifier(n_estimators=1000)
clf.fit(X_train, Y_train)
scores = cross_val_score(clf, X_train, Y_train, cv=5)
Y_test_RFC = clf.predict(X_test)
print("Results from cross-validation on training set:")
print(scores, scores.mean(), scores.std())
testscore = accuracy_score(Y_test, Y_test_RFC)
print("Accuracy score on test set: %6.3f" % testscore)
print("Feature importances:")
print(clf.feature_importances_)
import pdb; pdb.set_trace()
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
#from keras.layers.embeddings import Embedding
#from keras.layers.recurrent import LSTM
from keras.callbacks import ModelCheckpoint
checkpointer = ModelCheckpoint(filepath="testnn.hdf5", verbose=1, save_best_only=True)
model = Sequential()
# Add a mask_zero=True to the Embedding constructor if 0 is a left-padding value in your data
max_features = 200000
model.add(Dense(X_train.shape[1], 256))
model.add(Activation('relu'))
#model.add(Embedding(X_train.shape[1], 256))
#model.add(LSTM(256, 128, activation='sigmoid', inner_activation='hard_sigmoid'))
#model.add(Dropout(0.5))
model.add(Dense(256, 128))
model.add(Activation('relu'))
model.add(Dense(128, 1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', class_mode='binary')
call_back = model.fit(X_train, Y_train, batch_size=256, nb_epoch=200,
show_accuracy=True, validation_data=[X_test, Y_test],
callbacks=[checkpointer])
#score = model.evaluate(X_test, Y_test, batch_size=256)
# In[430]:
#plt.plot(model.predict(X_test))
import pdb; pdb.set_trace()
| mit |
caidongyun/BuildingMachineLearningSystemsWithPython | ch02/heldout.py | 24 | 1377 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
# This script demonstrates the difference between the training accuracy and
# testing (held-out) accuracy.
import numpy as np
from sklearn.datasets import load_iris
from threshold import fit_model, accuracy
data = load_iris()
features = data['data']
labels = data['target_names'][data['target']]
# We are going to remove the setosa examples as they are too easy:
is_setosa = (labels == 'setosa')
features = features[~is_setosa]
labels = labels[~is_setosa]
# Now we classify virginica vs non-virginica
is_virginica = (labels == 'virginica')
# Split the data in two: testing and training
testing = np.tile([True, False], 50) # testing = [True,False,True,False,True,False...]
# Training is the negation of testing: i.e., datapoints not used for testing,
# will be used for training
training = ~testing
model = fit_model(features[training], is_virginica[training])
train_accuracy = accuracy(features[training], is_virginica[training], model)
test_accuracy = accuracy(features[testing], is_virginica[testing], model)
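# The training accuracy is typically an optimistic estimate; the held-out
# testing accuracy is the more honest measure of generalization.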
print('''\
Training accuracy was {0:.1%}.
Testing accuracy was {1:.1%} (N = {2}).
'''.format(train_accuracy, test_accuracy, testing.sum()))
| mit |
iancze/PSOAP | tests/test_orbit_astrometry_Gl570BC.py | 1 | 9980 | import pytest
import os
import pkg_resources
import numpy as np
from psoap import orbit_astrometry
from psoap import constants as C
import matplotlib.pyplot as plt
import matplotlib
# Create plots of all of the orbits
from astropy.io import ascii
# If it doesn't already exist, create a "plots/basic/" directory
outdir = "tests/plots/Gl570BC/"
if not os.path.exists(outdir):
print("Creating ", outdir)
os.makedirs(outdir)
# Load the data sets for radial velocity and astrometry
rv1_fname = pkg_resources.resource_filename("psoap", "data/Gl570BC/rv1.txt")
rv2_fname = pkg_resources.resource_filename("psoap", "data/Gl570BC/rv2.txt")
astro_fname = pkg_resources.resource_filename("psoap", "data/Gl570BC/astro.txt")
rv_data_1 = ascii.read(rv1_fname)
rv_data_2 = ascii.read(rv2_fname)
astro_data = ascii.read(astro_fname)
rv_jds_A = rv_data_1["date"] + 2400000
vAs_data = rv_data_1["rv"]
vAs_err = rv_data_1["err"]
rv_jds_B = rv_data_2["date"] + 2400000
vBs_data = rv_data_2["rv"]
vBs_err = rv_data_2["err"]
astro_jds = astro_data["date"] + 2400000
rho_data = astro_data["rho"]
rho_err = astro_data["rho_err"]
theta_data = astro_data["PA"]
theta_err = astro_data["PA_err"]
def test_data():
# Make a plot of the astrometric data on the sky
fig, ax = plt.subplots(nrows=1)
xs = rho_data * np.cos(theta_data * np.pi/180)
ys = rho_data * np.sin(theta_data * np.pi/180)
ax.plot(xs, ys, ".")
ax.set_xlabel("North")
ax.set_ylabel("East")
ax.plot(0,0, "k*")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "data_astro.png")
dpc = 5.89 # pc
# Orbital elements
a = 0.1507 * dpc # [AU]
e = 0.7559
i = 107.6 # [deg]
omega = 127.56 # omega_1
# omega_2 = omega + 180.0
Omega = 195.9 + 180
T0 = 270.220 + 2450000 # [Julian Date]
M_2 = 0.390 # [M_sun]
M_tot = 0.586 + M_2 # [M_sun]
gamma = 28.665 # [km/s]
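# Kepler's third law: P^2 = 4 * pi^2 * a^3 / (G * M_tot), converted from seconds to days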
P = np.sqrt(4 * np.pi**2 / (C.G * M_tot * C.M_sun) * (a * C.AU)**3) / (24 * 3600) # [day]
# Pick a span of dates for one period
dates = np.linspace(T0, T0 + P, num=600)
# Initialize the orbit
orb = orbit_astrometry.Binary(a, e, i, omega, Omega, T0, M_tot, M_2, gamma, obs_dates=dates)
full_dict = orb.get_full_orbit()
vAs, vBs, XYZ_As, XYZ_Bs, XYZ_ABs, xy_As, xy_Bs, xy_ABs = [full_dict[key] for key in ("vAs", "vBs", "XYZ_As", "XYZ_Bs", "XYZ_ABs", "xy_As", "xy_Bs", "xy_ABs")]
polar_dict = orb.get_orbit()
vAs, vBs, rho_ABs, theta_ABs = [polar_dict[key] for key in ("vAs", "vBs", "rhos", "thetas")]
# Convert to sky coordinates, using distance
alpha_dec_As = XYZ_As/dpc # [arcsec]
alpha_dec_Bs = XYZ_Bs/dpc # [arcsec]
alpha_dec_ABs = XYZ_ABs/dpc # [arcsec]
rho_ABs = rho_ABs/dpc # [arcsec]
peri_A = orb._get_periastron_A()/dpc
peri_B = orb._get_periastron_B()/dpc
peri_BA = orb._get_periastron_BA()/dpc
asc_A = orb._get_node_A()/dpc
asc_B = orb._get_node_B()/dpc
asc_BA = orb._get_node_BA()/dpc
# Since we are plotting against date, draw the points with a color scale so we can track where they fall along the orbit.
# Set a colorscale for the lnprobs
cmap_primary = matplotlib.cm.get_cmap("Blues")
cmap_secondary = matplotlib.cm.get_cmap("Oranges")
norm = matplotlib.colors.Normalize(vmin=np.min(dates), vmax=np.max(dates))
# Determine colors based on the ending lnprob of each walker
def plot_points(ax, dates, xs, ys, primary):
for date, x, y in zip(dates, xs, ys):
if primary:
c = cmap_primary(norm(date))
else:
c = cmap_secondary(norm(date))
ax.plot(x, y, "o", color=c, mew=0.1, ms=3, mec="k")
# Then, we will make 3D plots of the orbit so that we can square with what we think is happening.
# The final crowning grace will be a 3D matplotlib plot of the orbital path.
def test_B_rel_A():
# Plot the Orbits
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
plot_points(ax, dates, alpha_dec_ABs[:,0], alpha_dec_ABs[:,1], False)
ax.plot(0,0, "*k", ms=2)
ax.plot(peri_BA[0], peri_BA[1], "ko", ms=3)
ax.plot(asc_BA[0], asc_BA[1], "o", color="C2", ms=3)
ax.set_xlabel(r"$\Delta \delta$ mas")
ax.set_ylabel(r"$\Delta \alpha \cos \delta $ mas")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "orbit_B_rel_A.png")
# Make a series of astrometric plots from different angles.
def test_AB_Z():
# Now plot A and B together, viewed from the Z axis
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
ax.plot(0,0, "ok", ms=2)
plot_points(ax, dates, alpha_dec_As[:,0], alpha_dec_As[:,1], True)
plot_points(ax, dates, alpha_dec_Bs[:,0], alpha_dec_Bs[:,1], False)
ax.plot(peri_A[0], peri_A[1], "ko", ms=3)
ax.plot(peri_B[0], peri_B[1], "ko", ms=3)
ax.plot(asc_A[0], asc_A[1], "^", color="C0", ms=3)
ax.plot(asc_B[0], asc_B[1], "^", color="C1", ms=3)
ax.set_xlabel(r"$\Delta \delta$ mas")
ax.set_ylabel(r"$\Delta \alpha \cos \delta$ mas")
ax.set_aspect("equal", "datalim")
fig.subplots_adjust(left=0.15, right=0.85, bottom=0.15, top=0.85)
# Plot A and B together, viewed from the observer (along -Z axis).
fig.savefig(outdir + "orbit_AB_Z.png")
def test_AB_X():
# Now plot A and B together, viewed from the X axis
# This means Y will form the "X" axis, or North
# And Z will form the Y axis, or towards observer
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
ax.plot(0,0, "ok", ms=2)
plot_points(ax, dates, alpha_dec_As[:,1], alpha_dec_As[:,2], True)
plot_points(ax, dates, alpha_dec_Bs[:,1], alpha_dec_Bs[:,2], False)
ax.plot(peri_A[1], peri_A[2], "ko", ms=3)
ax.plot(peri_B[1], peri_B[2], "ko", ms=3)
ax.plot(asc_A[1], asc_A[2], "^", color="C0", ms=3)
ax.plot(asc_B[1], asc_B[2], "^", color="C1", ms=3)
ax.set_xlabel(r"$\Delta \alpha \cos delta$ mas")
ax.set_ylabel(r"$\Delta Z$ mas (towards observer)")
ax.axhline(0, ls=":", color="k")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "orbit_AB_X.png")
def test_AB_Y():
# Now plot A and B together, viewed from the Y axis
# This means Z will form the "X" axis, or towards the observer
# And X will form the Y axis, or East
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
ax.plot(0,0, "ok", ms=2)
plot_points(ax, dates, alpha_dec_As[:,2], alpha_dec_As[:,0], True)
plot_points(ax, dates, alpha_dec_Bs[:,2], alpha_dec_Bs[:,0], False)
ax.plot(peri_A[2], peri_A[0], "ko", ms=3)
ax.plot(peri_B[2], peri_B[0], "ko", ms=3)
ax.plot(asc_A[2], asc_A[0], "^", color="C0", ms=3)
ax.plot(asc_B[2], asc_B[0], "^", color="C1", ms=3)
ax.axvline(0, ls=":", color="k")
ax.set_xlabel(r"$\Delta Z$ mas (towards from observer)")
ax.set_ylabel(r"$\Delta \delta$ mas")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "orbit_AB_Y.png")
def test_vel_rho_theta_one_period():
# Plot velocities, rho, and theta as function of time for one period
fig, ax = plt.subplots(nrows=4, sharex=True, figsize=(8,8))
ax[0].plot(dates, vAs)
# ax[0].errorbar(rv_jds_A, vAs_data, yerr=vAs_err, ls="")
# ax[0].plot(rv_jds_A, vAs_data, "k.")
ax[0].set_ylabel(r"$v_A$ km/s")
ax[1].plot(dates, vBs)
# ax[1].errorbar(rv_jds_B, vBs_data, yerr=vBs_err, ls="")
# ax[1].plot(rv_jds_B, vBs_data, "k.")
ax[1].set_ylabel(r"$v_B$ km/s")
ax[2].plot(dates, rho_ABs)
# ax[2].errorbar(astro_jds, rho_data, yerr=rho_err, ls="")
# ax[2].plot(astro_jds, rho_data, "k.")
ax[2].set_ylabel(r"$\rho_\mathrm{AB}$ [mas]")
ax[3].plot(dates, theta_ABs)
# ax[3].errorbar(astro_jds, theta_data, yerr=theta_err, ls="")
# ax[3].plot(astro_jds, theta_data, "k.")
ax[3].set_ylabel(r"$\theta$ [deg]")
ax[-1].set_xlabel("date")
fig.savefig(outdir + "orbit_vel_rho_theta_one_period.png", dpi=400)
# Now make a 3D Orbit and pop it up
def test_B_rel_A_plane():
# Plot the orbits in the plane
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
plot_points(ax, dates, xy_ABs[:,0], xy_ABs[:,1], False)
ax.plot(0,0, "*k", ms=10)
ax.set_xlabel(r"$X$ [AU]")
ax.set_ylabel(r"$Y$ [AU]")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "orbit_B_rel_A_plane.png")
def test_AB_plane():
fig, ax = plt.subplots(nrows=1, figsize=(5,5))
plot_points(ax, dates, xy_As[:,0], xy_As[:,1], True)
plot_points(ax, dates, xy_Bs[:,0], xy_Bs[:,1], False)
ax.plot(0,0, "ko", ms=10)
ax.set_xlabel(r"$X$ [AU]")
ax.set_ylabel(r"$Y$ [AU]")
ax.set_aspect("equal", "datalim")
fig.savefig(outdir + "orbit_AB_plane.png")
# Redo this using a finer space series of dates spanning the full series of observations.
# Pick a span of dates for the observations
dates = np.linspace(2443500, 2452010, num=3000) # [day]
orb = orbit_astrometry.Binary(a, e, i, omega, Omega, T0, M_tot, M_2, gamma, obs_dates=dates)
polar_dict = orb.get_orbit()
vAs, vBs, rho_ABs, theta_ABs = [polar_dict[key] for key in ("vAs", "vBs", "rhos", "thetas")]
# Convert to sky coordinates, using distance
rho_ABs = rho_ABs/dpc # [arcsec]
def test_vel_rho_theta():
# Plot velocities, rho, and theta as function of time
fig, ax = plt.subplots(nrows=4, sharex=True, figsize=(12,8))
ax[0].plot(dates, vAs)
ax[0].errorbar(rv_jds_A, vAs_data, yerr=vAs_err, ls="")
ax[0].plot(rv_jds_A, vAs_data, "k.")
ax[0].set_ylabel(r"$v_A$ km/s")
ax[1].plot(dates, vBs)
ax[1].errorbar(rv_jds_B, vBs_data, yerr=vBs_err, ls="")
ax[1].plot(rv_jds_B, vBs_data, "k.")
ax[1].set_ylabel(r"$v_B$ km/s")
ax[2].plot(dates, rho_ABs)
ax[2].errorbar(astro_jds, rho_data, yerr=rho_err, ls="")
ax[2].plot(astro_jds, rho_data, "k.")
ax[2].set_ylabel(r"$\rho_\mathrm{AB}$ [mas]")
ax[3].plot(dates, theta_ABs)
ax[3].errorbar(astro_jds, theta_data, yerr=theta_err, ls="")
ax[3].plot(astro_jds, theta_data, "k.")
ax[3].set_ylabel(r"$\theta$ [deg]")
ax[-1].set_xlabel("date")
fig.savefig(outdir + "orbit_vel_rho_theta.png", dpi=400)
plt.close('all')
plt.close('all')
| mit |
smerdis/pycortex | cortex/blender.py | 2 | 4007 | import os
import struct
import numpy as np
from matplotlib import cm, colors
import bpy.ops
from bpy import context as C
from bpy import data as D
def make_object(pts, polys, name="mesh"):
mesh = D.meshes.new(name)
mesh.from_pydata(pts.tolist(), [], polys.tolist())
obj = D.objects.new(name, mesh)
C.scene.objects.link(obj)
return obj, mesh
def add_vcolor(color, mesh=None, name='color', cmap=cm.RdBu, vmin=None, vmax=None):
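    # Paint per-vertex data onto a new vertex-color layer: 1D scalars are
    # normalized to [vmin, vmax] and mapped through `cmap`; RGB(-like) arrays
    # are used directly, indexed by each loop's vertex index.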
if mesh is None:
mesh = C.scene.objects.active.data
elif isinstance(mesh, str):
mesh = D.meshes[mesh]
bpy.ops.object.mode_set(mode='OBJECT')
if color.ndim == 1:
if vmin is None:
vmin = color.min()
if vmax is None:
vmax = color.max()
color = cmap((color - vmin) / (vmax - vmin))[:,:3]
loopidx = np.zeros((len(mesh.loops),), dtype=np.uint32)
mesh.loops.foreach_get('vertex_index', loopidx)
vcolor = mesh.vertex_colors.new(name)
for i, j in enumerate(loopidx):
vcolor.data[i].color = list(color[j])
return vcolor
def add_shapekey(shape, name=None):
bpy.ops.object.shape_key_add()
key = D.shape_keys[-1].key_blocks[-1]
if name is not None:
key.name = name
for i in range(len(key.data)):
key.data[i].co = shape[i]
return key
def cut_data(volumedata, name="retinotopy", projection="nearest", cmap=cm.RdBu, vmin=None, vmax=None, mesh="hemi"):
if isinstance(mesh, str):
mesh = D.meshes[mesh]
mapped = volumedata.map(projection)
if mapped.llen == len(mesh.vertices):
print("left hemisphere")
vcolor = mapped.left
else:
print ("right hemisphere")
vcolor = mapped.right
return add_vcolor(vcolor, mesh=mesh, name=name, cmap=cmap, vmin=vmin, vmax=vmax)
def fs_cut(subject, hemi):
from .freesurfer import get_surf
wpts, polys, curv = get_surf(subject, hemi, 'smoothwm')
ipts, _, _ = get_surf(subject, hemi, 'inflated')
obj, mesh = make_object(wpts, polys, name='hemi')
obj.scale = .1, .1, .1
C.scene.objects.active = obj
bpy.ops.object.shape_key_add()
add_vcolor(curv, mesh, vmin=-.6, vmax=.6, name='curvature')
add_shapekey(ipts, name='inflated')
obj.use_shape_key_edit_mode = True
return mesh
def write_patch(subject, hemi, name, mesh='hemi'):
from .freesurfer import get_paths, write_patch
if isinstance(mesh, str):
mesh = D.meshes[mesh]
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='DESELECT')
C.tool_settings.mesh_select_mode = False, True, False
bpy.ops.mesh.select_non_manifold()
bpy.ops.object.mode_set(mode='OBJECT')
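    # The non-manifold (open boundary) edges selected above trace the medial
    # wall of the hemisphere; record the vertices along that boundary.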
mwall_edge = set()
for edge in mesh.edges:
if edge.select:
mwall_edge.add(edge.vertices[0])
mwall_edge.add(edge.vertices[1])
bpy.ops.object.mode_set(mode='EDIT')
C.tool_settings.mesh_select_mode = True, False, False
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
seam = set()
for edge in mesh.edges:
if edge.use_seam:
seam.add(edge.vertices[0])
seam.add(edge.vertices[1])
edge.select = True
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_more()
bpy.ops.object.mode_set(mode='OBJECT')
smore = set()
for i, vert in enumerate(mesh.vertices):
if vert.select:
smore.add(i)
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
fverts = set()
for face in mesh.polygons:
fverts.add(face.vertices[0])
fverts.add(face.vertices[1])
fverts.add(face.vertices[2])
edges = mwall_edge | (smore - seam)
verts = fverts - seam
pts = [(v, D.shape_keys['Key'].key_blocks['inflated'].data[v].co) for v in verts]
write_patch(get_paths(subject, hemi).format(name=name), pts, edges) | bsd-2-clause |
murali-munna/scikit-learn | sklearn/ensemble/tests/test_forest.py | 57 | 35265 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, X, y):
# Check variable importances.
ForestClassifier = FOREST_CLASSIFIERS[name]
for n_jobs in [1, 2]:
clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
        assert_less(0, X_new.shape[1])
        assert_less(X_new.shape[1], X.shape[1])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
importances = clf.feature_importances_
assert_true(np.all(importances >= 0.0))
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = clf.feature_importances_
assert_almost_equal(importances, importances_bis)
def test_importances():
X, y = datasets.make_classification(n_samples=1000, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name in FOREST_CLASSIFIERS:
yield check_importances, name, X, y
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=40)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
def test_1d_input():
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert():
classifier = RandomForestClassifier()
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
| bsd-3-clause |
bharatsingh430/py-R-FCN-multiGPU | tools/train_svms.py | 16 | 13480 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Train post-hoc SVMs using the algorithm and hyper-parameters from
traditional R-CNN.
"""
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
from fast_rcnn.test import im_detect
from utils.timer import Timer
import caffe
import argparse
import pprint
import numpy as np
import numpy.random as npr
import cv2
from sklearn import svm
import os, sys
class SVMTrainer(object):
"""
Trains post-hoc detection SVMs for all classes using the algorithm
and hyper-parameters of traditional R-CNN.
"""
def __init__(self, net, imdb):
self.imdb = imdb
self.net = net
self.layer = 'fc7'
self.hard_thresh = -1.0001
self.neg_iou_thresh = 0.3
dim = net.params['cls_score'][0].data.shape[1]
scale = self._get_feature_scale()
print('Feature dim: {}'.format(dim))
print('Feature scale: {:.3f}'.format(scale))
self.trainers = [SVMClassTrainer(cls, dim, feature_scale=scale)
for cls in imdb.classes]
def _get_feature_scale(self, num_images=100):
TARGET_NORM = 20.0 # Magic value from traditional R-CNN
_t = Timer()
roidb = self.imdb.roidb
total_norm = 0.0
count = 0.0
inds = npr.choice(xrange(self.imdb.num_images), size=num_images,
replace=False)
for i_, i in enumerate(inds):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
total_norm += np.sqrt((feat ** 2).sum(axis=1)).sum()
count += feat.shape[0]
print('{}/{}: avg feature norm: {:.3f}'.format(i_ + 1, num_images,
total_norm / count))
return TARGET_NORM * 1.0 / (total_norm / count)
def _get_pos_counts(self):
counts = np.zeros((len(self.imdb.classes)), dtype=np.int)
roidb = self.imdb.roidb
for i in xrange(len(roidb)):
for j in xrange(1, self.imdb.num_classes):
I = np.where(roidb[i]['gt_classes'] == j)[0]
counts[j] += len(I)
for j in xrange(1, self.imdb.num_classes):
print('class {:s} has {:d} positives'.
format(self.imdb.classes[j], counts[j]))
return counts
def get_pos_examples(self):
counts = self._get_pos_counts()
for i in xrange(len(counts)):
self.trainers[i].alloc_pos(counts[i])
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
gt_inds = np.where(roidb[i]['gt_classes'] > 0)[0]
gt_boxes = roidb[i]['boxes'][gt_inds]
_t.tic()
scores, boxes = im_detect(self.net, im, gt_boxes)
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
cls_inds = np.where(roidb[i]['gt_classes'][gt_inds] == j)[0]
if len(cls_inds) > 0:
cls_feat = feat[cls_inds, :]
self.trainers[j].append_pos(cls_feat)
print 'get_pos_examples: {:d}/{:d} {:.3f}s' \
.format(i + 1, len(roidb), _t.average_time)
def initialize_net(self):
# Start all SVM parameters at zero
self.net.params['cls_score'][0].data[...] = 0
self.net.params['cls_score'][1].data[...] = 0
        # Initialize SVMs in a smart way. Not doing this because it's such
# a good initialization that we might not learn something close to
# the SVM solution.
# # subtract background weights and biases for the foreground classes
# w_bg = self.net.params['cls_score'][0].data[0, :]
# b_bg = self.net.params['cls_score'][1].data[0]
# self.net.params['cls_score'][0].data[1:, :] -= w_bg
# self.net.params['cls_score'][1].data[1:] -= b_bg
# # set the background weights and biases to 0 (where they shall remain)
# self.net.params['cls_score'][0].data[0, :] = 0
# self.net.params['cls_score'][1].data[0] = 0
def update_net(self, cls_ind, w, b):
self.net.params['cls_score'][0].data[cls_ind, :] = w
self.net.params['cls_score'][1].data[cls_ind] = b
def train_with_hard_negatives(self):
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
hard_inds = \
np.where((scores[:, j] > self.hard_thresh) &
(roidb[i]['gt_overlaps'][:, j].toarray().ravel() <
self.neg_iou_thresh))[0]
if len(hard_inds) > 0:
hard_feat = feat[hard_inds, :].copy()
new_w_b = \
self.trainers[j].append_neg_and_retrain(feat=hard_feat)
if new_w_b is not None:
self.update_net(j, new_w_b[0], new_w_b[1])
print(('train_with_hard_negatives: '
'{:d}/{:d} {:.3f}s').format(i + 1, len(roidb),
_t.average_time))
def train(self):
# Initialize SVMs using
# a. w_i = fc8_w_i - fc8_w_0
# b. b_i = fc8_b_i - fc8_b_0
# c. Install SVMs into net
self.initialize_net()
# Pass over roidb to count num positives for each class
# a. Pre-allocate arrays for positive feature vectors
# Pass over roidb, computing features for positives only
self.get_pos_examples()
# Pass over roidb
# a. Compute cls_score with forward pass
# b. For each class
# i. Select hard negatives
# ii. Add them to cache
# c. For each class
# i. If SVM retrain criteria met, update SVM
# ii. Install new SVM into net
self.train_with_hard_negatives()
# One final SVM retraining for each class
# Install SVMs into net
for j in xrange(1, self.imdb.num_classes):
new_w_b = self.trainers[j].append_neg_and_retrain(force=True)
self.update_net(j, new_w_b[0], new_w_b[1])
class SVMClassTrainer(object):
"""Manages post-hoc SVM training for a single object class."""
def __init__(self, cls, dim, feature_scale=1.0,
C=0.001, B=10.0, pos_weight=2.0):
self.pos = np.zeros((0, dim), dtype=np.float32)
self.neg = np.zeros((0, dim), dtype=np.float32)
self.B = B
self.C = C
self.cls = cls
self.pos_weight = pos_weight
self.dim = dim
self.feature_scale = feature_scale
self.svm = svm.LinearSVC(C=C, class_weight={1: 2, -1: 1},
intercept_scaling=B, verbose=1,
penalty='l2', loss='l1',
random_state=cfg.RNG_SEED, dual=True)
self.pos_cur = 0
self.num_neg_added = 0
self.retrain_limit = 2000
self.evict_thresh = -1.1
self.loss_history = []
def alloc_pos(self, count):
self.pos_cur = 0
self.pos = np.zeros((count, self.dim), dtype=np.float32)
def append_pos(self, feat):
num = feat.shape[0]
self.pos[self.pos_cur:self.pos_cur + num, :] = feat
self.pos_cur += num
def train(self):
print('>>> Updating {} detector <<<'.format(self.cls))
num_pos = self.pos.shape[0]
num_neg = self.neg.shape[0]
print('Cache holds {} pos examples and {} neg examples'.
format(num_pos, num_neg))
X = np.vstack((self.pos, self.neg)) * self.feature_scale
y = np.hstack((np.ones(num_pos),
-np.ones(num_neg)))
self.svm.fit(X, y)
w = self.svm.coef_
b = self.svm.intercept_[0]
scores = self.svm.decision_function(X)
pos_scores = scores[:num_pos]
neg_scores = scores[num_pos:]
pos_loss = (self.C * self.pos_weight *
np.maximum(0, 1 - pos_scores).sum())
neg_loss = self.C * np.maximum(0, 1 + neg_scores).sum()
reg_loss = 0.5 * np.dot(w.ravel(), w.ravel()) + 0.5 * b ** 2
tot_loss = pos_loss + neg_loss + reg_loss
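        # The bookkeeping above approximates the weighted hinge-loss objective:
        #   0.5 * (||w||^2 + b^2)
        #   + C * [pos_weight * sum_i max(0, 1 - s_i) + sum_j max(0, 1 + s_j)]
        # with s_i / s_j the decision values of positive / negative examples.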
self.loss_history.append((tot_loss, pos_loss, neg_loss, reg_loss))
for i, losses in enumerate(self.loss_history):
print((' {:d}: obj val: {:.3f} = {:.3f} '
'(pos) + {:.3f} (neg) + {:.3f} (reg)').format(i, *losses))
# Sanity check
scores_ret = (
X * 1.0 / self.feature_scale).dot(w.T * self.feature_scale) + b
assert np.allclose(scores, scores_ret[:, 0], atol=1e-5), \
"Scores from returned model don't match decision function"
return ((w * self.feature_scale, b), pos_scores, neg_scores)
def append_neg_and_retrain(self, feat=None, force=False):
if feat is not None:
num = feat.shape[0]
self.neg = np.vstack((self.neg, feat))
self.num_neg_added += num
if self.num_neg_added > self.retrain_limit or force:
self.num_neg_added = 0
new_w_b, pos_scores, neg_scores = self.train()
# scores = np.dot(self.neg, new_w_b[0].T) + new_w_b[1]
# easy_inds = np.where(neg_scores < self.evict_thresh)[0]
not_easy_inds = np.where(neg_scores >= self.evict_thresh)[0]
if len(not_easy_inds) > 0:
self.neg = self.neg[not_easy_inds, :]
# self.neg = np.delete(self.neg, easy_inds)
print(' Pruning easy negatives')
print(' Cache holds {} pos examples and {} neg examples'.
format(self.pos.shape[0], self.neg.shape[0]))
print(' {} pos support vectors'.format((pos_scores <= 1).sum()))
print(' {} neg support vectors'.format((neg_scores >= -1).sum()))
return new_w_b
else:
return None
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train SVMs (old skool)')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
# Must turn this off to prevent issues when digging into the net blobs to
# pull out features (tricky!)
cfg.DEDUP_BOXES = 0
# Must turn this on because we use the test im_detect() method to harvest
# hard negatives
cfg.TEST.SVM = True
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
# fix the random seed for reproducibility
np.random.seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
if args.gpu_id is not None:
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svm'
out_dir = os.path.dirname(args.caffemodel)
imdb = get_imdb(args.imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
# enhance roidb to contain flipped examples
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_images()
print 'done'
SVMTrainer(net, imdb).train()
filename = '{}/{}.caffemodel'.format(out_dir, out)
net.save(filename)
print 'Wrote svm model to: {:s}'.format(filename)
| mit |
russel1237/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
DESatAPSU/DAWDs | python/reformat_mags_to_CSV_SDSS.py | 1 | 2890 | #!/usr/bin/env python
# This script takes the SDSS synthetic photometry calcphot results (fits files) from the WD models and organizes the data in a CSV file.
# This should be run in a directory containing subdirectories of the WDs organized by name. For example:
# jacobs-air-2:DirContainingSubDirectories jacob$ ls
# SSSJ0005-0127 SSSJ0057-4914 SSSJ0206-4159 SSSJ0243p0119 SSSJ0515-3224
# This script is for the 20170515 set, modification is required on lines 17,
import numpy as np
import pandas as pd
import pyfits
import glob
import os
dirNameList = glob.glob('SSSJ?????????')
#dirNameList = ['./testcode']
for dirName in dirNameList:
print dirName
os.chdir(dirName)
filenameList = sorted(glob.glob('*.fits'))
starnameList = [(x.split('.ebv')[0])[4:] for x in filenameList]
#starnameList = [(x.split('_')[0]) for x in starnameListTemp]
output = [x.split('_') for x in starnameList]
outputFileString = output[0][0] + ".SDSS_reddened.mags.csv"
allcatdf = pd.DataFrame()
for i in range(len(filenameList)):
filename = filenameList[i]
starname = starnameList[i]
hdulist = pyfits.open(filename)
tbdata = hdulist[1].data
hdulist.close()
fnameList = tbdata['OBSMODE'].tolist()
abmagList = tbdata['COUNTRATE'].tolist()
#filterList = [(x.split(',')[1]) for fname in fnameList]
filterList = [ ((os.path.split(fname)[1]).split(',')[1])[:1] for fname in fnameList ]
#ccdList = [(fname.split('.')[-2]) for fname in fnameList]
snameList = len(filterList)*[starname]
#print filterList
try:
catdf = pd.DataFrame(np.column_stack([filterList,snameList,abmagList]), columns=['BAND', 'STARNAME','ABMAG'])
catdf.ABMAG = catdf.ABMAG.astype(float)
allcatdf = pd.concat([allcatdf,catdf])
except:
print 'Failed! %s Continuing to next filename...' % (filename)
continue
#endfor (i)
#allcatdf.loc[:,'STARNAMECCDNUM'] = allcatdf.loc[:,'STARNAME'] + '_xxx_' + allcatdf.loc[:,'CCDNUM']
allcatdf.reset_index(drop=True, inplace=True)
allcatdf2 = allcatdf.pivot_table('ABMAG', index='STARNAME', columns=['BAND'], aggfunc=sum)
allcatdf2['STARNAME'] = allcatdf2.index.astype(str)
allcatdf2['STARNAME'] = allcatdf2['STARNAME'].str.split('_xxx_').str.get(0)
#allcatdf2['CCDNUM'] = allcatdf2.index.astype(str)
#allcatdf2['CCDNUM'] = allcatdf2['CCDNUM'].str.split('_xxx_').str.get(1)
cols = ['STARNAME','u', 'g', 'r', 'i', 'z',]
allcatdf2 = allcatdf2[cols]
allcatdf2.reset_index(drop=True, inplace=True)
#allcatdf2.rename(columns = {'y':'Y', 'v':'VR'}, inplace=True)
allcatdf2.to_csv(outputFileString, index=False)
os.chdir('..')
#endfor (dirName)
exit()
| mit |
sanketloke/scikit-learn | examples/feature_selection/plot_feature_selection.py | 95 | 2847 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features,
but also selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='darkorange')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight',
color='navy')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='c')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
krez13/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have the following installed:
* scikit-learn
This script runs two benchmarks.
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
herilalaina/scikit-learn | examples/mixture/plot_gmm_selection.py | 95 | 3310 | """
================================
Gaussian Mixture Model Selection
================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
import numpy as np
import itertools
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
print(__doc__)
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a Gaussian mixture with EM
gmm = mixture.GaussianMixture(n_components=n_components,
covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
'darkorange'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, cov, color) in enumerate(zip(clf.means_, clf.covariances_,
color_iter)):
v, w = linalg.eigh(cov)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180. * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
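    # 2 * sqrt(2 * eigenvalue) gives the full ellipse axis lengths, i.e. the
    # ellipse is drawn at Mahalanobis radius sqrt(2) around the component mean.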
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
lin-credible/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 114 | 11393 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.ones((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
zhyuey/maps | usa_map_general/usa_map.py | 1 | 2854 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import csv
import sys, os
import shapefile
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import cm
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from matplotlib.collections import LineCollection
from matplotlib.patches import PathPatch
from matplotlib.font_manager import FontProperties
curdir = sys.path[0] + '/'
mpl.rcParams['font.family'] = 'sans-serif'
thisblue = '#23238e'
fig = plt.figure(figsize=(11.7, 8.3))
plt.subplots_adjust(
left=0.05, right=0.95, top=0.95, bottom=0.05, wspace=0.15, hspace=0.05)
ax = plt.subplot(111)
x1 = -128.
x2 = -63.5
y1 = 24
y2 = 51
m = Basemap(resolution='i', projection='merc', llcrnrlat=y1,
urcrnrlat=y2, llcrnrlon=x1, urcrnrlon=x2)
m.fillcontinents(color='0.8')
m.drawmapboundary(fill_color= thisblue)
m.drawparallels(np.arange(y1, y2, 5.), labels=[
1, 0, 0, 0], color='black', labelstyle='+/-', linewidth=0.2) # draw parallels
m.drawmeridians(np.arange(x1, x2, 5.), labels=[
0, 0, 0, 1], color='black', labelstyle='+/-', linewidth=0.2) # draw meridians
r = shapefile.Reader(curdir + "USA_adm1")
shapes = r.shapes()
records = r.records()
cnt = 0
for record, shape in zip(records, shapes):
print(cnt)
lons,lats = zip(*shape.points)
data = np.array(m(lons, lats)).T
if len(shape.parts) == 1:
segs = [data,]
else:
segs = []
for i in range(1,len(shape.parts)):
index = shape.parts[i-1]
index2 = shape.parts[i]
segs.append(data[index:index2])
segs.append(data[index2:])
lines = LineCollection(segs,antialiaseds=(1,))
lines.set_facecolors(np.random.rand(3, 1) * 0.5 + 0.5)
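    # np.random.rand(3, 1) * 0.5 + 0.5 keeps every RGB channel in [0.5, 1.0),
    # so each state polygon gets a random light pastel fill.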
lines.set_edgecolors('k')
lines.set_linewidth(0.1)
ax.add_collection(lines)
cnt += 1
infile = open(curdir +'state_info_revised.csv','r')
csvfile = csv.reader(infile)
for lakepoly in m.lakepolygons:
lp = Polygon(lakepoly.boundary, zorder=3)
lp.set_facecolor(thisblue)
lp.set_linewidth(0.1)
ax.add_patch(lp)
for line in csvfile:
lon = (float(line[0]) + float(line[2]))/2 + float(line[5])
lat = (float(line[1]) + float(line[3]))/2 + float(line[6])
x, y = m(lon, lat)
name = line[4].replace('\\n', '\n')
plt.text(x, y, name, horizontalalignment='center', verticalalignment='center', fontsize=int(line[7]))
xx, yy = m(-72.0, 26.0)
plt.text(xx, yy, u'Made by zhyuey', color='yellow')
plt.title('Map of contiguous United States', fontsize=24)
# plt.savefig('usa_state_75.png', dpi=75)
plt.savefig('usa_state_300.png', dpi=300)
# plt.savefig('usa_state_600.png', dpi=600)
| mit |
EichlerLab/chm1_scripts | GC_content_explore.py | 2 | 8819 | #!/usr/bin/env python
import pysam
import re
import matplotlib
import matplotlib.pyplot as plt
import numpy
import numpy as np  # alias required by the helpers below that use np.*
from pbcore.io import CmpH5Reader
from pbcore.io import CmpH5Alignment
def IdentityFromCIGAR(cigar):
nMatch = 0
nIns = 0
nDel = 0
for cig in cigar:
if (cig[0] == 0):
nMatch += cig[1]
elif(cig[0] == 1):
nIns += cig[1]
elif(cig[0] == 2):
nDel += cig[1]
denom = float(nMatch + nIns + nDel)
return nMatch / denom
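# Illustrative example (values assumed): for a CIGAR of [(0, 90), (1, 5), (2, 5)]
# -- 90 matched, 5 inserted and 5 deleted bases in pysam's numeric op codes --
# IdentityFromCIGAR returns 90 / (90 + 5 + 5) = 0.9.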
class AlignmentSummary:
def __init__(self, identity, length):
self.identity = identity
self.length = length
self.zmw = 0
self.sub = 0
def SamToMap(samFileName, samMap):
sf = pysam.Samfile( samFileName, "r" )
for aln in sf.fetch():
if (aln.rname == "*"):
continue
ident = IdentityFromCIGAR(aln.cigar)
samMap[aln.qname] = AlignmentSummary(ident, aln.qlen)
def GetSubreadGC(subread):
return (float(subread.basecalls().count('G') + subread.basecalls().count('C')) / len(subread.basecalls()))
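# For example, a subread with basecalls 'GGCCAATT' would give a GC fraction of
# (2 + 2) / 8 = 0.5.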
def GetGC(read):
maxLen = 0
maxS = 0
for s in range(0,len(read.subreads)):
l = len(read.subreads[s].basecalls())
if (l > maxLen):
maxLen = l
maxS = s
return (float(read.subreads[maxS].basecalls().count('G') + read.subreads[maxS].basecalls().count("C"))) / len(read.subreads[maxS].basecalls())
#dh5 = "/net/eichler/vol20/projects/pacbio/backups/incoming/130625_MYD_eee_20kb_368/D01_1/Analysis_Results/m130626_034031_42134_c100534392550000001823079711101324_s1_p0.bas.h5"
#dsam = "/net/eichler/vol20/projects/pacbio/nobackups/results/130625_MYD_eee_20kb_368/D01_1/D.sam"
dh5 = "/mnt/pacbio/D01_1/Analysis_Results/m130626_034031_42134_c100534392550000001823079711101324_s1_p0.bas.h5"
dsam = "/mnt/pacbio_analysis/D01_1/D.sam"
from pbcore.io import BasH5Reader
dReader = BasH5Reader(dh5)
#
# key:
# rs read score
# rl read length
# mi mapped identity
# ml mapped length
# m indices of mapped reads
# um indices of unmapped reads
# s mapped subreads
# us unmapped subreads
class Count:
def __init__(self):
self.fields = ["rs", "rl", "mi", "ml", "m", "um", "s", "us"]
self.data = { f: [] for f in self.fields }
self.npdata = {}
def ToNumpy(self):
self.npdata = { f: numpy.array(self.data[f]) for f in self.fields }
def StoreMapped(fileNames, alnMap, stats):
for fileName in fileNames:
reader = BasH5Reader(fileName)
for zmw in reader.sequencingZmws:
for s in reader[zmw].subreads:
stats.data["rs"].append(reader[zmw].readScore)
stats.data["rl"].append(s.readEnd - s.readStart)
if (s.readName in alnMap):
stats.data["m"].append(len(stats.data["rs"]) - 1 )
stats.data["ml"].append(alnMap[s.readName].length)
stats.data["mi"].append(alnMap[s.readName].identity)
stats.data["s"].append(s)
else:
stats.data["um"].append(len(stats.data["rs"]) - 1 )
stats.data["ml"].append(0)
stats.data["mi"].append(0)
stats.data["us"].append(s)
dfn = ["/mnt/pacbio/D01_1/Analysis_Results/m130626_034031_42134_c100534392550000001823079711101324_s1_p0.bas.h5"]
dsam = "/mnt/pacbio_analysis/D01_1/D.sam"
dcmp = "/mnt/pacbio_analysis/D01_1/D.cmp.h5"
gfn = ["/mnt/pacbio/G01_1/Analysis_Results/m130626_103730_42134_c100534392550000001823079711101327_s1_p0.bas.h5","/mnt/pacbio/G01_1/Analysis_Results/m130626_103730_42134_c100534392550000001823079711101327_s2_p0.bas.h5"]
gsam = "/mnt/pacbio_analysis/G01_1/G.sam"
hfn = ["/mnt/pacbio/H01_1/Analysis_Results/m130626_125440_42134_c100534382550000001823079711101330_s1_p0.bas.h5","/mnt/pacbio/H01_1/Analysis_Results/m130626_125440_42134_c100534382550000001823079711101330_s2_p0.bas.h5"]
hsam = "/mnt/pacbio_analysis/H01_1/H.sam"
ffn = ["/mnt/pacbio/F01_1/Analysis_Results/m130626_081902_42134_c100534392550000001823079711101326_s1_p0.bas.h5","/mnt/pacbio/F01_1/Analysis_Results/m130626_081902_42134_c100534392550000001823079711101326_s2_p0.bas.h5"]
fsam = "/mnt/pacbio_analysis/F01_1/F.sam"
dStats = Count()
dh5Files = [dh5]
dSamMap = {}
SamToMap(dsam, dSamMap)
StoreMapped(dfn, dSamMap, dStats)
dStats.ToNumpy()
fStats = Count()
fSamMap = {}
SamToMap(fsam, fSamMap)
StoreMapped(ffn, fSamMap, fStats)
fStats.ToNumpy()
gStats = Count()
gSamMap = {}
SamToMap(gsam, gSamMap)
StoreMapped(gfn, gSamMap, gStats)
gStats.ToNumpy()
hStats = Count()
hSamMap = {}
SamToMap(hsam, hSamMap)
StoreMapped(hfn, hSamMap, hStats)
hStats.ToNumpy()
def ArrayHist(array, nbins=30):
h = numpy.histogram(array, bins=nbins)
return (h[1][0:-1], h[0])
def StatsHist(stats, dataset="rs", which="m", minValue=None):
d = stats.npdata[dataset][stats.npdata[which]]
if (minValue is not None):
d = d[d > minValue]
h = numpy.histogram(d, bins=30)
return (h[1][0:-1], h[0])
dh = StatsHist(dStats, dataset="rs", which="m", minValue = 0.25)
fh = StatsHist(fStats, dataset="rs", which="m", minValue = 0.25)
duh =StatsHist(dStats, dataset="rs", which="um", minValue = 0.25)
fuh =StatsHist(fStats, dataset="rs", which="um", minValue = 0.25)
ax = plt.axes
plt.scatter(dh[0], dh[1], axes=ax)
plt.scatter(fh[0], fh[1], axes=ax, color="red")
plt.scatter(duh[0], duh[1], axes=ax, color="LightBlue")
plt.scatter(fuh[0], fuh[1], axes=ax, color="pink")
plt.show()
dCmpR = CmpH5Reader(dcmp)
mgc = numpy.array([GetSubreadGC(sr) for sr in gStats.npdata["s"]])
umgc = numpy.array([GetSubreadGC(sr) for sr in gStats.npdata["us"]])
dmgc = numpy.array([GetSubreadGC(sr) for sr in dStats.npdata["s"]])
dumgc = numpy.array([GetSubreadGC(sr) for sr in dStats.npdata["us"]])
hmgc = numpy.array([GetSubreadGC(sr) for sr in hStats.npdata["s"]])
humgc = numpy.array([GetSubreadGC(sr) for sr in hStats.npdata["us"]]
)
def GetLengths(subreads):
return numpy.array([len(sr.basecalls()) for sr in subreads])
def IMean(array, indices):
return np.mean(array[indices])
def LimitIndices(array, minValue = 0, maxValue=10000000):
lowi = array > minValue
highi = array < maxValue
return lowi & highi
hl = GetLengths(hStats.npdata["s"])
hul = GetLengths(hStats.npdata["us"])
dl = GetLengths(dStats.npdata["s"])
dul = GetLengths(dStats.npdata["us"])
gmgch = ArrayHist(mgc)
gumgch = ArrayHist(umgc)
dmgch = ArrayHist(dmgc)
dumgch = ArrayHist(umgc)
ax1 = plt.subplot(121)
ax1.scatter(dl, dmgc, color="DarkRed", alpha=0.10)
ax1.scatter(hl, hmgc, color="DarkBlue", alpha=0.10)
ax2 = plt.subplot(122)
ax2.scatter(dul, dumgc, color="HotPink", alpha=0.10)
ax2.scatter(hul, humgc, color="DodgerBlue", alpha=0.10)
plt.show()
hi = LimitIndices(hmgc, 0.1, 0.7)
hui = LimitIndices(humgc, 0.1, 0.7)
di = LimitIndices(dmgc, 0.1, 0.7)
dui = LimitIndices(dumgc, 0.1, 0.7)
hi = hl > 1000
hui = hul > 1000
di =dl > 1000
dui = dul > 1000
ax1 = plt.subplot(121)
ax1.scatter(dl[di], dmgc[di], color="DarkRed", alpha=0.10)
ax1.scatter(hl[hi], hmgc[hi], color="DarkBlue", alpha=0.10)
ax2 = plt.subplot(122)
ax2.scatter(dul[dui], dumgc[dui], color="HotPink", alpha=0.10)
ax2.scatter(hul[hui], humgc[hui], color="DodgerBlue", alpha=0.10)
plt.show()
print numpy.mean(hmgc[hi])
print numpy.mean(humgc[hui])
print numpy.mean(dmgc[di])
print numpy.mean(dumgc[dui])
def GetGCContentByLength(lens, gc, nBins = 100):
maxLength = np.max(lens)
binSize = maxLength/nBins
gcBins = [ [] for i in range(0,nBins)]
for i in range(0,len(lens)):
binIndex = min(int(lens[i]/binSize), nBins-1)
gcBins[binIndex].append(gc[i])
means = [ np.mean(gcBins[i]) if (len(gcBins[i]) > 0) else 0 for i in range(0,nBins) ]
sds = [ np.std(gcBins[i]) if (len(gcBins[i]) > 0) else 0 for i in range(0,nBins) ]
x = [ binSize * i for i in range(0,nBins) ]
return (x, np.array(means), np.array(sds))
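# Example of the binning (values assumed): with a maximum read length of 20000
# and nBins=100 the bin size is 200 bp, so a 1050 bp read contributes its GC
# fraction to bin index 5.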
(dx,dm,ds) = GetGCContentByLength(dl, dmgc)
(dux,dum,dus) = GetGCContentByLength(dul, dumgc)
(hx,hm,hs) = GetGCContentByLength(hl, hmgc)
(hux,hum,hus) = GetGCContentByLength(hul, humgc)
fig = plt.figure(figsize=(12,6))
ax1 = plt.subplot(121)
ax1.errorbar(hx,hm,yerr=hs, ecolor="DodgerBlue", color="blue")
ax1.errorbar(dx,dm,yerr=ds, ecolor="HotPink", color="red")
ax1.legend(("Dra1", "control"))
ax1.set(title="GC content of mapped reads by length")
ax1.axis([-1000,20000,0.1,0.7])
ax2 = plt.subplot(122)
ax2.errorbar(hux,hum,yerr=hus, ecolor="DodgerBlue", color="blue")
ax2.errorbar(dux,dum,yerr=dus, ecolor="HotPink", color="red")
ax2.set(title="GC content of unmapped reads by length")
ax2.axis([-1000,20000,0.1,0.7])
ax2.legend(("Dra1", "control"))
plt.show()
| mit |
henridwyer/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 72 | 13586 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset and which features a point in time split between the train and
test sets. The compressed dataset size is around 14 Mb compressed. Once
uncompressed the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple tf-idf
vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
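# A minimal usage sketch (the category names here are only illustrative):
#
#   from sklearn.datasets import fetch_20newsgroups
#   train = fetch_20newsgroups(subset='train',
#                              categories=['sci.space', 'rec.autos'],
#                              remove=('headers', 'footers', 'quotes'))
#   print(train.target_names)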
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
    (for example, because they contain the string 'writes:').
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
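# Illustrative usage sketch (not part of the original module; the helper name
# below is hypothetical and is never called at import time). It shows a
# typical call to the loader above with the metadata filters enabled.
def _example_fetch_20newsgroups():
    cats = ['alt.atheism', 'sci.space']
    bunch = fetch_20newsgroups(subset='train', categories=cats,
                               remove=('headers', 'footers', 'quotes'),
                               shuffle=True, random_state=42)
    print('loaded %d documents from categories %r'
          % (len(bunch.data), bunch.target_names))
    return bunch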
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
    subset: 'train', 'test' or 'all', optional
        Select the dataset to load: 'train' for the training set, 'test'
        for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
        Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
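# Illustrative usage sketch (not part of the original module; the helper name
# is hypothetical). Note that, as implemented above, the returned matrix holds
# L2-normalized token counts rather than tf-idf weights.
def _example_fetch_20newsgroups_vectorized():
    bunch = fetch_20newsgroups_vectorized(subset='train')
    print('feature matrix shape: %s, %d classes'
          % (str(bunch.data.shape), len(bunch.target_names)))
    return bunch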
| bsd-3-clause |
unicef/rhizome | rhizome/api/serialize.py | 1 | 7382 | import json
import StringIO
import urlparse
from django.core.serializers import json as djangojson
from pandas import DataFrame
from tastypie.serializers import Serializer
import rhizome.models.campaign_models as c_m
from rhizome.models.indicator_models import Indicator
from rhizome.models.location_models import Location
class CustomJSONSerializer(Serializer):
"""Does not allow out of range float values
(in strict compliance with the JSON specification).
Instead replaces these values with NULL."""
formats = ['json', 'urlencode']
content_types = {
'json': 'application/json',
'urlencode': 'application/x-www-form-urlencoded',
}
def from_urlencode(self, data, options=None):
""" handles basic formencoded url posts """
qs = dict((k, v if len(v) > 1 else v[0])
for k, v in urlparse.parse_qs(data).iteritems())
return qs
def to_urlencode(self, content):
pass
def to_json(self, data, options=None):
options = options or {}
data = self.to_simple(data, options)
return djangojson.json.dumps(
data,
allow_nan=True,
cls=NanEncoder,
sort_keys=True,
ensure_ascii=False)
class NanEncoder(djangojson.DjangoJSONEncoder):
nan_str = 'null'
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = json.encoder.encode_basestring_ascii
else:
_encoder = json.encoder.encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan,
_repr=json.encoder.FLOAT_REPR, _inf=json.encoder.INFINITY,
_neginf=-json.encoder.INFINITY):
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on the
# internals.
if o != o:
text = self.nan_str
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
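        # Note added for clarity: when the C fast path below is taken it does
        # not call the floatstr closure above, so NaN would be rendered as the
        # non-standard 'NaN' token. CustomJSONSerializer.to_json sidesteps this
        # by always passing sort_keys=True, which forces the pure-Python path.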
if (_one_shot and json.encoder.c_make_encoder is not None
and self.indent is None and not self.sort_keys):
_iterencode = json.encoder.c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan)
else:
_iterencode = json.encoder._make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot)
return _iterencode(o, 0)
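# Illustrative example of the encoder above (assuming stock json-module
# behaviour; the values are hypothetical):
#
#     djangojson.json.dumps({'value': float('nan')}, cls=NanEncoder,
#                           allow_nan=True, sort_keys=True)
#     # -> '{"value": null}'
#
# With indent=None and sort_keys=False the C fast path would be used instead
# and the NaN substitution would not happen.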
class CustomSerializer(Serializer):
formats = ['json', 'csv', 'urlencode']
content_types = {
'json': 'application/json',
'csv': 'text/csv',
'urlencode': 'application/x-www-form-urlencoded',
}
def from_urlencode(self, data, options=None):
""" handles basic formencoded url posts """
qs = dict((k, v if len(v) > 1 else v[0])
for k, v in urlparse.parse_qs(data).iteritems())
return qs
def to_urlencode(self, content):
pass
def to_csv(self, data, options=None):
'''
        First look up the metadata (campaign, location, indicator) and build a
        map from id to name. Afterwards, iterate through the data objects
        passed in, unpack the indicator objects and create a dataframe which
        gets converted to a csv.
'''
# response['Content-Disposition'] = 'attachment; filename="somefilename.csv"'
options = options or {}
data = self.to_simple(data, options)
data_objects = data['objects']
try:
meta_lookup = self.build_meta_lookup(data_objects)
except KeyError:
# a little bit of a hack, but this is the condition that for now
# alerts the system that this is a raw csv for a document_id.
submission_data = [row['submission_json'] for row in data_objects]
return self.clean_and_prep_csv(submission_data)
expanded_objects = []
for obj in data_objects:
expanded_obj = {}
expanded_obj['location'] = meta_lookup['location'][obj['location']]
expanded_obj['campaign'] = meta_lookup['campaign'][obj['campaign']]
for ind_dict in obj['indicators']:
indicator_string = meta_lookup['indicator'][
int(ind_dict['indicator'])]
indicator_value = ind_dict['value']
expanded_obj[indicator_string] = indicator_value
expanded_objects.append(expanded_obj)
csv = self.clean_and_prep_csv(expanded_objects)
return csv
def clean_and_prep_csv(self, data_objects):
csv_df = DataFrame(data_objects)
## rearrange column order ( POLIO-200 ) ##
## http://stackoverflow.com/questions/13148429 ##
cols = csv_df.columns.tolist()
cols = cols[-2:] + cols[:-2]
csv_df = csv_df[cols]
csv = StringIO.StringIO(str(csv_df.to_csv(index=False)))
return csv
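    # Illustrative note (not in the original source): the column reordering in
    # clean_and_prep_csv above moves the last two columns to the front, e.g.
    # ['a', 'b', 'c', 'd'] becomes ['c', 'd', 'a', 'b'].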
def build_meta_lookup(self, object_list):
'''
        Instead of hitting the database every time you need to find the
        string for a particular metadata item, build a dictionary
        once, store it in memory and access metadata values through it.
'''
# set up the lookup object
meta_lookup = {'location': {}, 'campaign': {}, 'indicator': {}}
# find the location and campaign ids from the object list
location_ids = [obj['location'] for obj in object_list]
campaign_ids = [obj['campaign'] for obj in object_list]
        # every object carries a list of indicator entries; collect the
        # indicator ids referenced across all of the objects
indicator_nodes = [obj['indicators'] for obj in object_list]
indicator_ids = []
for ind in indicator_nodes:
indicator_ids.extend([i['indicator'] for i in ind])
for r in Location.objects.filter(id__in=location_ids):
meta_lookup['location'][r.id] = r.__unicode__()
for c in c_m.Campaign.objects.filter(id__in=campaign_ids):
meta_lookup['campaign'][c.id] = c.__unicode__()
for ind in Indicator.objects.filter(id__in=indicator_ids):
meta_lookup['indicator'][ind.id] = ind.__unicode__()
return meta_lookup
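    # Illustrative shape of the returned lookup (ids and names below are
    # hypothetical): {'location': {1: u'Nigeria'}, 'campaign': {7: u'2014-01'},
    # 'indicator': {12: u'Number of children vaccinated'}}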
| agpl-3.0 |
jamesmf/recommenderW2V | scripts/keras/examples/kaggle_otto_nn.py | 8 | 3737 | from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
'''
This demonstrates how to reach a score of 0.4890 (local validation)
on the Kaggle Otto challenge, with a deep net using Keras.
Compatible Python 2.7-3.4
Recommended to run on GPU:
Command: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python kaggle_otto_nn.py
On EC2 g2.2xlarge instance: 19s/epoch. 6-7 minutes total training time.
Best validation score at epoch 21: 0.4881
Try it at home:
- with/without BatchNormalization (BatchNormalization helps!)
- with ReLU or with PReLU (PReLU helps!)
- with smaller layers, larger layers
- with more layers, fewer layers
- with different optimizers (SGD+momentum+decay is probably better than Adam!)
Get the data from Kaggle: https://www.kaggle.com/c/otto-group-product-classification-challenge/data
'''
np.random.seed(1337) # for reproducibility
def load_data(path, train=True):
df = pd.read_csv(path)
X = df.values.copy()
if train:
np.random.shuffle(X) # https://youtu.be/uyUXoap67N8
X, labels = X[:, 1:-1].astype(np.float32), X[:, -1]
return X, labels
else:
X, ids = X[:, 1:].astype(np.float32), X[:, 0].astype(str)
return X, ids
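# Layout assumed by the slicing in load_data (Kaggle Otto csv): the first
# column is the sample id, the last column of train.csv is the class label,
# and the columns in between are the numeric features (feat_1 ... feat_93 in
# the original data).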
def preprocess_data(X, scaler=None):
if not scaler:
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
def preprocess_labels(labels, encoder=None, categorical=True):
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y)
return y, encoder
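# Illustrative example (values hypothetical): preprocess_labels(['Class_1',
# 'Class_2', 'Class_1']) maps the strings to integer ids [0, 1, 0] and, with
# categorical=True, one-hot encodes them to [[1, 0], [0, 1], [1, 0]].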
def make_submission(y_prob, ids, encoder, fname):
with open(fname, 'w') as f:
f.write('id,')
f.write(','.join([str(i) for i in encoder.classes_]))
f.write('\n')
for i, probs in zip(ids, y_prob):
probas = ','.join([i] + [str(p) for p in probs.tolist()])
f.write(probas)
f.write('\n')
print("Wrote submission to file {}.".format(fname))
print("Loading data...")
X, labels = load_data('train.csv', train=True)
X, scaler = preprocess_data(X)
y, encoder = preprocess_labels(labels)
X_test, ids = load_data('test.csv', train=False)
X_test, _ = preprocess_data(X_test, scaler)
nb_classes = y.shape[1]
print(nb_classes, 'classes')
dims = X.shape[1]
print(dims, 'dims')
print("Building model...")
model = Sequential()
model.add(Dense(dims, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, nb_classes, init='glorot_uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer="adam")
print("Training model...")
model.fit(X, y, nb_epoch=20, batch_size=128, validation_split=0.15)
print("Generating submission...")
proba = model.predict_proba(X_test)
make_submission(proba, ids, encoder, fname='keras-otto.csv')
| gpl-2.0 |
arhik/nupic | examples/opf/tools/MirrorImageViz/mirrorImageViz.py | 50 | 7221 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Author: Surabhi Gupta
import sys
import numpy as np
import matplotlib.pylab as pyl
def analyzeOverlaps(activeCoincsFile, encodingsFile, dataset):
'''Mirror Image Visualization: Shows the encoding space juxtaposed against the
coincidence space. The encoding space is the bottom-up sensory encoding and
the coincidence space depicts the corresponding activation of coincidences in
the SP. Hence, the mirror image visualization is a visual depiction of the
mapping of SP cells to the input representations.
Note:
* The files spBUOut and sensorBUOut are assumed to be in the output format
used for LPF experiment outputs.
* BU outputs for some sample datasets are provided. Specify the name of the
dataset as an option while running this script.
'''
lines = activeCoincsFile.readlines()
inputs = encodingsFile.readlines()
w = len(inputs[0].split(' '))-1
patterns = set([])
encodings = set([])
coincs = [] #The set of all coincidences that have won at least once
reUsedCoincs = []
firstLine = inputs[0].split(' ')
size = int(firstLine.pop(0))
spOutput = np.zeros((len(lines),40))
inputBits = np.zeros((len(lines),w))
print 'Total n:', size
print 'Total number of records in the file:', len(lines), '\n'
print 'w:', w
count = 0
for x in xrange(len(lines)):
inputSpace = [] #Encoded representation for each input
spBUout = [int(z) for z in lines[x].split(' ')]
spBUout.pop(0) #The first element of each row of spBUOut is the size of the SP
temp = set(spBUout)
spOutput[x]=spBUout
input = [int(z) for z in inputs[x].split(' ')]
input.pop(0) #The first element of each row of sensorBUout is the size of the encoding space
tempInput = set(input)
inputBits[x]=input
#Creating the encoding space
for m in xrange(size):
if m in tempInput:
inputSpace.append(m)
else:
inputSpace.append('|') #A non-active bit
repeatedBits = tempInput.intersection(encodings) #Storing the bits that have been previously active
reUsed = temp.intersection(patterns) #Checking if any of the active cells have been previously active
#Dividing the coincidences into two difference categories.
if len(reUsed)==0:
coincs.append((count,temp,repeatedBits,inputSpace, tempInput)) #Pattern no, active cells, repeated bits, encoding (full), encoding (summary)
else:
reUsedCoincs.append((count,temp,repeatedBits,inputSpace, tempInput))
patterns=patterns.union(temp) #Adding the active cells to the set of coincs that have been active at least once
encodings = encodings.union(tempInput)
count +=1
overlap = {}
overlapVal = 0
seen = []
seen = (printOverlaps(coincs, coincs, seen))
print len(seen), 'sets of 40 cells'
seen = printOverlaps(reUsedCoincs, coincs, seen)
Summ=[]
for z in coincs:
c=0
for y in reUsedCoincs:
c += len(z[1].intersection(y[1]))
Summ.append(c)
print 'Sum: ', Summ
for m in xrange(3):
displayLimit = min(51, len(spOutput[m*200:]))
if displayLimit>0:
drawFile(dataset, np.zeros([len(inputBits[:(m+1)*displayLimit]),len(inputBits[:(m+1)*displayLimit])]), inputBits[:(m+1)*displayLimit], spOutput[:(m+1)*displayLimit], w, m+1)
else:
print 'No more records to display'
pyl.show()
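# Input line format inferred from the parsing above (values are illustrative):
# each row of <dataset>_spBUOut.txt / <dataset>_sensorBUOut.txt is a
# space-separated list whose first element is the size of the space and whose
# remaining elements are the indices of the active bits, e.g.
#   1024 3 17 42 97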
def drawFile(dataset, matrix, patterns, cells, w, fnum):
'''The similarity of two patterns in the bit-encoding space is displayed alongside
their similarity in the sp-coinc space.'''
score=0
count = 0
assert len(patterns)==len(cells)
for p in xrange(len(patterns)-1):
matrix[p+1:,p] = [len(set(patterns[p]).intersection(set(q)))*100/w for q in patterns[p+1:]]
matrix[p,p+1:] = [len(set(cells[p]).intersection(set(r)))*5/2 for r in cells[p+1:]]
score += sum(abs(np.array(matrix[p+1:,p])-np.array(matrix[p,p+1:])))
count += len(matrix[p+1:,p])
print 'Score', score/count
fig = pyl.figure(figsize = (10,10), num = fnum)
pyl.matshow(matrix, fignum = fnum)
pyl.colorbar()
pyl.title('Coincidence Space', verticalalignment='top', fontsize=12)
pyl.xlabel('The Mirror Image Visualization for '+dataset, fontsize=17)
pyl.ylabel('Encoding space', fontsize=12)
def printOverlaps(comparedTo, coincs, seen):
""" Compare the results and return True if success, False if failure
Parameters:
--------------------------------------------------------------------
coincs: Which cells are we comparing?
comparedTo: The set of 40 cells we being compared to (they have no overlap with seen)
seen: Which of the cells we are comparing to have already been encountered.
This helps glue together the unique and reused coincs
"""
inputOverlap = 0
cellOverlap = 0
for y in comparedTo:
closestInputs = []
closestCells = []
if len(seen)>0:
inputOverlap = max([len(seen[m][1].intersection(y[4])) for m in xrange(len(seen))])
cellOverlap = max([len(seen[m][0].intersection(y[1])) for m in xrange(len(seen))])
for m in xrange( len(seen) ):
if len(seen[m][1].intersection(y[4]))==inputOverlap:
closestInputs.append(seen[m][2])
if len(seen[m][0].intersection(y[1]))==cellOverlap:
closestCells.append(seen[m][2])
seen.append((y[1], y[4], y[0]))
print 'Pattern',y[0]+1,':',' '.join(str(len(z[1].intersection(y[1]))).rjust(2) for z in coincs),'input overlap:', inputOverlap, ';', len(closestInputs), 'closest encodings:',','.join(str(m+1) for m in closestInputs).ljust(15), \
'cell overlap:', cellOverlap, ';', len(closestCells), 'closest set(s):',','.join(str(m+1) for m in closestCells)
return seen
if __name__=='__main__':
  if len(sys.argv)<2: # a dataset name must be supplied on the command line
print ('Input files required. Read documentation for details.')
else:
dataset = sys.argv[1]
activeCoincsPath = dataset+'/'+dataset+'_spBUOut.txt'
encodingsPath = dataset+'/'+dataset+'_sensorBUOut.txt'
activeCoincsFile=open(activeCoincsPath, 'r')
encodingsFile=open(encodingsPath, 'r')
analyzeOverlaps(activeCoincsFile, encodingsFile, dataset)
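# Example invocation (illustrative; 'basil' stands in for any provided sample
# dataset): python mirrorImageViz.py basil
# This expects basil/basil_spBUOut.txt and basil/basil_sensorBUOut.txt to exist.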
| agpl-3.0 |