repo_name | path | copies | size | content | license
---|---|---|---|---|---
mjgrav2001/scikit-learn | sklearn/utils/validation.py | 67 | 24013 |
"""Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float32 or np.float64
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
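# Illustrative usage sketch (hypothetical inputs, not part of the original
# module): int64 input falls through to the final branch and is upcast to
# float64, while int32 input is converted to float32.
# >>> as_float_array(np.array([[1, 2], [3, 4]], dtype=np.int64)).dtype
# dtype('float64')
# >>> as_float_array(np.array([1, 2, 3], dtype=np.int32)).dtype
# dtype('float32')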
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensemble's length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
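# Illustrative usage sketch (hypothetical inputs): a COO matrix comes back as
# CSR, a plain list becomes an ndarray, and None passes through untouched.
# >>> X_csr, y_arr, groups = indexable(sp.eye(3).tocoo(), [1, 2, 3], None)
# >>> X_csr.format, y_arr.shape, groups
# ('csr', (3,), None)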
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion to the first type in the list
is performed only if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
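# Illustrative usage sketch (hypothetical toy inputs): nested lists are
# converted to a 2D ndarray, 1D input is promoted because ensure_2d defaults
# to True, and sparse input is rejected unless accept_sparse is given.
# >>> check_array([[1, 2, 3], [4, 5, 6]]).shape
# (2, 3)
# >>> check_array([1, 2, 3]).shape
# (1, 3)
# >>> check_array(sp.csr_matrix([[1, 0], [0, 1]]))   # raises TypeError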
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y, such as checking that y does
not have np.nan or np.inf targets. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion to the first type in the list
is performed only if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
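# Illustrative usage sketch (hypothetical toy data): a column-vector y is
# ravelled to 1D with a DataConversionWarning, and X/y length mismatches
# raise a ValueError via check_consistent_length.
# >>> X = [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]
# >>> y = [[0], [1], [0]]
# >>> X_checked, y_checked = check_X_y(X, y)
# >>> X_checked.shape, y_checked.shape
# ((3, 2), (3,))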
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
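# Illustrative usage sketch of the three accepted inputs:
# >>> rng = check_random_state(42)       # int -> new seeded RandomState
# >>> check_random_state(rng) is rng     # RandomState -> returned as-is
# True
# >>> _ = check_random_state(None)       # None -> the global np.random state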
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
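# Illustrative usage sketch (hypothetical 2x2 input): a non-symmetric dense
# array is replaced by the average of the array and its transpose.
# >>> A = np.array([[1., 2.], [0., 1.]])
# >>> check_symmetric(A, raise_warning=False)   # -> [[1., 1.], [1., 1.]]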
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
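# Illustrative usage sketch (assumes sklearn.linear_model.LinearRegression,
# which only exposes `coef_` after fitting):
# >>> from sklearn.linear_model import LinearRegression
# >>> est = LinearRegression()
# >>> check_is_fitted(est, 'coef_')      # raises NotFittedError
# >>> est.fit([[0.], [1.]], [0., 1.])
# >>> check_is_fitted(est, 'coef_')      # passes silently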
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
kevin-coder/tensorflow-fork | tensorflow/contrib/metrics/python/ops/metric_ops_test.py | 5 | 282544 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metric_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
NAN = float('nan')
metrics = metrics_lib
def _enqueue_vector(sess, queue, values, shape=None):
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(
queue.enqueue(constant_op.constant(values, dtype=dtype, shape=shape)))
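# Illustrative note (hypothetical call): with shape left as None, a call such
# as _enqueue_vector(sess, values_queue, [0, 1]) enqueues a (1, 2) constant,
# matching the FIFOQueue(..., shapes=(1, 2)) declarations used in the tests
# below.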
def _binary_2d_label_to_sparse_value(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch, xi])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
shape = [len(labels), len(labels[0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64), np.array(values, np.int64),
np.array(shape, np.int64))
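# Illustrative worked example (hypothetical input, shown as a comment):
#   labels = [[0, 1, 0], [1, 0, 1]]
#   -> indices [[0, 0], [1, 0], [1, 1]], values [1, 0, 2], shape [2, 3]
# i.e. every "1" entry contributes its class (column) index as a sparse value.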
def _binary_2d_label_to_sparse(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_2d_label_to_sparse_value(labels))
def _binary_3d_label_to_sparse_value(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
for d0, labels_d0 in enumerate(labels):
for d1, labels_d1 in enumerate(labels_d0):
d2 = 0
for class_id, label in enumerate(labels_d1):
if label == 1:
values.append(class_id)
indices.append([d0, d1, d2])
d2 += 1
else:
assert label == 0
shape = [len(labels), len(labels[0]), len(labels[0][0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64), np.array(values, np.int64),
np.array(shape, np.int64))
def _binary_3d_label_to_sparse(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
Only 1 values in `labels` are included in result.
Args:
labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_3d_label_to_sparse_value(labels))
def _assert_nan(test_case, actual):
test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)
def _assert_metric_variables(test_case, expected):
test_case.assertEquals(
set(expected), set(v.name for v in variables.local_variables()))
test_case.assertEquals(
set(expected),
set(v.name for v in ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
class StreamingMeanTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean(array_ops.ones([4, 3]))
_assert_metric_variables(self, ('mean/count:0', 'mean/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(1.475, sess.run(update_op), 5)
self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def test1dWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
class StreamingMeanTensorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_tensor(array_ops.ones([4, 3]))
_assert_metric_variables(self,
('mean/total_tensor:0', 'mean/count_tensor:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))
def testMultiDimensional(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
shape=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
shape=(2, 2, 2))
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(2):
sess.run(update_op)
self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))
def testUpdateOpsReturnsCurrentValue(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
self.assertAllClose([[0, 1]], sess.run(update_op), 5)
self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)
def testWeighted1d(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
def testWeighted2d_1(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
def testWeighted2d_2(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[0, 0.5]], sess.run(mean), 5)
class StreamingAccuracyTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
name='my_accuracy')
_assert_metric_variables(self,
('my_accuracy/count:0', 'my_accuracy/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 4))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels)
def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 3))
weights = array_ops.ones((9, 3))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels, weights)
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=2)
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_accuracy = accuracy.eval()
for _ in range(10):
self.assertEqual(initial_accuracy, accuracy.eval())
def testMultipleUpdates(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, accuracy.eval())
def testEffectivelyEquivalentSizes(self):
predictions = array_ops.ones((40, 1))
labels = array_ops.ones((40,))
with self.cached_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
def testEffectivelyEquivalentSizesWithStaticShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),
1) # shape 3, 1
with self.cached_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
# if streaming_accuracy does not flatten the weight, accuracy would be
# 0.33333334 due to an intended broadcast of weight. Due to flattening,
# it will be higher than .95
self.assertGreater(update_op.eval(), .95)
self.assertGreater(accuracy.eval(), .95)
def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = [[100], [1], [1]] # shape 3, 1
weights_placeholder = array_ops.placeholder(
dtype=dtypes_lib.int32, name='weights')
feed_dict = {weights_placeholder: weights}
with self.cached_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights_placeholder)
sess.run(variables.local_variables_initializer())
# if streaming_accuracy does not flatten the weight, accuracy would be
# 0.33333334 due to an intended broadcast of weight. Due to flattening,
# it will be higher than .95
self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)
def testMultipleUpdatesWithWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.int64, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
weights = weights_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, accuracy.eval())
class StreamingTruePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('true_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tp, tp_update_op = metrics.streaming_true_positives(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(1, tp_update_op.eval())
self.assertEqual(1, tp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
tp, tp_update_op = metrics.streaming_true_positives(
predictions, labels, weights=37.0)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(37.0, tp_update_op.eval())
self.assertEqual(37.0, tp.eval())
class StreamingFalseNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('false_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fn, fn_update_op = metrics.streaming_false_negatives(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(2, fn_update_op.eval())
self.assertEqual(2, fn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
fn, fn_update_op = metrics.streaming_false_negatives(
predictions, labels, weights=((3.0,), (5.0,), (7.0,)))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(8.0, fn_update_op.eval())
self.assertEqual(8.0, fn.eval())
class StreamingFalsePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('false_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fp, fp_update_op = metrics.streaming_false_positives(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(4, fp_update_op.eval())
self.assertEqual(4, fp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
fp, fp_update_op = metrics.streaming_false_positives(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0), (19.0, 23.0,
29.0, 31.0)))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(42.0, fp_update_op.eval())
self.assertEqual(42.0, fp.eval())
class StreamingTrueNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('true_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tn, tn_update_op = metrics.streaming_true_negatives(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(5, tn_update_op.eval())
self.assertEqual(5, tn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(
constant_op.constant(((1, 0, 1, 0), (0, 1, 1, 1), (0, 0, 0, 0))),
dtype=dtype)
labels = math_ops.cast(
constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0))),
dtype=dtype)
tn, tn_update_op = metrics.streaming_true_negatives(
predictions, labels, weights=((0.0, 2.0, 3.0, 5.0),))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(15.0, tn_update_op.eval())
self.assertEqual(15.0, tn.eval())
class StreamingTruePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('true_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tp.eval())
self.assertAllEqual((3, 1, 0), tp_update_op.eval())
self.assertAllEqual((3, 1, 0), tp.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, weights=37.0, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tp.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp_update_op.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp.eval())
class StreamingFalseNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(
0.15,
0.5,
0.85,
))
_assert_metric_variables(self, ('false_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fn.eval())
self.assertAllEqual((0, 2, 3), fn_update_op.eval())
self.assertAllEqual((0, 2, 3), fn.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions,
labels,
weights=((3.0,), (5.0,), (7.0,)),
thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fn.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn_update_op.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn.eval())
class StreamingFalsePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('false_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fp.eval())
self.assertAllEqual((7, 4, 2), fp_update_op.eval())
self.assertAllEqual((7, 4, 2), fp.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0), (7.0, 11.0, 13.0, 17.0), (19.0, 23.0,
29.0, 31.0)),
thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fp.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp_update_op.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp.eval())
class StreamingTrueNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('true_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tn.eval())
self.assertAllEqual((2, 5, 7), tn_update_op.eval())
self.assertAllEqual((2, 5, 7), tn.eval())
def testWeighted(self):
predictions = constant_op.constant(
((0.9, 0.2, 0.8, 0.1), (0.2, 0.9, 0.7, 0.6), (0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions,
labels,
weights=((0.0, 2.0, 3.0, 5.0),),
thresholds=(0.15, 0.5, 0.85))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tn.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn_update_op.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn.eval())
class StreamingPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self, ('precision/false_positives/count:0',
'precision/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertEqual(initial_precision, precision.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op))
self.assertAlmostEqual(1, precision.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, precision.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.cached_session():
variables.local_variables_initializer().run()
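# Row weights broadcast across columns: row 1 has weight 2, row 2 weight 5.
# The single true positive in each row (index 2 and index 0 respectively)
# gives weighted_tp = 2 + 5, and each row predicts two positives (4 + 10).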
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted1d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.cached_session():
variables.local_variables_initializer().run()
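# Per-element weights: the true positives sit at (row 1, col 2) and
# (row 2, col 0) with weights 3 and 4; the four predicted positives carry
# weights 1 + 3 and 4 + 2, so the expected precision is 7 / 10.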
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted2d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.cached_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(1 - inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, precision.eval())
def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
predictions = constant_op.constant([0, 0, 0, 0])
labels = constant_op.constant([0, 0, 0, 0])
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0.0, precision.eval())
class StreamingRecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_recall(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('recall/false_negatives/count:0', 'recall/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertEqual(initial_recall, recall.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, recall.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, recall.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
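# Row weights 2 and 5: the true positives (row 1 col 2, row 2 col 3) carry
# weights 2 and 5, and each row has two actual positives, so recall is 7 / 14.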
weighted_tp = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
expected_recall = weighted_tp / weighted_t
self.assertAlmostEqual(expected_recall, update_op.eval())
self.assertAlmostEqual(expected_recall, recall.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
expected_recall = weighted_tp / weighted_t
self.assertAlmostEqual(expected_recall, update_op.eval())
self.assertAlmostEqual(expected_recall, recall.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
class StreamingFPRTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('false_positive_rate/false_positives/count:0',
'false_positive_rate/true_negatives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_fpr = fpr.eval()
for _ in range(10):
self.assertEqual(initial_fpr, fpr.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fpr.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, fpr.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
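# Row weights 2 and 5: the false positives (row 1 col 0, row 2 col 1) carry
# weights 2 and 5, and each row has two actual negatives, so the FPR is 7 / 14.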
weighted_fp = 2.0 + 5.0
weighted_f = (2.0 + 2.0) + (5.0 + 5.0)
expected_fpr = weighted_fp / weighted_f
self.assertAlmostEqual(expected_fpr, update_op.eval())
self.assertAlmostEqual(expected_fpr, fpr.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fp = 1.0 + 3.0
weighted_f = (1.0 + 4.0) + (2.0 + 3.0)
expected_fpr = weighted_fp / weighted_f
self.assertAlmostEqual(expected_fpr, update_op.eval())
self.assertAlmostEqual(expected_fpr, fpr.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, fpr.eval())
def testZeroFalsePositivesAndTrueNegativesGivesZeroFPR(self):
predictions = array_ops.ones((1, 4))
labels = array_ops.ones((1, 4))
fpr, update_op = metrics.streaming_false_positive_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fpr.eval())
class StreamingFNRTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('false_negative_rate/false_negatives/count:0',
'false_negative_rate/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_fnr = fnr.eval()
for _ in range(10):
self.assertEqual(initial_fnr, fnr.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fnr.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, fnr.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
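# Row weights 2 and 5: the false negatives (row 1 col 1, row 2 col 0) carry
# weights 2 and 5, and each row has two actual positives, so the FNR is 7 / 14.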
weighted_fn = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
expected_fnr = weighted_fn / weighted_t
self.assertAlmostEqual(expected_fnr, update_op.eval())
self.assertAlmostEqual(expected_fnr, fnr.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
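# False negatives at (row 1, col 1) and (row 2, col 0) carry weights 2 and 4;
# the actual positives carry weights (2 + 3) + (4 + 1) = 10, so the FNR is 0.6.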
weighted_fn = 2.0 + 4.0
weighted_t = (2.0 + 3.0) + (1.0 + 4.0)
expected_fnr = weighted_fn / weighted_t
self.assertAlmostEqual(expected_fnr, update_op.eval())
self.assertAlmostEqual(expected_fnr, fnr.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, fnr.eval())
def testZeroFalseNegativesAndTruePositivesGivesZeroFNR(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
fnr, update_op = metrics.streaming_false_negative_rate(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fnr.eval())
class StreamingCurvePointsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metric_ops.streaming_curve_points(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('curve_points/true_positives:0', 'curve_points/false_negatives:0',
'curve_points/false_positives:0', 'curve_points/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
points, _ = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [points])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def _testValueTensorIsIdempotent(self, curve):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(high=2, size=(10, 3)), dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels, predictions=predictions, curve=curve)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
initial_points = points.eval()
sess.run(update_op)
self.assertAllClose(initial_points, points.eval())
def testValueTensorIsIdempotentROC(self):
self._testValueTensorIsIdempotent(curve='ROC')
def testValueTensorIsIdempotentPR(self):
self._testValueTensorIsIdempotent(curve='PR')
def _testCase(self, labels, predictions, curve, expected_points):
with self.cached_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels=labels_tensor,
predictions=predictions_tensor,
num_thresholds=3,
curve=curve)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAllClose(expected_points, points.eval())
def testEdgeCasesROC(self):
self._testCase([[1]], [[1]], 'ROC', [[0, 1], [0, 1], [0, 0]])
self._testCase([[0]], [[0]], 'ROC', [[1, 1], [0, 1], [0, 1]])
self._testCase([[0]], [[1]], 'ROC', [[1, 1], [1, 1], [0, 1]])
self._testCase([[1]], [[0]], 'ROC', [[0, 1], [0, 0], [0, 0]])
def testManyValuesROC(self):
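# Points are (fpr, tpr) at thresholds near 0, 0.5 and 1: everything predicted
# positive gives (1, 1); at 0.5 only the three highest scores (all true
# positives) survive, giving (0, 0.75); at 1 nothing is positive, giving (0, 0).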
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'ROC',
[[1.0, 1.0], [0.0, 0.75], [0.0, 0.0]])
def testEdgeCasesPR(self):
self._testCase([[1]], [[1]], 'PR', [[1, 1], [1, 1], [0, 1]])
self._testCase([[0]], [[0]], 'PR', [[1, 0], [1, 1], [1, 1]])
self._testCase([[0]], [[1]], 'PR', [[1, 0], [1, 0], [1, 1]])
self._testCase([[1]], [[0]], 'PR', [[1, 1], [0, 1], [0, 1]])
def testManyValuesPR(self):
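# Points are (recall, precision): near threshold 0 recall is 1 with precision
# 4/6; at 0.5 the three surviving predictions are all true positives
# (recall 0.75, precision 1); near 1 nothing is predicted positive.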
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'PR',
[[1.0, 4.0 / 6.0], [0.75, 1.0], [0.0, 1.0]])
def _np_auc(predictions, labels, weights=None):
"""Computes the AUC explicitly using Numpy.
Args:
predictions: an ndarray with shape [N].
labels: an ndarray with shape [N].
weights: an ndarray with shape [N].
Returns:
the area under the ROC curve.
"""
if weights is None:
weights = np.ones(np.size(predictions))
is_positive = labels > 0
num_positives = np.sum(weights[is_positive])
num_negatives = np.sum(weights[~is_positive])
# Sort descending:
inds = np.argsort(-predictions)
sorted_labels = labels[inds]
sorted_weights = weights[inds]
is_positive = sorted_labels > 0
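# tp[i] is the weighted true-positive rate accumulated down to the i-th
# highest score; summing it over the negatives integrates TPR against the
# weighted false-positive mass, i.e. the area under the ROC curve.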
tp = np.cumsum(sorted_weights * is_positive) / num_positives
return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives
class StreamingAUCTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_auc(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('auc/true_positives:0', 'auc/false_negatives:0',
'auc/false_positives:0', 'auc/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_auc(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in range(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testPredictionsOutOfRange(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, -1, 1, -1], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
_, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertRaises(errors_impl.InvalidArgumentError, update_op.eval)
def testAllCorrect(self):
self.allCorrectAsExpected('ROC')
def allCorrectAsExpected(self, curve):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_auc(predictions, labels, curve=curve)
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op))
self.assertAlmostEqual(0.5, auc.eval())
def testWeighted1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([2], shape=(1, 1))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(0.5, auc.eval(), 5)
def testWeighted2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.7, sess.run(update_op), 5)
self.assertAlmostEqual(0.7, auc.eval(), 5)
def testAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)
def testAnotherAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)
def testThirdAUCPRSpecialCase(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0, sess.run(update_op))
self.assertAlmostEqual(0, auc.eval())
def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
with self.cached_session() as sess:
predictions = array_ops.ones([4], dtype=dtypes_lib.float32)
labels = array_ops.ones([4])
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=num_samples)
noise = np.random.normal(0.0, scale=0.2, size=num_samples)
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
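# Predictions track the labels (a 0.2 shift plus Gaussian noise) and are
# clipped to [0, 1]. The helper below enqueues an array in aligned batches and
# returns a dequeue op, so predictions, labels and weights stay in sync.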
def _enqueue_as_batches(x, enqueue_ops):
x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
x_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(num_batches):
enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
return x_queue.dequeue()
for weights in (None, np.ones(num_samples),
np.random.exponential(scale=1.0, size=num_samples)):
expected_auc = _np_auc(predictions, labels, weights)
with self.cached_session() as sess:
enqueue_ops = [[] for i in range(num_batches)]
tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
tf_labels = _enqueue_as_batches(labels, enqueue_ops)
tf_weights = (
_enqueue_as_batches(weights, enqueue_ops)
if weights is not None else None)
for i in range(num_batches):
sess.run(enqueue_ops[i])
auc, update_op = metrics.streaming_auc(
tf_predictions,
tf_labels,
curve='ROC',
num_thresholds=500,
weights=tf_weights)
sess.run(variables.local_variables_initializer())
for i in range(num_batches):
sess.run(update_op)
# Since this is only approximate, we can't expect a six-digit match,
# although the accuracy should improve with more samples and thresholds.
self.assertAlmostEqual(expected_auc, auc.eval(), 2)
class StreamingDynamicAUCTest(test.TestCase):
def setUp(self):
super(StreamingDynamicAUCTest, self).setUp()
np.random.seed(1)
ops.reset_default_graph()
def testUnknownCurve(self):
with self.assertRaisesRegexp(
ValueError, 'curve must be either ROC or PR, TEST_CURVE unknown'):
metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
curve='TEST_CURVE')
def testVars(self):
metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)), predictions=array_ops.ones((10, 1)))
_assert_metric_variables(self, [
'dynamic_auc/concat_labels/array:0', 'dynamic_auc/concat_labels/size:0',
'dynamic_auc/concat_preds/array:0', 'dynamic_auc/concat_preds/size:0'
])
def testMetricsCollection(self):
my_collection_name = '__metrics__'
auc, _ = metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertEqual(ops.get_collection(my_collection_name), [auc])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in xrange(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in xrange(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testAllLabelsOnes(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1., 1., 1.])
labels = constant_op.constant([1, 1, 1])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, auc.eval())
def testAllLabelsZeros(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1., 1., 1.])
labels = constant_op.constant([0, 0, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, auc.eval())
def testNonZeroOnePredictions(self):
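# streaming_dynamic_auc ranks raw scores, so predictions outside [0, 1] are
# acceptable here; only their ordering relative to the labels should matter.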
with self.cached_session() as sess:
predictions = constant_op.constant(
[2.5, -2.5, 2.5, -2.5], dtype=dtypes_lib.float32)
labels = constant_op.constant([1, 0, 1, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(auc.eval(), 1.0)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0, 1, 0])
labels = constant_op.constant([0, 1, 1, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.5, auc.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, auc.eval())
def testExceptionOnIncompatibleShapes(self):
with self.cached_session() as sess:
predictions = array_ops.ones([5])
labels = array_ops.zeros([6])
with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
def testExceptionOnGreaterThanOneLabel(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
labels = constant_op.constant([2, 1, 0])
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is >1.*'):
sess.run(update_op)
def testExceptionOnNegativeLabel(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
labels = constant_op.constant([1, 0, -1])
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is <0.*'):
sess.run(update_op)
def testWithMultipleUpdates(self):
batch_size = 10
num_batches = 100
labels = np.array([])
predictions = np.array([])
tf_labels = variables.VariableV1(
array_ops.ones(batch_size, dtypes_lib.int32),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.int32)
tf_predictions = variables.VariableV1(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(tf_labels, tf_predictions)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_batches):
new_labels = np.random.randint(0, 2, size=batch_size)
noise = np.random.normal(0.0, scale=0.2, size=batch_size)
new_predictions = 0.4 + 0.2 * new_labels + noise
labels = np.concatenate([labels, new_labels])
predictions = np.concatenate([predictions, new_predictions])
sess.run(tf_labels.assign(new_labels))
sess.run(tf_predictions.assign(new_predictions))
sess.run(update_op)
expected_auc = _np_auc(predictions, labels)
self.assertAlmostEqual(expected_auc, auc.eval())
def testAUCPRReverseIncreasingPredictions(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1])
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-5)
def testAUCPRJumbledPredictions(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81], dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1])
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-6)
def testAUCPRPredictionsLessThanHalf(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-5)
def testWithWeights(self):
batch_size = 10
num_batches = 100
labels = np.array([])
predictions = np.array([])
weights = np.array([])
tf_labels = variables.VariableV1(
array_ops.ones(batch_size, dtypes_lib.int32),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.int32)
tf_predictions = variables.VariableV1(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
tf_weights = variables.VariableV1(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(tf_labels,
tf_predictions,
weights=tf_weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_batches):
new_labels = np.random.randint(0, 2, size=batch_size)
noise = np.random.uniform(-0.2, 0.2, size=batch_size)
new_predictions = 0.4 + 0.2 * new_labels + noise
new_weights = np.random.uniform(0.0, 3.0, size=batch_size)
labels = np.concatenate([labels, new_labels])
predictions = np.concatenate([predictions, new_predictions])
weights = np.concatenate([weights, new_weights])
sess.run([tf_labels.assign(new_labels),
tf_predictions.assign(new_predictions),
tf_weights.assign(new_weights)])
sess.run(update_op)
expected_auc = _np_auc(predictions, labels, weights)
self.assertAlmostEqual(expected_auc, auc.eval())
class AucWithConfidenceIntervalsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def _testResultsEqual(self, expected_dict, gotten_result):
"""Tests that 2 results (dicts) represent the same data.
Args:
expected_dict: A dictionary with keys that are the names of properties
of PrecisionRecallData and whose values are lists of floats.
gotten_result: An AucWithConfidenceIntervalData object.
"""
gotten_dict = {k: t.eval() for k, t in gotten_result._asdict().items()}
self.assertItemsEqual(
list(expected_dict.keys()), list(gotten_dict.keys()))
for key, expected_values in expected_dict.items():
self.assertAllClose(expected_values, gotten_dict[key])
def _testCase(self, predictions, labels, expected_result, weights=None):
"""Performs a test given a certain scenario of labels, predictions, weights.
Args:
predictions: The predictions tensor. Of type float32.
labels: The labels tensor. Of type bool.
expected_result: The expected result (dict) that maps to tensors.
weights: Optional weights tensor.
"""
with self.cached_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.int64)
weights_tensor = None
if weights:
weights_tensor = constant_op.constant(weights, dtype=dtypes_lib.float32)
gotten_result, update_op = (
metric_ops.auc_with_confidence_intervals(
labels=labels_tensor,
predictions=predictions_tensor,
weights=weights_tensor))
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self._testResultsEqual(expected_result, gotten_result)
def testAucAllCorrect(self):
self._testCase(
predictions=[0., 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],
labels=[0, 0, 1, 0, 0, 1, 0, 1, 1, 0],
expected_result={
'auc': 0.66666667,
'lower': 0.27826795,
'upper': 0.91208512,
})
def testAucUnorderedInput(self):
self._testCase(
predictions=[1.0, 0.6, 0., 0.3, 0.4, 0.2, 0.5, 0.3, 0.6, 0.8],
labels=[0, 1, 0, 1, 0, 0, 1, 0, 0, 1],
expected_result={
'auc': 0.66666667,
'lower': 0.27826795,
'upper': 0.91208512,
})
def testAucWithWeights(self):
self._testCase(
predictions=[0., 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],
labels=[0, 0, 1, 0, 0, 1, 0, 1, 1, 0],
weights=[0.5, 0.6, 1.2, 1.5, 2.0, 2.0, 1.5, 1.2, 0.6, 0.5],
expected_result={
'auc': 0.65151515,
'lower': 0.28918604,
'upper': 0.89573906,
})
def testAucEqualOne(self):
self._testCase(
predictions=[0, 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],
labels=[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
expected_result={
'auc': 1.0,
'lower': 1.0,
'upper': 1.0,
})
def testAucEqualZero(self):
self._testCase(
predictions=[0, 0.2, 0.3, 0.3, 0.4, 0.5, 0.6, 0.6, 0.8, 1.0],
labels=[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
expected_result={
'auc': 0.0,
'lower': 0.0,
'upper': 0.0,
})
def testNonZeroOnePredictions(self):
self._testCase(
predictions=[2.5, -2.5, .5, -.5, 1],
labels=[1, 0, 1, 0, 0],
expected_result={
'auc': 0.83333333,
'lower': 0.15229267,
'upper': 0.99286517,
})
def testAllLabelsOnes(self):
self._testCase(
predictions=[1., 1., 1., 1., 1.],
labels=[1, 1, 1, 1, 1],
expected_result={
'auc': 0.,
'lower': 0.,
'upper': 0.,
})
def testAllLabelsZeros(self):
self._testCase(
predictions=[0., 0., 0., 0., 0.],
labels=[0, 0, 0, 0, 0],
expected_result={
'auc': 0.,
'lower': 0.,
'upper': 0.,
})
def testWeightSumLessThanOneAll(self):
self._testCase(
predictions=[1., 1., 0., 1., 0., 0.],
labels=[1, 1, 1, 0, 0, 0],
weights=[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
expected_result={
'auc': 0.,
'lower': 0.,
'upper': 0.,
})
def testWithMultipleUpdates(self):
batch_size = 50
num_batches = 100
labels = np.array([])
predictions = np.array([])
tf_labels = variables.VariableV1(
array_ops.ones(batch_size, dtypes_lib.int32),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.int32)
tf_predictions = variables.VariableV1(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
auc, update_op = metrics.auc_with_confidence_intervals(tf_labels,
tf_predictions)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_batches):
new_labels = np.random.randint(0, 2, size=batch_size)
noise = np.random.normal(0.0, scale=0.2, size=batch_size)
new_predictions = 0.4 + 0.2 * new_labels + noise
labels = np.concatenate([labels, new_labels])
predictions = np.concatenate([predictions, new_predictions])
sess.run(tf_labels.assign(new_labels))
sess.run(tf_predictions.assign(new_predictions))
sess.run(update_op)
expected_auc = _np_auc(predictions, labels)
self.assertAllClose(expected_auc, auc.auc.eval())
def testExceptionOnFloatLabels(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0, 1, 0], dtypes_lib.float32)
labels = constant_op.constant([0.7, 0, 1, 0, 1])
_, update_op = metrics.auc_with_confidence_intervals(labels, predictions)
sess.run(variables.local_variables_initializer())
self.assertRaises(TypeError, sess.run, update_op)
def testExceptionOnGreaterThanOneLabel(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0, 1, 0], dtypes_lib.float32)
labels = constant_op.constant([2, 1, 0, 1, 0])
_, update_op = metrics.auc_with_confidence_intervals(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is >1.*'):
sess.run(update_op)
def testExceptionOnNegativeLabel(self):
with self.cached_session() as sess:
predictions = constant_op.constant([1, 0.5, 0, 1, 0], dtypes_lib.float32)
labels = constant_op.constant([1, 0, -1, 1, 0])
_, update_op = metrics.auc_with_confidence_intervals(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is <0.*'):
sess.run(update_op)
class StreamingPrecisionRecallAtEqualThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def _testResultsEqual(self, expected_dict, gotten_result, eps=None):
"""Tests that 2 results (dicts) represent the same data.
Args:
expected_dict: A dictionary with keys that are the names of properties
of PrecisionRecallData and whose values are lists of floats.
gotten_result: A PrecisionRecallData object.
eps: Epsilon value to use for testing output values. If unspecified, use
default from assertAllClose.
"""
gotten_dict = {k: t.eval() for k, t in gotten_result._asdict().items()}
self.assertItemsEqual(list(expected_dict.keys()), list(gotten_dict.keys()))
for key, expected_values in expected_dict.items():
if eps is not None:
self.assertAllClose(expected_values, gotten_dict[key], atol=eps)
else:
self.assertAllClose(expected_values, gotten_dict[key])
def testVars(self):
metric_ops.precision_recall_at_equal_thresholds(
labels=constant_op.constant([True], dtype=dtypes_lib.bool),
predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32))
_assert_metric_variables(
self, ('precision_recall_at_equal_thresholds/variables/tp_buckets:0',
'precision_recall_at_equal_thresholds/variables/fp_buckets:0'))
def testVarsWithName(self):
metric_ops.precision_recall_at_equal_thresholds(
labels=constant_op.constant([True], dtype=dtypes_lib.bool),
predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32),
name='foo')
_assert_metric_variables(
self, ('foo/variables/tp_buckets:0', 'foo/variables/fp_buckets:0'))
def testValuesAreIdempotent(self):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(size=(10, 3)) > 0.5, dtype=dtypes_lib.bool)
result, update_op = metric_ops.precision_recall_at_equal_thresholds(
labels=labels, predictions=predictions)
with self.cached_session() as sess:
# Run several updates.
sess.run(variables.local_variables_initializer())
for _ in range(3):
sess.run(update_op)
# Then verify idempotency.
initial_result = {
k: value.eval().tolist()
for k, value in result._asdict().items()
}
for _ in range(3):
self._testResultsEqual(initial_result, result)
def _testCase(self,
predictions,
labels,
expected_result,
dtype=dtypes_lib.float32,
eps=None,
weights=None):
"""Performs a test given a certain scenario of labels, predictions, weights.
Args:
predictions: The predictions tensor. Of type dtype.
labels: The labels tensor. Of type bool.
expected_result: The expected result (dict) that maps to tensors.
dtype: Data type to use for predictions and weights tensor. Default
is float32.
eps: Epsilon value to use for testing output values. If unspecified, use
default from assertAllClose.
weights: Optional weights tensor.
"""
with self.cached_session() as sess:
predictions_tensor = constant_op.constant(predictions, dtype=dtype)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.bool)
weights_tensor = None
if weights:
weights_tensor = constant_op.constant(weights, dtype=dtype)
gotten_result, update_op = (
metric_ops.precision_recall_at_equal_thresholds(
labels=labels_tensor,
predictions=predictions_tensor,
weights=weights_tensor,
num_thresholds=3))
self.assertEqual(gotten_result.tp.dtype, dtype)
self.assertEqual(gotten_result.fp.dtype, dtype)
self.assertEqual(gotten_result.tn.dtype, dtype)
self.assertEqual(gotten_result.fn.dtype, dtype)
self.assertEqual(gotten_result.precision.dtype, dtype)
self.assertEqual(gotten_result.recall.dtype, dtype)
self.assertEqual(gotten_result.thresholds.dtype, dtype)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self._testResultsEqual(expected_result, gotten_result, eps=eps)
def testAllTruePositives(self):
self._testCase(
[[1]], [[True]], {
'tp': [1, 1, 1],
'fp': [0, 0, 0],
'tn': [0, 0, 0],
'fn': [0, 0, 0],
'precision': [1.0, 1.0, 1.0],
'recall': [1.0, 1.0, 1.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllTrueNegatives(self):
self._testCase(
[[0]], [[False]], {
'tp': [0, 0, 0],
'fp': [1, 0, 0],
'tn': [0, 1, 1],
'fn': [0, 0, 0],
'precision': [0.0, 0.0, 0.0],
'recall': [0.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllFalsePositives(self):
self._testCase(
[[1]], [[False]], {
'tp': [0, 0, 0],
'fp': [1, 1, 1],
'tn': [0, 0, 0],
'fn': [0, 0, 0],
'precision': [0.0, 0.0, 0.0],
'recall': [0.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllFalseNegatives(self):
self._testCase(
[[0]], [[True]], {
'tp': [1, 0, 0],
'fp': [0, 0, 0],
'tn': [0, 0, 0],
'fn': [0, 1, 1],
'precision': [1.0, 0.0, 0.0],
'recall': [1.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testManyValues(self):
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [4, 3, 0],
'fp': [2, 0, 0],
'tn': [0, 2, 2],
'fn': [0, 1, 4],
'precision': [2.0 / 3.0, 1.0, 0.0],
'recall': [1.0, 0.75, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testManyValuesWithWeights(self):
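# With weights (0, 0.5, 2, 0, 0.5, 1) the zero-weight examples drop out; at
# the lowest threshold everything remaining counts as predicted positive,
# giving tp = 0.5 + 1.0 = 1.5 and fp = 0.5 + 2.0 = 2.5.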
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [1.5, 1.5, 0.0],
'fp': [2.5, 0.0, 0.0],
'tn': [0.0, 2.5, 2.5],
'fn': [0.0, 0.0, 1.5],
'precision': [0.375, 1.0, 0.0],
'recall': [1.0, 1.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
},
weights=[[0.0, 0.5, 2.0, 0.0, 0.5, 1.0]])
def testFloat64(self):
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [4, 3, 0],
'fp': [2, 0, 0],
'tn': [0, 2, 2],
'fn': [0, 1, 4],
'precision': [2.0 / 3.0, 1.0, 0.0],
'recall': [1.0, 0.75, 0.0],
'thresholds': [0.0, 0.5, 1.0],
},
dtype=dtypes_lib.float64)
def testFloat16(self):
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]], {
'tp': [4, 3, 0],
'fp': [2, 0, 0],
'tn': [0, 2, 2],
'fn': [0, 1, 4],
'precision': [2.0 / 3.0, 1.0, 0.0],
'recall': [1.0, 0.75, 0.0],
'thresholds': [0.0, 0.5, 1.0],
},
dtype=dtypes_lib.float16,
eps=1e-3)
class StreamingSpecificityAtSensitivityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7)
_assert_metric_variables(self,
('specificity_at_sensitivity/true_positives:0',
'specificity_at_sensitivity/false_negatives:0',
'specificity_at_sensitivity/false_positives:0',
'specificity_at_sensitivity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_specificity = specificity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.8)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op))
self.assertAlmostEqual(1.0, specificity.eval())
def testSomeCorrectLowSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted1d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [3]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted2d(self):
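# The negative class carries total weight 1 + 2 + 3 + 4 + 5 = 15; at the
# operating threshold chosen for sensitivity 0.4 (roughly 0.26) the correctly
# rejected negatives carry weights 1, 2 and 5, hence specificity 8 / 15.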
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
class StreamingSensitivityAtSpecificityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7)
_assert_metric_variables(self,
('sensitivity_at_specificity/true_positives:0',
'sensitivity_at_specificity/false_negatives:0',
'sensitivity_at_specificity/false_positives:0',
'sensitivity_at_specificity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_sensitivity = sensitivity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.8)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, specificity.eval())
def testSomeCorrectLowSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted(self):
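# Total positive weight is 6 + 7 + 8 + 9 + 10 = 40. Specificity 0.4 is met
# when the negatives scored 0.0, 0.1 and 0.2 (weights 1 + 2 + 3 = 6 of 15) are
# rejected; the positives at or above that threshold carry weights
# 8 + 9 + 10 = 27, giving 27 / 40 = 0.675.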
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, weights=weights, specificity=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.675, sess.run(update_op))
self.assertAlmostEqual(0.675, specificity.eval())
# TODO(nsilberman): Break this up into two sets of tests.
class StreamingPrecisionRecallThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'precision_at_thresholds/true_positives:0',
'precision_at_thresholds/false_positives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
prec, _ = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
rec, _ = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, precision_op = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
_, recall_op = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [precision_op, recall_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run([prec_op, rec_op])
# Then verify idempotency.
initial_prec = prec.eval()
initial_rec = rec.eval()
for _ in range(10):
self.assertAllClose(initial_prec, prec.eval())
self.assertAllClose(initial_rec, rec.eval())
# TODO(nsilberman): fix tests (passing but incorrect).
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertEqual(1, prec.eval())
self.assertEqual(1, rec.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.5, prec.eval())
self.assertAlmostEqual(0.5, rec.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval())
self.assertAlmostEqual(0, rec.eval())
def testWeights1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testWeights2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testExtremeThresholds(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.75, prec_low.eval())
self.assertAlmostEqual(0.0, prec_high.eval())
self.assertAlmostEqual(1.0, rec_low.eval())
self.assertAlmostEqual(0.0, rec_high.eval())
def testZeroLabelsPredictions(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval(), 6)
self.assertAlmostEqual(0, rec.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
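# Compute the expected confusion-matrix counts at the single threshold by
# brute force, for comparison against the streaming op results below.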
tp = 0
fp = 0
fn = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
fp += 1
else:
if labels[i] == 1:
fn += 1
else:
tn += 1
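# The small epsilon in the denominators below guards against division by
# zero when a count happens to be empty.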
epsilon = 1e-7
expected_prec = tp / (epsilon + tp + fp)
expected_rec = tp / (epsilon + tp + fn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.cached_session() as sess:
# Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
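# Pre-load every batch into the queues; each dequeue() below then feeds one
# batch per update step.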
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
prec, prec_op = metrics.streaming_precision_at_thresholds(
tf_predictions, tf_labels, thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run([prec_op, rec_op])
# Since this is only approximate, we can't expect a 6-digit match,
# although with a higher number of samples/thresholds the accuracy
# should improve.
self.assertAlmostEqual(expected_prec, prec.eval(), 2)
self.assertAlmostEqual(expected_rec, rec.eval(), 2)
class StreamingFPRThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'false_positive_rate_at_thresholds/false_positives:0',
'false_positive_rate_at_thresholds/true_negatives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
fpr, _ = metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [fpr])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(fpr_op)
# Then verify idempotency.
initial_fpr = fpr.eval()
for _ in range(10):
self.assertAllClose(initial_fpr, fpr.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertEqual(0, fpr.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.5, fpr.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(1, fpr.eval())
def testWeights1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testWeights2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testExtremeThresholds(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(1.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testZeroLabelsPredictions(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0, fpr.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
fp = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 0:
fp += 1
else:
if labels[i] == 0:
tn += 1
epsilon = 1e-7
expected_fpr = fp / (epsilon + fp + tn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.cached_session() as sess:
# Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run(fpr_op)
# Since this is only approximate, we can't expect a 6-digit match,
# although with a higher number of samples/thresholds the accuracy
# should improve.
self.assertAlmostEqual(expected_fpr, fpr.eval(), 2)
class RecallAtPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7)
_assert_metric_variables(self, ('recall_at_precision/true_positives:0',
'recall_at_precision/false_negatives:0',
'recall_at_precision/false_positives:0',
'recall_at_precision/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertAlmostEqual(initial_recall, recall.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=1.0)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, recall.eval())
def testSomeCorrectHighPrecision(self):
predictions_values = [1, .9, .8, .7, .6, .5, .4, .3]
labels_values = [1, 1, 1, 1, 0, 0, 0, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.8)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, recall.eval())
def testSomeCorrectLowPrecision(self):
predictions_values = [1, .9, .8, .7, .6, .5, .4, .3, .2, .1]
labels_values = [1, 1, 0, 0, 0, 0, 0, 0, 0, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
target_recall = 2.0 / 3.0
self.assertAlmostEqual(target_recall, sess.run(update_op))
self.assertAlmostEqual(target_recall, recall.eval())
def testWeighted(self):
predictions_values = [1, .9, .8, .7, .6]
labels_values = [1, 1, 0, 0, 1]
weights_values = [1, 1, 3, 4, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, weights=weights, precision=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
target_recall = 2.0 / 3.0
self.assertAlmostEqual(target_recall, sess.run(update_op))
self.assertAlmostEqual(target_recall, recall.eval())
def _test_strict_mode(self, strict_mode, target_precision, expected_recall):
num_thresholds = 11
predictions_values = [.2, .3, .5, .6, .7, .8, .9, .9, .9, .1]
labels_values = [1, 1, 0, 0, 0, 0, 0, 0, 0, 1]
# Resulting thresholds and the corresponding precision and recall values at
# each threshold:
# Thresholds [0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9]
# precisions: [0.3 0.2 0.1 0 0 0 0 0 0]
# recalls: [1.0 0.7 0.3 0 0 0 0 0 0]
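# (The precision and recall values above are rounded to one decimal place;
# e.g. the exact recall at threshold 0.2 is 2/3 and the precision is 2/9.)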
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels,
predictions,
num_thresholds=num_thresholds,
precision=target_precision,
strict_mode=strict_mode)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_recall, sess.run(update_op))
self.assertAlmostEqual(expected_recall, recall.eval())
def testStrictMode_Off(self):
# strict_mode is turned off, so we return the recall at the threshold whose
# precision (0.3) is closest to the target precision (0.9). The recall
# corresponding to that threshold is 1.0.
self._test_strict_mode(
strict_mode=False, target_precision=0.9, expected_recall=1.0)
def testStrictMode_OnAndFail(self):
# strict_mode is turned on and we fail to reach the target precision at any
# threshold.
# Target precision: 0.9
# Diff: [-0.6 -0.7 -0.8 -0.9 -0.9 -0.9 -0.9 -0.9 -0.9]
# Reciprocal: [-1.6 -1.4 -1.3 -1.1 -1.1 -1.1 -1.1 -1.1 -1.1]
# Max index: 3, and the corresponding precision is 0, which is smaller than
# the target precision 0.9. As a result, the expected recall is 0.
self._test_strict_mode(
strict_mode=True, target_precision=0.9, expected_recall=.0)
def testStrictMode_OnAndSucceed(self):
# strict_mode is on and we can reach the target precision at a certain
# threshold.
# Target precision: 0.2
# Diff: [0.1 0 -0.1 -0.2 -0.2 -0.2 -0.2 -0.2 -0.2]
# Reciprocal: [10 infty -10.0 -5.0 -5.0 -5.0 -5.0 -5.0 -5.0]
# Max index: 1, and the corresponding precision is 0.2, which is no smaller
# than the target precision 0.2. In this case, we return the recall at
# index 1, which is 2.0/3 (~0.67).
self._test_strict_mode(
strict_mode=True, target_precision=0.2, expected_recall=2.0 / 3)
class PrecisionAtRecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.precision_at_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
target_recall=0.7)
_assert_metric_variables(self,
('precision_at_recall/true_positives:0',
'precision_at_recall/false_negatives:0',
'precision_at_recall/false_positives:0',
'precision_at_recall/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.precision_at_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
target_recall=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.precision_at_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
target_recall=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertAlmostEqual(initial_precision, precision.eval(), places=5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.7)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, precision.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = 1.0 - predictions
label_prior = math_ops.reduce_mean(labels)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(sess.run(label_prior), sess.run(update_op))
self.assertEqual(sess.run(label_prior), precision.eval())
def testSomeCorrectHighRecall(self):
predictions_values = [0.1, 0.2, 0.5, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.8)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, precision.eval())
def testSomeCorrectLowRecall(self):
predictions_values = [0.1, 0.2, 0.7, 0.3, 0.0, 0.1, 0.45, 0.5, 0.6, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.4)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(2.0/3, sess.run(update_op))
self.assertAlmostEqual(2.0/3, precision.eval())
def testWeighted_multipleLabelDtypes(self):
for label_dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions_values = [
0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.22, 0.25, 0.31, 0.35]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = math_ops.cast(labels_values, dtype=label_dtype)
weights = constant_op.constant(weights_values)
precision, update_op = metrics.precision_at_recall(
labels, predictions, target_recall=0.8, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(34.0/43, sess.run(update_op))
self.assertAlmostEqual(34.0/43, precision.eval())
class StreamingFNRThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'false_negative_rate_at_thresholds/false_negatives:0',
'false_negative_rate_at_thresholds/true_positives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
fnr, _ = metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [fnr])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(fnr_op)
# Then verify idempotency.
initial_fnr = fnr.eval()
for _ in range(10):
self.assertAllClose(initial_fnr, fnr.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertEqual(0, fnr.eval())
def testSomeCorrect(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.5, fnr.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(1, fnr.eval())
def testWeights1d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)
def testWeights2d(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)
def testExtremeThresholds(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval())
self.assertAlmostEqual(1.0, fnr_high.eval())
def testZeroLabelsPredictions(self):
with self.cached_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0, fnr.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
fn = 0
tp = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
if labels[i] == 1:
fn += 1
epsilon = 1e-7
expected_fnr = fn / (epsilon + fn + tp)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.cached_session() as sess:
# Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run(fnr_op)
# Since this is only approximate, we can't expect a 6-digit match,
# although with a higher number of samples/thresholds the accuracy
# should improve.
self.assertAlmostEqual(expected_fnr, fnr.eval(), 2)
# TODO(ptucker): Remove when we remove `streaming_recall_at_k`.
# This op will be deprecated soon in favor of `streaming_sparse_recall_at_k`.
# Until then, this test validates that both ops yield the same results.
class StreamingRecallAtKTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
self._batch_size = 4
self._num_classes = 3
self._np_predictions = np.matrix(('0.1 0.2 0.7;'
'0.6 0.2 0.2;'
'0.0 0.9 0.1;'
'0.2 0.0 0.8'))
self._np_labels = [0, 0, 0, 0]
def testVars(self):
metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
k=1)
_assert_metric_variables(self,
('recall_at_1/count:0', 'recall_at_1/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
k=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones((self._batch_size,), dtype=dtypes_lib.int32),
k=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testSingleUpdateKIs1(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=1)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=1)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.25, sess.run(update_op))
self.assertEqual(0.25, recall.eval())
self.assertEqual(0.25, sess.run(sp_update_op))
self.assertEqual(0.25, sp_recall.eval())
def testSingleUpdateKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=2)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, recall.eval())
self.assertEqual(0.5, sess.run(sp_update_op))
self.assertEqual(0.5, sp_recall.eval())
def testSingleUpdateKIs3(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=3)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=3)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
def testSingleUpdateSomeMissingKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
weights = constant_op.constant(
[0, 1, 0, 1], shape=(self._batch_size,), dtype=dtypes_lib.float32)
recall, update_op = metrics.streaming_recall_at_k(
predictions, labels, k=2, weights=weights)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions,
array_ops.reshape(labels, (self._batch_size, 1)),
k=2,
weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
class StreamingSparsePrecisionTest(test.TestCase):
def _test_streaming_sparse_precision_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
labels=labels,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_k(self,
predictions,
labels,
k,
expected,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
predictions = constant_op.constant(predictions, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_k(
predictions, labels, k, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_top_k(
top_k_predictions, labels, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def test_top_k_rank_invalid(self):
with self.cached_session():
# top_k_predictions has rank < 2.
top_k_predictions = [9, 4, 6, 2, 0]
sp_labels = sparse_tensor.SparseTensorValue(
indices=np.array([[0], [1], [2]], np.int64),
values=np.array([2, 7, 8], np.int64),
dense_shape=np.array([10], np.int64))
with self.assertRaises(ValueError):
precision, _ = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int64),
labels=sp_labels)
variables.variables_initializer(variables.local_variables()).run()
precision.eval()
def test_average_precision(self):
# Example 1.
# Matches example here:
# fastml.com/what-you-wanted-to-know-about-mean-average-precision
labels_ex1 = (0, 1, 2, 3, 4)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
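# The expected average precision at k sums precision@i over the top-k ranks
# whose prediction is a true label and divides by k (here k never exceeds
# the number of true labels).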
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
# Example 2.
labels_ex2 = (0, 2, 4, 5, 6)
labels = np.array([labels_ex2], dtype=np.int64)
predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
predictions = (predictions_ex2,)
predictions_top_k_ex2 = (1, 3, 0, 6, 5)
precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
(precision_ex2[2] + precision_ex2[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex2[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=precision_ex2[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex2[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=avg_precision_ex2[i])
# For both examples together, we expect both precision and average precision
# to be the average of the 2 examples.
labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
predictions = (predictions_ex1, predictions_ex2)
streaming_precision = [
(ex1 + ex2) / 2 for ex1, ex2 in zip(precision_ex1, precision_ex2)
]
streaming_average_precision = [
(ex1 + ex2) / 2
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=streaming_precision[i])
predictions_top_k = (predictions_top_k_ex1[:k], predictions_top_k_ex2[:k])
self._test_streaming_sparse_precision_at_top_k(
predictions_top_k, labels, expected=streaming_precision[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=streaming_average_precision[i])
self._test_streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels, expected=streaming_average_precision[i])
# For weighted examples, we expect the streaming average precision to be the
# weighted average of the 2 examples.
weights = (0.3, 0.6)
streaming_average_precision = [
(weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_average_precision_at_k(
predictions,
labels,
k,
expected=streaming_average_precision[i],
weights=weights)
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k], predictions_top_k_ex2[:k]),
labels,
expected=streaming_average_precision[i],
weights=weights)
def test_average_precision_some_labels_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
def test_average_precision_at_top_k_static_shape_check(self):
predictions_top_k = array_ops.placeholder(
shape=(2, None), dtype=dtypes_lib.int64)
labels = np.array(((1,), (2,)), dtype=np.int64)
# Fails due to non-static predictions_idx shape.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels)
predictions_top_k = (2, 1)
# Fails since the rank of predictions_idx is less than two.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels)
predictions_top_k = ((2,), (1,))
# Valid static shape.
metric_ops.streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels)
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.
for class_id in (-1, 0, 1, 2, 4):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2, class_id=3)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_labels(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=3.0 / 10)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2], [1, 3]],
# values -1 and 10 are outside the [0, n_classes) range and are ignored.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=3.0 / 10)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_3d_no_labels(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 predictions, all correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=4.0 / 4, class_id=2)
# Class 5: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=5)
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=7)
# All classes: 20 predictions, 7 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=7.0 / 20)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=7.0 / 20)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: no predictions.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 0, 1], [0, 0, 1, 0]]
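    # Top-1 prediction is class 3 for both rows; only row 1 has label 3, so
    # precision@1 = 1 correct / 2 predictions = 0.5.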
expected_precision = 0.5
with self.cached_session():
_, precision = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_precision, precision.eval())
class StreamingSparseRecallTest(test.TestCase):
def _test_streaming_sparse_recall_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_sparse_recall_at_top_k(self,
labels,
top_k_predictions,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metric_ops.sparse_recall_at_top_k(
labels=labels,
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
# Classes 0,1 have 0 labels, 0 predictions, classes -1 and 4 are out of
# range.
for labels in (sparse_labels, dense_labels):
for class_id in (-1, 0, 1, 4):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_one_label_at_k1_no_predictions(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
      # Class 2: 1 label, 0 predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=2)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, class_id=3)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2)
def _test_one_label_at_k1_weighted(self, labels):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=3, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.6 / 0.9, weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.6 / 0.9, weights=(0.6, 0.3))
def test_one_label_at_k1_weighted_sparse_labels(self):
sparse_labels = _binary_2d_label_to_sparse_value([[0, 0, 0, 1],
[0, 0, 1, 0]])
self._test_one_label_at_k1_weighted(sparse_labels)
def test_one_label_at_k1_weighted_dense_labels(self):
dense_labels = np.array([[3], [2]], dtype=np.int64)
self._test_one_label_at_k1_weighted(dense_labels)
def test_three_labels_at_k5_nan(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 8: 1 label, no predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=8)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=8)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=2)
      # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=7)
# All classes: 6 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=3.0 / 6)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 6)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) count in denominator."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
        indices=[[0, 0], [0, 1], [0, 2], [0, 3],
                 [1, 0], [1, 1], [1, 2], [1, 3]],
# values -1 and 10 are outside the [0, n_classes) range.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=2.0 / 2,
class_id=2)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=2.0 / 2, class_id=2)
    # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=1.0 / 1,
class_id=5)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=0.0 / 1,
class_id=7)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=0.0 / 1, class_id=7)
# All classes: 8 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions, labels=sp_labels, k=5, expected=3.0 / 8)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=3.0 / 8)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_3d_no_predictions(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,8 have 0 predictions, >=1 label.
for class_id in (1, 8):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 labels, all correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=4.0 / 4, class_id=2)
# Class 5: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=5)
# Class 7: 2 labels, 1 incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, class_id=7)
# All classes: 12 labels, 7 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=7.0 / 12)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=7.0 / 12)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 2 labels, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
# Class 7: No labels.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 1, 0], [0, 0, 0, 1]]
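    # Top-1 prediction is class 3 for both rows; only row 2's label is class
    # 3, so recall@1 = 1 retrieved / 2 labels = 0.5.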
expected_recall = 0.5
with self.cached_session():
_, recall = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_recall, recall.eval())
class StreamingMeanAbsoluteErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_absolute_error/count:0', 'mean_absolute_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_absolute_error(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
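    # Only the 2nd and 4th elements are weighted: (|4 - 3| + |8 - 3|) / 2 = 3.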
error, update_op = metrics.streaming_mean_absolute_error(
predictions, labels, weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(3, sess.run(update_op))
self.assertEqual(3, error.eval())
class StreamingMeanRelativeErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_relative_error/count:0', 'mean_relative_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
normalizer = random_ops.random_normal((10, 3), seed=3)
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateNormalizedByLabels(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_predictions - np_labels), np_labels))
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(np_labels, shape=(1, 4))
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(expected_error, sess.run(update_op))
self.assertEqual(expected_error, error.eval())
def testSingleUpdateNormalizedByZeros(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=array_ops.zeros_like(labels))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.0, sess.run(update_op))
self.assertEqual(0.0, error.eval())
class StreamingMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_squared_error/count:0', 'mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError(self):
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
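    # Squared errors are 1, 1, and 16, so the mean is 18 / 3 = 6.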
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(6, sess.run(update_op))
self.assertEqual(6, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
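    # Only the weighted elements count: ((4 - 3)**2 + (8 - 3)**2) / 2 = 13.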
error, update_op = metrics.streaming_mean_squared_error(
predictions, labels, weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(13, sess.run(update_op))
self.assertEqual(13, error.eval())
def testMultipleBatchesOfSizeOne(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
error, update_op = metrics.streaming_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
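      # The two batches give squared error 122 + 86 = 208 over 6 values.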
self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
def testMetricsComputedConcurrently(self):
with self.cached_session() as sess:
# Create the queue that populates one set of predictions.
preds_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue0, [10, 8, 6])
_enqueue_vector(sess, preds_queue0, [-4, 3, -1])
predictions0 = preds_queue0.dequeue()
# Create the queue that populates one set of predictions.
preds_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue1, [0, 1, 1])
_enqueue_vector(sess, preds_queue1, [1, 1, 0])
predictions1 = preds_queue1.dequeue()
# Create the queue that populates one set of labels.
labels_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue0, [1, 3, 2])
_enqueue_vector(sess, labels_queue0, [2, 4, 6])
labels0 = labels_queue0.dequeue()
# Create the queue that populates another set of labels.
labels_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue1, [-5, -3, -1])
_enqueue_vector(sess, labels_queue1, [5, 4, 3])
labels1 = labels_queue1.dequeue()
mse0, update_op0 = metrics.streaming_mean_squared_error(
predictions0, labels0, name='msd0')
mse1, update_op1 = metrics.streaming_mean_squared_error(
predictions1, labels1, name='msd1')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1])
sess.run([update_op0, update_op1])
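      # Pair 0 accumulates squared error 122 + 86 = 208 over 6 values; pair 1
      # accumulates 45 + 34 = 79.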
mse0, mse1 = sess.run([mse0, mse1])
self.assertAlmostEqual(208.0 / 6, mse0, 5)
self.assertAlmostEqual(79.0 / 6, mse1, 5)
def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
mae, ma_update_op = metrics.streaming_mean_absolute_error(
predictions, labels)
mse, ms_update_op = metrics.streaming_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
sess.run([ma_update_op, ms_update_op])
sess.run([ma_update_op, ms_update_op])
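      # Absolute errors sum to 18 + 14 = 32 and squared errors to 122 + 86 =
      # 208, each averaged over the 6 values.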
self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
class StreamingRootMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('root_mean_squared_error/count:0', 'root_mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
0.0, shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(0.0, shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, rmse.eval())
def testSingleUpdateWithError(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
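      # Mean squared error is (1 + 1 + 16) / 3 = 6, so RMSE is sqrt(6).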
rmse, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)
self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)
def testSingleUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
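      # Weighted mean squared error is (1 + 25) / 2 = 13, so RMSE is sqrt(13).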
rmse, update_op = metrics.streaming_root_mean_squared_error(
predictions, labels, weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(13), sess.run(update_op))
self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)
class StreamingCovarianceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_covariance(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])))
_assert_metric_variables(self, (
'covariance/comoment:0',
'covariance/count:0',
'covariance/mean_label:0',
'covariance/mean_prediction:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
cov, _ = metrics.streaming_covariance(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [cov])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_covariance(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
cov, update_op = metrics.streaming_covariance(predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_cov = cov.eval()
for _ in range(10):
self.assertEqual(initial_cov, cov.eval())
def testSingleUpdateIdentical(self):
with self.cached_session() as sess:
predictions = math_ops.cast(math_ops.range(10), dtypes_lib.float32)
labels = math_ops.cast(math_ops.range(10), dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op), 5)
self.assertAlmostEqual(expected_cov, cov.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, update_op.eval())
self.assertAlmostEqual(expected_cov, cov.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 7], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 3, 1], shape=(1, 4), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(
predictions, labels, weights=weights)
expected_cov = np.cov(
[2, 4, 6, 8], [1, 3, 2, 7], fweights=[0, 1, 3, 1])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op))
self.assertAlmostEqual(expected_cov, cov.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(prev_expected_cov,
sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(expected_cov,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_cov, sess.run(cov, feed_dict=feed_dict),
5)
prev_expected_cov = expected_cov
def testMultiUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(prev_expected_cov,
sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(
predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(expected_cov,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_cov, sess.run(cov, feed_dict=feed_dict),
5)
prev_expected_cov = expected_cov
class StreamingPearsonRTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_pearson_correlation(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])))
_assert_metric_variables(self, (
'pearson_r/covariance/comoment:0',
'pearson_r/covariance/count:0',
'pearson_r/covariance/mean_label:0',
'pearson_r/covariance/mean_prediction:0',
'pearson_r/variance_labels/count:0',
'pearson_r/variance_labels/comoment:0',
'pearson_r/variance_labels/mean_label:0',
'pearson_r/variance_labels/mean_prediction:0',
'pearson_r/variance_predictions/comoment:0',
'pearson_r/variance_predictions/count:0',
'pearson_r/variance_predictions/mean_label:0',
'pearson_r/variance_predictions/mean_prediction:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
pearson_r, _ = metrics.streaming_pearson_correlation(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [pearson_r])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_pearson_correlation(
predictions=math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10]),
labels=(math_ops.cast(math_ops.range(10), dtypes_lib.float32) +
array_ops.ones([10, 10])),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions, labels)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_r = pearson_r.eval()
for _ in range(10):
self.assertEqual(initial_r, pearson_r.eval())
def testSingleUpdateIdentical(self):
with self.cached_session() as sess:
predictions = math_ops.cast(math_ops.range(10), dtypes_lib.float32)
labels = math_ops.cast(math_ops.range(10), dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions, labels)
expected_r = np.corrcoef(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op), 5)
self.assertAlmostEqual(expected_r, pearson_r.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.cached_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions, labels)
expected_r = np.corrcoef([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, update_op.eval())
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
predictions = np.array([2, 4, 6, 8])
labels = np.array([1, 3, 2, 7])
weights = np.array([0, 1, 3, 1])
predictions_t = constant_op.constant(
predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels_t = constant_op.constant(
labels, shape=(1, 4), dtype=dtypes_lib.float32)
weights_t = constant_op.constant(
weights, shape=(1, 4), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
cmat = np.cov(predictions, labels, fweights=weights)
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op))
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(prev_expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
expected_r = np.corrcoef(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(expected_r,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndWeights(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(
np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(prev_expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
cmat = np.cov(
predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
self.assertAlmostEqual(expected_r,
sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndSingletonBatches(self):
with self.cached_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
weights = (np.arange(n).reshape(n // stride, stride) % stride == 0)
for row in weights:
np.random.shuffle(row)
# Now, weights is one-hot by row - one item per batch has non-zero weight.
weights = weights.reshape((n,))
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
cmat = np.cov(
predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
actual_r = sess.run(update_op, feed_dict=feed_dict)
self.assertEqual(np.isnan(expected_r), np.isnan(actual_r))
self.assertEqual(
np.isnan(expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(expected_r):
self.assertAlmostEqual(expected_r, actual_r, 5)
self.assertAlmostEqual(expected_r,
sess.run(pearson_r, feed_dict=feed_dict), 5)
class StreamingMeanCosineDistanceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1)
_assert_metric_variables(self, (
'mean_cosine_distance/count:0',
'mean_cosine_distance/total:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=1)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError1(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
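    # Per-row cosine distances are 0 (identical), 2 (opposite), and 1
    # (orthogonal), so the mean distance is 1.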
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 5)
self.assertAlmostEqual(1, error.eval(), 5)
def testSingleUpdateWithError2(self):
np_predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
np_labels = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
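    # Row 1 matches exactly (distance 0), row 2 is negated (distance 2), and
    # row 3 is orthogonal (distance 1), so the mean distance is 1.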
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.0, error.eval(), 5)
def testSingleUpdateWithErrorAndWeights1(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithErrorAndWeights2(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
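# Worked expectation: with weights [0, 1, 1] only the last two rows contribute; their
# cosine distances (1 - cos) are 2 and 1, so the weighted mean asserted below is 1.5.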
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 1], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.5, update_op.eval())
self.assertEqual(1.5, error.eval())
class PcntBelowThreshTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_percentage_less(values=array_ops.ones((10,)), threshold=2)
_assert_metric_variables(self, (
'percentage_below_threshold/count:0',
'percentage_below_threshold/total:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testOneUpdate(self):
with self.cached_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, name='low')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1, update_op2])
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.75, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
def testSomePresentOneUpdate(self):
with self.cached_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)
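# Worked expectation: only the first and last values carry weight, so the fractions below
# are computed over {2, 8}: both < 100 -> 1.0, only 2 < 7 -> 0.5, none < 1 -> 0.0.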
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, weights=weights, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, weights=weights, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, weights=weights, name='low')
sess.run(variables.local_variables_initializer())
self.assertListEqual([1.0, 0.5, 0.0],
sess.run([update_op0, update_op1, update_op2]))
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.5, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
class StreamingMeanIOUTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2)
_assert_metric_variables(self, ('mean_iou/total_confusion_matrix:0',))
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_iou, _ = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10, 3])
labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(predictions, labels, num_classes=2)
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10])
labels = array_ops.ones([10])
weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(
predictions, labels, num_classes=2, weights=weights)
def testValueTensorIsIdempotent(self):
num_classes = 3
predictions = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=2)
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes=num_classes)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_miou = miou.eval()
for _ in range(10):
self.assertEqual(initial_miou, miou.eval())
def testMultipleUpdates(self):
num_classes = 3
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])
self.assertEqual(desired_output, miou.eval())
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
weights = weights_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
for _ in range(6):
sess.run(update_op)
desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testMultipleUpdatesWithMissingClass(self):
# Test the case where there are no predictions and labels for
# one class, and thus there is one row and one column with
# zero entries in the confusion matrix.
num_classes = 3
with self.cached_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
# There is no label for class 2.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
predictions = array_ops.concat([
constant_op.constant(0, shape=[5]),
constant_op.constant(1, shape=[5])
], 0)
labels = array_ops.concat([
constant_op.constant(0, shape=[3]),
constant_op.constant(1, shape=[7])
], 0)
num_classes = 2
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
confusion_matrix = update_op.eval()
self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
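# Per-class IoU from the confusion matrix above: tp / (tp + fp + fn), i.e.
# class 0: 3 / (3 + 2 + 0) = 3/5 and class 1: 5 / (5 + 0 + 2) = 5/7.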
desired_miou = np.mean([3. / 5., 5. / 7.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testAllCorrect(self):
predictions = array_ops.zeros([40])
labels = array_ops.zeros([40])
num_classes = 1
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertEqual(40, update_op.eval()[0])
self.assertEqual(1.0, miou.eval())
def testAllWrong(self):
predictions = array_ops.zeros([40])
labels = array_ops.ones([40])
num_classes = 2
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
self.assertEqual(0., miou.eval())
def testResultsWithSomeMissing(self):
predictions = array_ops.concat([
constant_op.constant(0, shape=[5]),
constant_op.constant(1, shape=[5])
], 0)
labels = array_ops.concat([
constant_op.constant(0, shape=[3]),
constant_op.constant(1, shape=[7])
], 0)
num_classes = 2
weights = array_ops.concat([
constant_op.constant(0, shape=[1]),
constant_op.constant(1, shape=[8]),
constant_op.constant(0, shape=[1])
], 0)
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
desired_miou = np.mean([2. / 4., 4. / 6.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testMissingClassInLabels(self):
labels = constant_op.constant([[[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant(
[[[0, 0, 2, 1, 1, 0], [0, 1, 2, 2, 0, 1]], [[0, 0, 2, 1, 1, 1],
[1, 1, 2, 0, 0, 0]]])
num_classes = 3
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[7, 4, 3], [3, 5, 2], [0, 0, 0]], update_op.eval())
self.assertAlmostEqual(1 / 3 * (7 / (7 + 3 + 7) + 5 / (5 + 4 + 5) + 0 /
(0 + 5 + 0)), miou.eval())
def testMissingClassOverallSmall(self):
labels = constant_op.constant([0])
predictions = constant_op.constant([0])
num_classes = 2
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[1, 0], [0, 0]], update_op.eval())
self.assertAlmostEqual(1, miou.eval())
def testMissingClassOverallLarge(self):
labels = constant_op.constant([[[0, 0, 1, 1, 0, 0], [1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant(
[[[0, 0, 1, 1, 0, 0], [1, 1, 0, 0, 1, 1]], [[0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0]]])
num_classes = 3
with self.cached_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[9, 5, 0], [3, 7, 0], [0, 0, 0]], update_op.eval())
self.assertAlmostEqual(1 / 2 * (9 / (9 + 3 + 5) + 7 / (7 + 5 + 3)),
miou.eval())
class StreamingConcatTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_concat(values=array_ops.ones((10,)))
_assert_metric_variables(self, (
'streaming_concat/array:0',
'streaming_concat/size:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
value, _ = metrics.streaming_concat(
values=array_ops.ones((10,)), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [value])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_concat(
values=array_ops.ones((10,)), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testNextArraySize(self):
next_array_size = metric_ops._next_array_size # pylint: disable=protected-access
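# The expected sizes below reflect the doubling behaviour of _next_array_size, which
# (roughly) returns growth_factor ** ceil(log(required_size) / log(growth_factor)).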
with self.cached_session():
self.assertEqual(next_array_size(2, growth_factor=2).eval(), 2)
self.assertEqual(next_array_size(3, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(4, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(5, growth_factor=2).eval(), 8)
self.assertEqual(next_array_size(6, growth_factor=2).eval(), 8)
def testStreamingConcat(self):
with self.cached_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([0, 1, 2, 3, 4], concatenated.eval())
sess.run([update_op], feed_dict={values: [5, 6, 7, 8, 9]})
self.assertAllEqual(np.arange(10), concatenated.eval())
def testStreamingConcatStringValues(self):
with self.cached_session() as sess:
values = array_ops.placeholder(dtypes_lib.string, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertItemsEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: ['a', 'b', 'c']})
self.assertItemsEqual([b'a', b'b', b'c'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['d', 'e']})
self.assertItemsEqual([b'a', b'b', b'c', b'd', b'e'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['f', 'g', 'h', 'i', 'j']})
self.assertItemsEqual(
[b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j'],
concatenated.eval())
def testStreamingConcatMaxSize(self):
with self.cached_session() as sess:
values = math_ops.range(3)
concatenated, update_op = metrics.streaming_concat(values, max_size=5)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
def testStreamingConcat2D(self):
with self.cached_session() as sess:
values = array_ops.reshape(math_ops.range(3), (3, 1))
concatenated, update_op = metrics.streaming_concat(values, axis=-1)
sess.run(variables.local_variables_initializer())
for _ in range(10):
sess.run([update_op])
self.assertAllEqual([[0] * 10, [1] * 10, [2] * 10], concatenated.eval())
def testStreamingConcatErrors(self):
with self.assertRaises(ValueError):
metrics.streaming_concat(array_ops.placeholder(dtypes_lib.float32))
values = array_ops.zeros((2, 3))
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=-3, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=2, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(
array_ops.placeholder(dtypes_lib.float32, [None, None]))
def testStreamingConcatReset(self):
with self.cached_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run(variables.local_variables_initializer())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([3, 4], concatenated.eval())
class AggregateMetricsTest(test.TestCase):
def testAggregateNoMetricsRaisesValueError(self):
with self.assertRaises(ValueError):
metrics.aggregate_metrics()
def testAggregateSingleMetricReturnsOneItemLists(self):
values = array_ops.ones((10, 4))
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean(values))
self.assertEqual(len(value_tensors), 1)
self.assertEqual(len(update_ops), 1)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, update_ops[0].eval())
self.assertEqual(1, value_tensors[0].eval())
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean_absolute_error(predictions, labels),
metrics.streaming_mean_squared_error(predictions, labels))
self.assertEqual(len(value_tensors), 2)
self.assertEqual(len(update_ops), 2)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, update_ops[0].eval())
self.assertEqual(4, update_ops[1].eval())
self.assertEqual(2, value_tensors[0].eval())
self.assertEqual(4, value_tensors[1].eval())
class AggregateMetricMapTest(test.TestCase):
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
names_to_values, names_to_updates = metrics.aggregate_metric_map({
'm1': metrics.streaming_mean_absolute_error(predictions, labels),
'm2': metrics.streaming_mean_squared_error(predictions, labels),
})
self.assertEqual(2, len(names_to_values))
self.assertEqual(2, len(names_to_updates))
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, names_to_updates['m1'].eval())
self.assertEqual(4, names_to_updates['m2'].eval())
self.assertEqual(2, names_to_values['m1'].eval())
self.assertEqual(4, names_to_values['m2'].eval())
class CountTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.count(array_ops.ones([4, 3]))
_assert_metric_variables(self, ['count/count:0'])
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.count(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.count(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testReturnType(self):
c, op = metrics.count(array_ops.ones([4, 3]))
self.assertTrue(isinstance(c, ops.Tensor))
self.assertTrue(isinstance(op, ops.Operation) or isinstance(op, ops.Tensor))
def testBasic(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
result, update_op = metrics.count(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(8.0, sess.run(result), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.cached_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
result, update_op = metrics.count(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(2.0, sess.run(update_op), 5)
self.assertAlmostEqual(4.0, sess.run(update_op), 5)
self.assertAlmostEqual(6.0, sess.run(update_op), 5)
self.assertAlmostEqual(8.0, sess.run(update_op), 5)
self.assertAlmostEqual(8.0, sess.run(result), 5)
def test1dWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [0.5])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1.2])
weights = weights_queue.dequeue()
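# Expected value below: the (1, 1) weight broadcasts over both columns of each (1, 2)
# value batch, so the weighted count is (0.5 + 0 + 0 + 1.2) * 2 = 3.4.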
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual(3.4, result.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 0.5, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1.2, shape=(1,))
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual(3.4, result.eval(), 5)
def test2dWeightedValues(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1.1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual(4.1, result.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.cached_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1.1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual(4.1, result.eval(), 5)
class CohenKappaTest(test.TestCase):
def _confusion_matrix_to_samples(self, confusion_matrix):
x, y = confusion_matrix.shape
pairs = []
for label in range(x):
for feature in range(y):
pairs += [label, feature] * confusion_matrix[label, feature]
pairs = np.array(pairs).reshape((-1, 2))
return pairs[:, 0], pairs[:, 1]
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2)
_assert_metric_variables(self, (
'cohen_kappa/po:0',
'cohen_kappa/pe_row:0',
'cohen_kappa/pe_col:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
kappa, _ = metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [kappa])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 1), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 1), maxval=3, dtype=dtypes_lib.int64, seed=2)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 3)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_kappa = kappa.eval()
for _ in range(10):
self.assertAlmostEqual(initial_kappa, kappa.eval(), 5)
def testBasic(self):
confusion_matrix = np.array([[9, 3, 1], [4, 8, 2], [2, 1, 6]])
# overall total = 36
# po = [9, 8, 6], sum(po) = 23
# pe_row = [15, 12, 9], pe_col = [13, 14, 9], so pe = [5.42, 4.67, 2.25]
# finally, kappa = (sum(po) - sum(pe)) / (N - sum(pe))
# = (23 - 12.34) / (36 - 12.34)
# = 0.45
# see: http://psych.unl.edu/psycrs/handcomp/hckappa.PDF
expect = 0.45
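# Optional cross-check sketch (assumes a scikit-learn version providing
# cohen_kappa_score, as referenced elsewhere in this file; not part of the test):
#   from sklearn.metrics import cohen_kappa_score
#   print(cohen_kappa_score(*self._confusion_matrix_to_samples(confusion_matrix)))  # ~0.45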
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
dtypes = [dtypes_lib.int16, dtypes_lib.int32, dtypes_lib.int64]
shapes = [
(len(labels),),  # 1-dim
(len(labels), 1),  # 2-dim
]
weights = [None, np.ones_like(labels)]
for dtype in dtypes:
for shape in shapes:
for weight in weights:
with self.cached_session() as sess:
predictions_tensor = constant_op.constant(
np.reshape(predictions, shape), dtype=dtype)
labels_tensor = constant_op.constant(
np.reshape(labels, shape), dtype=dtype)
kappa, update_op = metrics.cohen_kappa(
labels_tensor, predictions_tensor, 3, weights=weight)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 2)
self.assertAlmostEqual(expect, kappa.eval(), 2)
def testAllCorrect(self):
inputs = np.arange(0, 100) % 4
# confusion matrix
# [[25, 0, 0],
# [0, 25, 0],
# [0, 0, 25]]
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(inputs, inputs)
expect = 1.0
with self.cached_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testAllIncorrect(self):
labels = np.arange(0, 100) % 4
predictions = (labels + 1) % 4
# confusion matrix
# [[0, 25, 0],
# [0, 0, 25],
# [25, 0, 0]]
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(labels, predictions)
expect = -0.333333333333
with self.cached_session() as sess:
predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testWeighted(self):
confusion_matrix = np.array([[9, 3, 1], [4, 8, 2], [2, 1, 6]])
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
num_samples = np.sum(confusion_matrix, dtype=np.int32)
weights = (np.arange(0, num_samples) % 5) / 5.0
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(
# labels, predictions, sample_weight=weights)
expect = 0.453466583385
with self.cached_session() as sess:
predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels)
kappa, update_op = metrics.cohen_kappa(
labels, predictions, 4, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testWithMultipleUpdates(self):
confusion_matrix = np.array([[90, 30, 10, 20], [40, 80, 20, 30],
[20, 10, 60, 35], [15, 25, 30, 25]])
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
num_samples = np.sum(confusion_matrix, dtype=np.int32)
weights = (np.arange(0, num_samples) % 5) / 5.0
num_classes = confusion_matrix.shape[0]
batch_size = num_samples // 10
predictions_t = array_ops.placeholder(
dtypes_lib.float32, shape=(batch_size,))
labels_t = array_ops.placeholder(dtypes_lib.int32, shape=(batch_size,))
weights_t = array_ops.placeholder(dtypes_lib.float32, shape=(batch_size,))
kappa, update_op = metrics.cohen_kappa(
labels_t, predictions_t, num_classes, weights=weights_t)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for idx in range(0, num_samples, batch_size):
batch_start, batch_end = idx, idx + batch_size
sess.run(
update_op,
feed_dict={
labels_t: labels[batch_start:batch_end],
predictions_t: predictions[batch_start:batch_end],
weights_t: weights[batch_start:batch_end]
})
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(
# labels_np, predictions_np, sample_weight=weights_np)
expect = 0.289965397924
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testInvalidNumClasses(self):
predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))
labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))
with self.assertRaisesRegexp(ValueError, 'num_classes'):
metrics.cohen_kappa(labels, predictions, 1)
def testInvalidDimension(self):
predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))
invalid_labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 2))
with self.assertRaises(ValueError):
metrics.cohen_kappa(invalid_labels, predictions, 3)
invalid_predictions = array_ops.placeholder(
dtypes_lib.float32, shape=(4, 2))
labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))
with self.assertRaises(ValueError):
metrics.cohen_kappa(labels, invalid_predictions, 3)
def testConditionalPackingOptimization(self):
placeholder = array_ops.placeholder(dtypes_lib.float32, [None])
values, update_op = metric_ops.streaming_concat(placeholder)
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for feed in range(10):
sess.run(update_op, feed_dict={placeholder: [feed]})
print(sess.run(values))
if __name__ == '__main__':
test.main()
|
apache-2.0
|
gimli-org/gimli
|
pygimli/physics/sNMR/mrs.py
|
1
|
30988
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Magnetic resonance sounding module."""
# general modules to import according to standards
import time
import numpy as np
import matplotlib.pyplot as plt
import pygimli as pg
from pygimli.utils import iterateBounds
from pygimli.utils.base import gmat2numpy
from pygimli.viewer.mpl import drawModel1D
# local functions in package
from pygimli.physics.sNMR.modelling import MRS1dBlockQTModelling
from pygimli.physics.sNMR.plotting import showErrorBars, showWC, showT2
class MRS():
"""Magnetic resonance sounding (MRS) manager class.
Attributes
----------
t, q : ndarray - time and pulse moment vectors
data, error : 2d ndarray - data and error cubes
K, z : ndarray - (complex) kernel and its vertical discretization
model, modelL, modelU : vectors - model vector and lower/upper bound to it
Methods
-------
loadMRSI - load MRSI (MRSmatlab format) data
showCube - show any data/error/misfit as data cube (over q and t)
showDataAndError - show data and error cubes
showKernel - show Kernel matrix
createFOP - create forward operator
createInv - create pygimli Inversion instance
run - run block-mono (alternatively smooth-mono) inversion (with bootstrap)
calcMCM - compute model covariance matrix and thus uncertainties
splitModel - return thickness, water content and T2* time from vector
showResult/showResultAndFit - show inversion result (with fit)
runEA - run evolutionary algorithm (GA, PSO etc.) using inspyred
plotPopulation - plot final population of an EA run
"""
def __init__(self, name=None, verbose=True, **kwargs):
"""MRS init with optional data load from mrsi file
Parameters
----------
name : string
Filename to load data and kernel (*.mrsi) or just data (*.mrsd) from
verbose : bool
be verbose
kwargs - see :func:`MRS.loadMRSI`.
"""
self.verbose = verbose
self.t, self.q, self.z = None, None, None
self.data, self.error = None, None
self.K, self.fop, self.INV = None, None, None
self.dcube, self.ecube = None, None
self.lLB, self.lUB = None, None
self.nlay = 0
self.model, self.modelL, self.modelU = None, None, None
self.lowerBound = [1.0, 0.0, 0.02] # d, theta, T2*
self.upperBound = [30., 0.45, 1.00] # d, theta, T2*
self.startval = [10., 0.30, 0.20] # d, theta, T2*
self.logpar = False
self.basename = 'new'
self.figs = {}
if name is not None: # load data and kernel
# check for mrsi/d/k
if name[-5:-1].lower() == '.mrs': # mrsi or mrsd
self.loadMRSI(name, **kwargs)
self.basename = name[:-5]  # strip the '.mrsi'/'.mrsd' suffix (rstrip would drop letters)
# elif name[-5:].lower() == '.mrsd':
# self.loadMRSD(name, **kwargs)
elif name.lower().endswith('npz'):
self.loadDataNPZ(name, **kwargs)
else:
self.loadDir(name)
def __repr__(self): # for print function
"""String representation."""
out = ""
if len(self.t) > 0 and len(self.q) > 0:
out = "<MRSdata: %d qs, %d times" % \
(len(self.q), len(self.t))
if hasattr(self.z, '__iter__') and len(self.z) > 0:
out += ", %d layers" % len(self.z)
return out + ">"
def loadDataNPZ(self, filename, **kwargs):
"""Load data and kernel from numpy gzip packed file.
The npz file contains the fields: q, t, D, (E), z, K
"""
self.basename = filename.rstrip('.npz')
DATA = np.load(filename)
self.q = DATA['q']
self.t = DATA['t']
self.z = np.absolute(DATA['z'])
self.K = DATA['K']
self.dcube = DATA['D']
ndcubet = len(self.dcube[0])
if len(self.dcube) == len(self.q) and ndcubet == len(self.t):
if kwargs.pop('usereal', False):
self.data = np.real(self.dcube.flat)
else:
self.data = np.abs(self.dcube.flat)
if 'E' in DATA:
self.ecube = DATA['E']
else:
self.ecube = np.zeros_like(self.dcube)
self.checkData(**kwargs)
def loadKernelNPZ(self, filename, **kwargs):
"""Load kernel and its vertical discretization from numpy gzip packed file.
The npz file contains the fields: pulseMoments, zVector, kernel
"""
self.basename = filename.rstrip('.npz')
DATA = np.load(filename)
self.q = DATA['pulseMoments']
self.z = np.absolute(DATA['zVector'])
self.K = DATA['kernel']
def loadMRSI(self, filename, **kwargs):
"""Load data, error and kernel from mrsi or mrsd file
Parameters
----------
usereal : bool [False]
use real parts (after data rotation) instead of amplitudes
mint/maxt : float [0.0/2.0]
minimum/maximum time to restrict time series
"""
from scipy.io import loadmat # loading Matlab mat files
if filename[-5:].lower() == '.mrsd':
idata = None
pl = loadmat(filename, struct_as_record=False,
squeeze_me=True)['proclog']
self.q = np.array([q.q for q in pl.Q])
self.t = pl.Q[0].rx.sig[0].t + pl.Q[0].timing.tau_dead1
nq = len(pl.Q)
nt = len(self.t)
self.dcube = np.zeros((nq, nt))
self.ecube = np.zeros((nq, nt))
# self.ecube = np.ones((nq, nt))*20e-9
for i in range(nq):
self.dcube[i, :] = pl.Q[i].rx.sig[1].V
self.ecube[i, :] = np.real(pl.Q[i].rx.sig[1].E)
else:
idata = loadmat(filename, struct_as_record=False,
squeeze_me=True)['idata']
self.t = idata.data.t + idata.data.effDead
self.q = idata.data.q
self.K = idata.kernel.K
self.z = np.hstack((0., idata.kernel.z))
self.dcube = idata.data.dcube
self.ecube = idata.data.ecube
defaultNoise = kwargs.get("defaultNoise", 100e-9)
if self.ecube[0][0] == 0:
self.ecube = np.ones_like(self.dcube) * defaultNoise
if self.verbose:
print("no errors in file, assuming", defaultNoise*1e9, "nV")
self.ecube = np.ones((len(self.q), len(self.t))) * defaultNoise
if idata is not None:
self.ecube /= np.sqrt(idata.data.gateL)
self.checkData(**kwargs)
# load model from matlab file (result of MRSQTInversion)
if filename[-5:].lower() == '.mrsi' and hasattr(idata, 'inv1Dqt'):
if hasattr(idata.inv1Dqt, 'blockMono'):
sol = idata.inv1Dqt.blockMono.solution[0]
self.model = np.hstack((sol.thk, sol.w, sol.T2))
self.nlay = len(sol.w)
if self.verbose:
print("loaded file: " + filename)
def checkData(self, **kwargs):
"""Check data and retrieve data and error vector."""
mint = kwargs.pop('mint', 0)
maxt = kwargs.pop('maxt', 1000)
good = (self.t <= maxt) & (self.t >= mint)
self.t = self.t[good]
self.dcube = self.dcube[:, good]
self.ecube = self.ecube[:, good]
ndcubet = len(self.dcube[0])
if len(self.dcube) == len(self.q) and ndcubet == len(self.t):
if kwargs.pop('usereal', False):
self.data = np.real(self.dcube.flat)
else:
self.data = np.abs(self.dcube.flat)
else:
print('Dimensions do not match!')
necubet = len(self.ecube[0])
if len(self.ecube) == len(self.q) and necubet == len(self.t):
self.error = self.ecube.ravel()
if min(self.error) <= 0.:
print("Warning: negative errors present! Taking absolute value")
self.error = np.absolute(self.error)
defaultNoise = kwargs.pop("defaultNoise", 100e-9)
if min(self.error) == 0.:
if self.verbose:
print("Warning: zero error, assuming", defaultNoise)
self.error[self.error == 0.] = defaultNoise
# clip data if desired (using vmin and vmax keywords)
if "vmax" in kwargs:
vmax = kwargs['vmax']
self.error[self.data > vmax] = max(self.error)*3
self.data[self.data > vmax] = vmax
if "vmin" in kwargs:
vmin = kwargs['vmin']
self.error[self.data < vmin] = max(self.error)*3
self.data[self.data < vmin] = vmin
if self.verbose:
print(self)
def loadMRSD(self, filename, usereal=False, mint=0., maxt=2.0):
"""Load mrsd (MRS data) file; rarely needed, as loadMRSI also reads mrsd files."""
from scipy.io import loadmat # loading Matlab mat files
print("Currently not using mint/maxt & usereal:", mint, maxt, usereal)
pl = loadmat(filename, struct_as_record=False,
squeeze_me=True)['proclog']
self.q = np.array([q.q for q in pl.Q])
self.t = pl.Q[0].rx.sig[0].t + pl.Q[0].timing.tau_dead1
nq = len(pl.Q)
nt = len(self.t)
self.dcube = np.zeros((nq, nt))
for i in range(nq):
self.dcube[i, :] = np.abs(pl.Q[i].rx.sig[1].V)
self.ecube = np.ones((nq, nt))*20e-9
def loadDataCube(self, filename='datacube.dat'):
"""Load data cube from single ascii file (old stuff)"""
A = np.loadtxt(filename).T
self.q = A[1:, 0]
self.t = A[0, 1:]
self.data = A[1:, 1:].ravel()
def loadErrorCube(self, filename='errorcube.dat'):
"""Load error cube from a single ascii file (old stuff)."""
A = np.loadtxt(filename).T
if len(A) == len(self.q) and len(A[0]) == len(self.t):
self.error = A.ravel()
elif len(A) == len(self.q) + 1 and len(A[0]) == len(self.t) + 1:
self.error = A[1:, 1:].ravel()
else:
self.error = np.ones(len(self.q) * len(self.t)) * 100e-9
def loadKernel(self, name=''):
"""Load kernel matrix from mrsk or two bmat files."""
from scipy.io import loadmat # loading Matlab mat files
if name[-5:].lower() == '.mrsk':
kdata = loadmat(name, struct_as_record=False,
squeeze_me=True)['kdata']
self.K = kdata.K
self.z = np.hstack((0., kdata.model.z))
else: # try load real/imag parts (backward compat.)
KR = pg.Matrix(name + 'KR.bmat')
KI = pg.Matrix(name + 'KI.bmat')
self.K = np.zeros((KR.rows(), KR.cols()), dtype='complex')
for i in range(KR.rows()):
self.K[i] = np.array(KR[i]) + np.array(KI[i]) * 1j
def loadZVector(self, filename='zkernel.vec'):
"""Load the kernel vertical discretisation (z) vector."""
self.z = pg.Vector(filename)
def loadDir(self, dirname):
"""Load several standard files from dir (old Borkum stage)."""
if not dirname[-1] == '/':
dirname += '/'
self.loadDataCube(dirname + 'datacube.dat')
self.loadErrorCube(dirname + 'errorcube.dat')
self.loadKernel(dirname)
self.loadZVector(dirname + 'zkernel.vec')
self.dirname = dirname # to save results etc.
def showCube(self, ax=None, vec=None, islog=None, clim=None, clab=None):
"""Plot any data (or response, error, misfit) cube nicely."""
if vec is None:
vec = np.array(self.data).flat
print(len(vec))
mul = 1.0
if max(vec) < 1e-3: # Volts
mul = 1e9
if ax is None:
_, ax = plt.subplots(1, 1)
if islog is None:
print(len(vec))
islog = (min(vec) > 0.)
negative = (min(vec) < 0)
if islog:
vec = np.log10(np.abs(vec))
if clim is None:
if negative:
cmax = max(max(vec), -min(vec))
clim = (-cmax, cmax)
else:
cmax = max(vec)
if islog:
cmin = cmax - 1.5
else:
cmin = 0.
clim = (cmin, cmax)
xt = range(0, len(self.t), 10)
xtl = [str(ti) for ti in np.round(self.t[xt] * 1000.)]
qt = range(0, len(self.q), 5)
qtl = [str(qi) for qi in np.round(np.asarray(self.q)[qt] * 10.) / 10.]
mat = np.array(vec).reshape((len(self.q), len(self.t)))*mul
im = ax.imshow(mat, interpolation='nearest', aspect='auto')
im.set_clim(clim)
ax.set_xticks(xt)
ax.set_xticklabels(xtl)
ax.set_yticks(qt)
ax.set_yticklabels(qtl)
ax.set_xlabel('$t$ [ms]')
ax.set_ylabel('$q$ [As]')
cb = plt.colorbar(im, ax=ax, orientation='horizontal')
if clab is not None:
cb.ax.set_title(clab)
return clim
def showDataAndError(self, figsize=(10, 8), show=False):
"""Show data cube along with error cube."""
fig, ax = plt.subplots(1, 2, figsize=figsize)
self.showCube(ax[0], self.data * 1e9, islog=False)
self.showCube(ax[1], self.error * 1e9, islog=True)
if show:
plt.show()
self.figs['data+error'] = fig
return fig, ax
def showKernel(self, ax=None):
"""Show the kernel as matrix (Q over z)."""
if ax is None:
fig, ax = plt.subplots()
self.figs['kernel'] = fig
# ax.imshow(self.K.T, interpolation='nearest', aspect='auto')
ax.matshow(self.K.T, aspect='auto')
yt = ax.get_yticks()
maxzi = self.K.shape[1]
yt = yt[(yt >= 0) & (yt < maxzi)]
if yt[-1] < maxzi-2:
yt = np.hstack((yt, maxzi))
zl = self.z[[int(yti) for yti in yt]]
ytl = [str(zi) for zi in np.round(zl, 1)]
ax.set_yticks(yt)
ax.set_yticklabels(ytl)
xt = ax.get_xticks()
maxqi = self.K.shape[0]
xt = xt[(xt >= 0) & (xt < maxqi)]
xtl = [np.round(self.q[iq], 2) for iq in xt]
ax.set_xticks(xt)
ax.set_xticklabels(xtl)
return fig, ax
@staticmethod
def createFOP(nlay, K, z, t): # , verbose=True, **kwargs):
"""Create forward operator instance."""
fop = MRS1dBlockQTModelling(nlay, K, z, t)
return fop
def setBoundaries(self):
"""Set parameter boundaries for inversion."""
for i in range(3):
self.fop.region(i).setParameters(self.startval[i],
self.lowerBound[i],
self.upperBound[i], "log")
def createInv(self, nlay=3, lam=100., verbose=True, **kwargs):
"""Create inversion instance (and fop if necessary with nlay)."""
self.fop = MRS.createFOP(nlay, self.K, self.z, self.t)
self.setBoundaries()
self.INV = pg.Inversion(self.data, self.fop, verbose)
self.INV.setLambda(lam)
self.INV.setMarquardtScheme(kwargs.pop('lambdaFactor', 0.8))
self.INV.stopAtChi1(False) # now in MarquardtScheme
self.INV.setDeltaPhiAbortPercent(0.5)
self.INV.setAbsoluteError(np.abs(self.error))
self.INV.setRobustData(kwargs.pop('robust', False))
return self.INV
@staticmethod
def simulate(model, K, z, t):
"""Do synthetic modelling."""
nlay = int(len(model) / 3) + 1
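# A block model has 3 * nlay - 1 entries (nlay - 1 thicknesses, nlay water contents,
# nlay decay times), hence nlay = len(model) // 3 + 1 above.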
fop = MRS.createFOP(nlay, K, z, t)
return fop.response(model)
def invert(self, nlay=3, lam=100., startvec=None,
verbose=True, uncertainty=False, **kwargs):
"""Easiest variant doing all (create fop and inv) in one call."""
if self.INV is None or self.nlay != nlay:
self.INV = self.createInv(nlay, lam, verbose, **kwargs)
self.INV.setVerbose(verbose)
if startvec is not None:
self.INV.setModel(startvec)
if verbose:
print("Doing inversion...")
self.model = np.array(self.INV.run())
return self.model
def run(self, verbose=True, uncertainty=False, **kwargs):
"""Run the inversion and optionally compute uncertainty bounds."""
self.invert(verbose=verbose, **kwargs)
if uncertainty:
if verbose:
print("Computing uncertainty...")
self.modelL, self.modelU = iterateBounds(
self.INV, dchi2=self.INV.chi2() / 2, change=1.2)
if verbose:
print("ready")
def splitModel(self, model=None):
"""Split model vector into d, theta and T2*."""
if model is None:
model = self.model
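# Layout of the block-model vector (example for nlay=3):
#   model = [d1, d2, theta1, theta2, theta3, T2*_1, T2*_2, T2*_3]
# i.e. nlay-1 thicknesses followed by nlay water contents and nlay decay times.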
nl = int(len(model) / 3) + 1  # = self.nlay
thk = model[:nl - 1]
wc = model[nl - 1:2 * nl - 1]
t2 = model[2 * nl - 1:3 * nl - 1]
return thk, wc, t2
def result(self):
"""Return block model results (thk, wc and T2 vectors)."""
return self.splitModel()
def showResult(self, figsize=(10, 8), save='', fig=None, ax=None):
"""Show theta(z) and T2*(z) (plus uncertainties if available)."""
if ax is None:
fig, ax = plt.subplots(1, 2, sharey=True, figsize=figsize)
self.figs['result'] = fig
thk, wc, t2 = self.splitModel()
showWC(ax[0], thk, wc)
showT2(ax[1], thk, t2)
if self.modelL is not None and self.modelU is not None:
thkL, wcL, t2L = self.splitModel(self.modelL)
thkU, wcU, t2U = self.splitModel(self.modelU)
showErrorBars(ax[0], thk, wc, thkL, thkU, wcL, wcU)
showErrorBars(ax[1], thk, t2*1e3, thkL, thkU, t2L*1e3, t2U*1e3)
if fig is not None:
if save:
fig.savefig(save, bbox_inches='tight')
return fig, ax
def showResultAndFit(self, figsize=(12, 10), save='', plotmisfit=False,
maxdep=0, clim=None):
"""Show ec(z), T2*(z), data and model response."""
fig, ax = plt.subplots(2, 2 + plotmisfit, figsize=figsize)
self.figs['result+fit'] = fig
thk, wc, t2 = self.splitModel()
showWC(ax[0, 0], thk, wc, maxdep=maxdep)
showT2(ax[0, 1], thk, t2, maxdep=maxdep)
ax[0, 0].set_title(r'MRS water content $\theta$')
ax[0, 1].set_title(r'MRS decay time $T_2^*$')
ax[0, 0].set_ylabel('$z$ [m]')
ax[0, 1].set_ylabel('$z$ [m]')
if self.modelL is not None and self.modelU is not None:
thkL, wcL, t2L = self.splitModel(self.modelL)
thkU, wcU, t2U = self.splitModel(self.modelU)
showErrorBars(ax[0, 0], thk, wc, thkL, thkU, wcL, wcU)
showErrorBars(ax[0, 1], thk, t2*1e3, thkL, thkU, t2L*1e3, t2U*1e3)
if maxdep > 0.:
ax[0, 0].set_ylim([maxdep, 0.])
ax[0, 1].set_ylim([maxdep, 0.])
clim = self.showCube(ax[1, 0], self.data * 1e9, islog=False, clim=clim)
ax[1, 0].set_title('measured data [nV]') # log10
self.showCube(
ax[1, 1], self.INV.response() * 1e9, clim=clim, islog=False)
ax[1, 1].set_title('simulated data [nV]') # log10
if plotmisfit:
self.showCube(ax[0, 2], (self.data - self.INV.response()) * 1e9,
islog=False)
ax[0, 2].set_title('misfit [nV]') # log10
ewmisfit = (self.data - self.INV.response()) / self.error
self.showCube(ax[1, 2], ewmisfit, islog=False)
ax[1, 2].set_title('error-weighted misfit')
if save:
if not isinstance(save, str):
save = self.basename
fig.savefig(save, bbox_inches='tight')
return fig, ax
def saveResult(self, filename):
"""Save inversion result to column text file for later use."""
thk, wc, t2 = self.splitModel()
z = np.hstack((0., np.cumsum(thk)))
ALL = np.column_stack((z, wc, t2))
if self.modelL is not None and self.modelU is not None:
thkL, wcL, t2L = self.splitModel(self.modelL)
thkU, wcU, t2U = self.splitModel(self.modelU)
zL = z.copy()
zL[1:] += (thkL - thk)
zU = z.copy()
zU[1:] += (thkU - thk)
ALL = np.column_stack((z, wc, t2, zL, zU, wcL, wcU, t2L, t2U))
np.savetxt(filename, ALL, fmt='%.3f')
def loadResult(self, filename):
"""Load inversion result from column file."""
A = np.loadtxt(filename)
z, wc, t2 = A[:, 0], A[:, 1], A[:, 2]
thk = np.diff(z)
self.nlay = len(wc)
self.model = np.hstack((thk, wc, t2))
if len(A[0]) > 8:
zL, wcL, t2L = A[:, 3], A[:, 5], A[:, 7]
zU, wcU, t2U = A[:, 4], A[:, 6], A[:, 8]
thkL = thk + zL[1:] - z[1:]
thkU = thk + zU[1:] - z[1:]
t2L[t2L < 0.01] = 0.01
self.modelL = np.hstack((thkL, wcL, t2L))
t2U[t2U > 1.0] = 1.0
self.modelU = np.hstack((thkU, wcU, t2U))
def calcMCM(self):
"""Compute linear model covariance matrix."""
J = gmat2numpy(self.fop.jacobian()) # (linear) jacobian matrix
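# Linearized error propagation as coded below:
#   MCM = (J^T D^T D J)^-1 with D = diag(1 / error);
# the scaled (correlation) matrix divides entry (i, j) by sqrt(MCM_ii * MCM_jj).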
D = np.diag(1 / self.error)
DJ = D.dot(J)
JTJ = DJ.T.dot(DJ)
MCM = np.linalg.inv(JTJ) # model covariance matrix
var = np.sqrt(np.diag(MCM)) # standard deviations from main diagonal
di = (1. / var) # variances as column vector
# scaled model covariance (=correlation) matrix
MCMs = di.reshape(len(di), 1) * MCM * di
return var, MCMs
def calcMCMbounds(self):
"""Compute model bounds using covariance matrix diagonals."""
mcm = self.calcMCM()[0]
self.modelL = self.model - mcm
self.modelU = self.model + mcm
def genMod(self, individual):
"""Generate (GA) model from random vector (0-1) using model bounds."""
model = np.asarray(individual) * (self.lUB - self.lLB) + self.lLB
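# i.e. model_i = lLB_i + x_i * (lUB_i - lLB_i); with logpar=True the bounds are
# log-transformed, so the result is mapped back with exp below.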
if self.logpar:
return pg.exp(model)
else:
return model
def runEA(self, nlay=None, eatype='GA', pop_size=100, num_gen=100,
runs=1, mp_num_cpus=8, **kwargs):
"""Run evolutionary algorithm using the inspyred library
Parameters
----------
nlay : int [taken from classic fop if not given]
number of layers
pop_size : int [100]
population size
num_gen : int [100]
number of generations
runs : int [1]
number of independent runs (with random population)
eatype : string ['GA']
algorithm, choose among:
'GA' - Genetic Algorithm [default]
'SA' - Simulated Annealing
'DEA' - Discrete Evolutionary Algorithm
'PSO' - Particle Swarm Optimization
'ACS' - Ant Colony Strategy
'ES' - Evolutionary Strategy
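Examples
--------
Sketch only (assumes data and kernel have been loaded):
>>> mrs.runEA(nlay=3, eatype='GA', pop_size=100, num_gen=50)  # doctest: +SKIP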
"""
import inspyred
import random
def mygenerate(random, args):
"""generate a random vector of model size"""
return [random.random() for i in range(nlay * 3 - 1)]
def my_observer(population, num_generations, num_evaluations, args):
""" print fitness over generation number """
best = min(population)
print('{0:6} -- {1}'.format(num_generations, best.fitness))
@inspyred.ec.evaluators.evaluator
def datafit(individual, args):
""" error-weighted data misfit as basis for evaluating fitness """
misfit = (self.data -
self.fop.response(self.genMod(individual))) / self.error
return np.mean(misfit**2)
# prepare forward operator
if self.fop is None or (nlay is not None and nlay != self.nlay):
self.fop = MRS.createFOP(nlay, self.K, self.z, self.t)
lowerBound = pg.cat(pg.cat(pg.Vector(self.nlay - 1,
self.lowerBound[0]),
pg.Vector(self.nlay, self.lowerBound[1])),
pg.Vector(self.nlay, self.lowerBound[2]))
upperBound = pg.cat(pg.cat(pg.Vector(self.nlay - 1,
self.upperBound[0]),
pg.Vector(self.nlay, self.upperBound[1])),
pg.Vector(self.nlay, self.upperBound[2]))
if self.logpar:
self.lLB, self.lUB = pg.log(lowerBound), pg.log(
upperBound) # ready mapping functions
else:
self.lLB, self.lUB = lowerBound, upperBound
# self.f = MRS1dBlockQTModelling(nlay, self.K, self.z, self.t)
# setup random generator
rand = random.Random()
# choose among different evolution algorithms
if eatype == 'GA':
ea = inspyred.ec.GA(rand)
ea.variator = [
inspyred.ec.variators.blend_crossover,
inspyred.ec.variators.gaussian_mutation]
ea.selector = inspyred.ec.selectors.tournament_selection
ea.replacer = inspyred.ec.replacers.generational_replacement
if eatype == 'SA':
ea = inspyred.ec.SA(rand)
if eatype == 'DEA':
ea = inspyred.ec.DEA(rand)
if eatype == 'PSO':
ea = inspyred.swarm.PSO(rand)
if eatype == 'ACS':
ea = inspyred.swarm.ACS(rand, [])
if eatype == 'ES':
ea = inspyred.ec.ES(rand)
ea.terminator = [inspyred.ec.terminators.evaluation_termination,
inspyred.ec.terminators.diversity_termination]
else:
ea.terminator = inspyred.ec.terminators.evaluation_termination
# ea.observer = my_observer
ea.observer = [
inspyred.ec.observers.stats_observer,
inspyred.ec.observers.file_observer]
tstr = '{0}'.format(time.strftime('%y%m%d-%H%M%S'))
self.EAstatfile = self.basename + '-' + eatype + 'stat' + tstr + '.csv'
with open(self.EAstatfile, 'w') as fid:
self.pop = []
for i in range(runs):
rand.seed(int(time.time()))
self.pop.extend(ea.evolve(
evaluator=datafit, generator=mygenerate, maximize=False,
pop_size=pop_size, max_evaluations=pop_size*num_gen,
bounder=inspyred.ec.Bounder(0., 1.), num_elites=1,
statistics_file=fid, **kwargs))
# self.pop.extend(ea.evolve(
# generator=mygenerate, maximize=False,
# evaluator=inspyred.ec.evaluators.parallel_evaluation_mp,
# mp_evaluator=datafit, mp_num_cpus=mp_num_cpus,
# pop_size=pop_size, max_evaluations=pop_size*num_gen,
# bounder=inspyred.ec.Bounder(0., 1.), num_elites=1,
# statistics_file=fid, **kwargs))
self.pop.sort(reverse=True)
self.fits = [ind.fitness for ind in self.pop]
print('minimum fitness of ' + str(min(self.fits)))
def plotPopulation(self, maxfitness=None, fitratio=1.05, savefile=True):
"""Plot fittest individuals (fitness<maxfitness) as 1d models
Parameters
----------
maxfitness : float
maximum fitness value (absolute) OR
fitratio : float [1.05]
maximum ratio to minimum fitness
"""
if maxfitness is None:
maxfitness = min(self.fits) * fitratio
fig, ax = plt.subplots(1, 2, sharey=True)
self.figs['population'] = fig
maxz = 0
for ind in self.pop:
if ind.fitness < maxfitness:
model = np.asarray(self.genMod(ind.candidate))
thk = model[:self.nlay - 1]
wc = model[self.nlay - 1:self.nlay * 2 - 1]
t2 = model[self.nlay * 2 - 1:]
drawModel1D(ax[0], thk, wc * 100, color='grey')
drawModel1D(ax[1], thk, t2 * 1000, color='grey')
maxz = max(maxz, sum(thk))
model = np.asarray(self.genMod(self.pop[0].candidate))
thk = model[:self.nlay - 1]
wc = model[self.nlay - 1:self.nlay * 2 - 1]
t2 = model[self.nlay * 2 - 1:]
drawModel1D(ax[0], thk, wc * 100, color='black', linewidth=3)
drawModel1D(ax[1], thk, t2 * 1000, color='black', linewidth=3,
plotfunction='semilogx')
ax[0].set_xlim(self.lowerBound[1] * 100, self.upperBound[1] * 100)
ax[0].set_ylim((maxz * 1.2, 0))
ax[1].set_xlim(self.lowerBound[2] * 1000, self.upperBound[2] * 1000)
ax[1].set_ylim((maxz * 1.2, 0))
xt = [10, 20, 50, 100, 200, 500, 1000]
ax[1].set_xticks(xt)
ax[1].set_xticklabels([str(xti) for xti in xt])
if savefile:
fig.savefig(self.EAstatfile.replace('.csv', '.pdf'),
bbox_inches='tight')
plt.show()
def plotEAstatistics(self, fname=None):
"""Plot EA statistics (best, worst, ...) over time."""
if fname is None:
fname = self.EAstatfile
gen, psize, worst, best, med, avg, std = np.genfromtxt(
fname, unpack=True, usecols=range(7), delimiter=',')
stderr = std / np.sqrt(psize)
data = [avg, med, best, worst]
colors = ['black', 'blue', 'green', 'red']
labels = ['average', 'median', 'best', 'worst']
fig, ax = plt.subplots()
self.figs['statistics'] = fig
ax.errorbar(gen, avg, stderr, color=colors[0], label=labels[0])
ax.set_yscale('log')
for d, col, lab in zip(data[1:], colors[1:], labels[1:]):
ax.plot(gen, d, color=col, label=lab)
ax.fill_between(gen, data[2], data[3], color='#e6f2e6')
ax.grid(True)
ymin = min([min(d) for d in data])
ymax = max([max(d) for d in data])
yrange = ymax - ymin
ax.set_ylim((ymin - 0.1*yrange, ymax + 0.1*yrange))
ax.legend(loc='upper left') # , prop=prop)
ax.set_xlabel('Generation')
ax.set_ylabel('Fitness')
def saveFigs(self, basename=None, extension="pdf"):
"""Save all figures to (pdf) files."""
if basename is None:
basename = self.basename
for key in self.figs:
self.figs[key].savefig(basename+"-"+key+"."+extension,
bbox_inches='tight')
if __name__ == "__main__":
datafile = 'example.mrsi'
numlayers = 4
mrs = MRS(datafile)
mrs.run(nlay=numlayers, uncertainty=True)
outThk, outWC, outT2 = mrs.result()
mrs.saveResult(mrs.basename+'.result')
mrs.showResultAndFit(save=mrs.basename+'.pdf')
plt.show()
|
apache-2.0
|
ricardog/raster-project
|
projections/hpd/wpp.py
|
1
|
3094
|
#!/usr/bin/env python
import tempfile
from joblib import memory
import numpy as np
import pandas as pd
from .. import tiff_utils
MEMCACHE = memory.Memory(cachedir=tempfile.mkdtemp(prefix='hpd-wpp'),
verbose=0, mmap_mode='r')
class WPP(object):
def __init__(self, trend, year, fname):
self._trend = 'historical' if year < 2011 else trend
self._year = year
self._fname = fname
self._sheet = get_sheets(self._trend, fname)[0]
if year not in get_years(self.sheet):
raise RuntimeError('year %d not available in trend %s projection' %
(year, trend))
return
@property
def year(self):
return self._year
@property
def trend(self):
return self._trend
@property
def sheet(self):
return self._sheet
@property
def syms(self):
return ['un_code', 'hpd_ref']
def eval(self, df):
return project(self.trend, self.sheet, df['un_code'], df['hpd_ref'],
None, self.year, np.nan)
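# Hypothetical usage sketch (file name, trend label and raster variables are
# illustrative only, not part of this module):
#   wpp = WPP('medium', 2020, 'wpp_projections.xlsx')
#   hpd_2020 = wpp.eval({'un_code': un_code_raster, 'hpd_ref': baseline_density})
# eval() scales the reference population grid by each country's growth factor
# for the requested year; cells with an unknown country code become NaN.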
def remap(what, table, nomatch=None):
f = np.vectorize(lambda x: table[x] if x in table else nomatch,
otypes=[np.float32])
shape = what.shape
tmp = f(what.flatten())
return tmp.reshape(*shape)
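# Worked example for remap (values are illustrative):
#   remap(np.array([4, 8, 99]), {4: 1.5, 8: 0.5}, nomatch=np.nan)
#   -> array([1.5, 0.5, nan], dtype=float32)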
def check_years(sheet, years):
if years is None:
return set()
available = set(sheet.iloc[14, 5:].astype(int).tolist())
yset = set(years)
return yset - available
@MEMCACHE.cache
def get_sheets(trend, wpp):
trend = 'estimates' if trend == 'historical' else trend
xls = pd.ExcelFile(wpp)
if trend == 'all':
names = [x for x in xls.sheet_names if x != u'NOTES']  # a list, so it can be iterated twice below
else:
assert trend.upper() in xls.sheet_names
names = [trend.upper()]
sheets = [pd.read_excel(wpp, name) for name in names]
for name, sheet in zip(names, sheets):
## FIXME: I store the name of the sheet (or tab) in cell (0, 0)
## because MEMCACHE will not preserve metadata attributes. Once
## this gets fixed in pandas, it would be cleaner to create an
## attribute (name) that stores the sheet name.
sheet.ix[0, 0] = name.lower()
return sheets
def get_years(sheet):
return sheet.iloc[14, 5:].astype(int).tolist()
def project(trend, sheet, countries, grumps, mask, year, nodata):
## Some of the cells representing the year are treated as strings and
## some as integers so check for both.
col = np.logical_or(sheet.iloc[14].isin([year]),
sheet.iloc[14].isin([str(year)]))
if not np.any(col):
raise ValueError
ccode = sheet.iloc[15:, 4].astype(int).tolist()
hist = sheet.ix[15:, col]
if trend == 'historical':
ref = sheet.ix[15:, u'Unnamed: 57']
else:
ref = sheet.ix[15:, u'Unnamed: 5']
pop = hist.divide(ref, axis="index").astype('float32').values
mydict = dict((v, pop[ii]) for ii, v in enumerate(ccode))
growth = remap(countries, mydict, nodata)
unknown_mask = np.where(growth == nodata, True, False)
if mask is not None:
my_mask = np.logical_or(mask, unknown_mask)
else:
my_mask = unknown_mask
new_pop = np.multiply(grumps, growth)
return np.where(my_mask, nodata, new_pop)
|
apache-2.0
|
macks22/scikit-learn
|
sklearn/tests/test_kernel_approximation.py
|
244
|
7588
|
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations for an easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that SkewedChi2Sampler approximates kernel on random data
# compute exact kernel
c = 0.03
# abbreviations for an easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array has shape (n_samples_x, n_samples_y, n_features)
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
assert_true(np.all(np.isfinite(X_transformed)))  # transformed features stay finite despite the singular kernel
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters besides gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
|
bsd-3-clause
|
COSMOGRAIL/PyCS
|
pycs/disp/old/multispec.py
|
1
|
4026
|
"""
Stuff to make multi-D dispersion spectra, for instance for Mayavi ...
For now this works only for 4 curves, to give 3 dimensions.
"""
import sys
import numpy as np
#import matplotlib.pyplot as plt
import pycs.gen.util as util
import pycs.gen.lc as lc
import pycs.gen.polyml as ml
def dispcube(lcs, rawdispersionmethod, verbose=True, timewidth=30, timestep=1.0, optml=False, filename="dispcube.pkl"):
"""
3D specplot, calculates the dispersion over a cube of time-delays. And writes the result in a pickle.
For now this is quick and dirty programming : we want only 4 lightcurves.
This pickle can then be looked at with Mayavi (see example below)
"""
if len(lcs)!=4:
raise RuntimeError, "I want 4 lightcurves."
lcsc = [l.copy() for l in lcs]
# We bake the microlensing into the curves if we are not going to optimize it.
for l in lcsc:
if optml==False and l.ml != None:
l.applyml()
# We generate the list of possible couples.
couplelist = [couple for couple in [[lc1, lc2] for lc1 in lcsc for lc2 in lcsc] if couple[0] != couple[1]]
#for couple in couplelist:
# print couple[0].object, couple[1].object
#nfree = len(lcs)-1
if optml:
pass
# def d2value(params):
# lc.multisettimedelays(lcsc, params[:nfree])
# ml.multisetfreeparams(lcsc, params[nfree:])
#
# # optimize ml here
#
# d2values = np.array([rawdispersionmethod(*couple)["d2"] for couple in couplelist])
# ret = np.mean(d2values)
# #print ret
# return ret
#
else:
def d2value(delays):
lc.multisettimedelays(lcsc, delays)
d2values = np.array([rawdispersionmethod(*couple)["d2"] for couple in couplelist])
ret = np.mean(d2values)
#print ret
return ret
initparams = np.concatenate([lc.multigettimedelays(lcsc), ml.multigetfreeparams(lcsc)])
print "Initial params : ", initparams
timeshifts = np.arange(-(timewidth)*timestep/2.0, (timewidth+1)*timestep/2.0, timestep)
cubeindexes = np.arange(timewidth + 1)
print "Points to calculate :", len(timeshifts)**3
d2cube = np.zeros((timewidth+1, timewidth+1, timewidth+1))
xshifts = timeshifts + initparams[0]
yshifts = timeshifts + initparams[1]
zshifts = timeshifts + initparams[2]
if optml==False:
for ix in cubeindexes:
print "Slice %i of %i" % (ix + 1, timewidth+1)
for iy in cubeindexes:
for iz in cubeindexes:
d2cube[ix, iy, iz] = d2value([xshifts[ix], yshifts[iy], zshifts[iz]])
# Build full np.mgrid coordinate arrays (the commented reshapes below would correspond to np.ogrid).
#xshifts = xshifts.reshape(xshifts.size, 1, 1)
#yshifts = yshifts.reshape(1, yshifts.size, 1)
#zshifts = zshifts.reshape(1, 1, zshifts.size)
beg = -(timewidth)*timestep/2.0
end = (timewidth+1)*timestep/2.0
step = timestep
x, y, z = np.mgrid[beg:end:step, beg:end:step, beg:end:step]
#print x, y, z
x += initparams[0]
y += initparams[1]
z += initparams[2]
#print x, y, z
util.writepickle({"lcs":lcs, "x":x, "y":y, "z":z, "d2":d2cube}, filename)
# To give an idea how to plot such a data cube with Mayavi2/mlab :
# import sys
# sys.path.append("../")
# from pycs.gen import *
# import numpy as np
# from enthought.mayavi import mlab
#
# pkldict = util.readpickle("dispcube50.pkl")
#
# maxval = 1.5
# minval = 1.43
#
# x = pkldict["x"]
# y = pkldict["y"]
# z = pkldict["z"]
# d2 = pkldict["d2"]
#
# lcs = pkldict["lcs"]
#
# minpos = np.argmin(d2)
# minpos = np.unravel_index(minpos, d2.shape)
# min_x = x[minpos]
# min_y = y[minpos]
# min_z = z[minpos]
#
#
# mlab.clf()
#
# src = mlab.pipeline.scalar_field(x, y, z, d2)
#
# # in green, the minimum
# mlab.points3d([min_x], [min_y], [min_z], color=(0,1,0), mode="cube", scale_mode="none", resolution=14, scale_factor=0.15)
#
# mlab.pipeline.scalar_cut_plane(src, vmin=minval, vmax=maxval)
#
# mlab.colorbar(title='Dispersion', orientation='vertical')
#
# mlab.xlabel("%s%s"% (lcs[0].object, lcs[1].object))
# mlab.ylabel("%s%s"% (lcs[0].object, lcs[2].object))
# mlab.zlabel("%s%s"% (lcs[0].object, lcs[3].object))
#
#
# mlab.show()
|
gpl-3.0
|
tectronics/agpy
|
agpy/blackbody.py
|
6
|
26278
|
"""
============================
Simple black-body calculator
============================
Includes both wavelength and frequency blackbody functions. Has flexible
units. Also allows for a few varieties of modified blackbody.
"""
try:
from numpy import exp
except ImportError:
from math import exp
unitdict = {'cgs':{'h':6.626068e-27,
'k':1.3806503e-16,
'c':2.99792458e10,
'mh':1.67262158e-24 * 1.00794,
'length':'cm'},
'mks':{'h':6.626068e-34,
'k':1.3806503e-23,
'c':2.99792458e8,
'mh':1.67262158e-27 * 1.00794,
'length':'m'}
}
frequency_dict = {'Hz':1.0,
'kHz':1e3,
'MHz':1e6,
'GHz':1e9,
'THz':1e12,
}
def blackbody(nu,temperature, scale=1.0, units='cgs',frequency_units='Hz',
normalize=max, beta=0):
# load constants in desired units
h,k,c = unitdict[units]['h'],unitdict[units]['k'],unitdict[units]['c']
# convert nu to Hz
nu = nu * frequency_dict[frequency_units]
I = 2*h*nu**3 / c**2 * (exp(h*nu/(k*temperature)) - 1)**-1
if normalize and hasattr(I,'__len__'):
if len(I) > 1:
return I/normalize(I) * scale
else:
return I * scale
else:
return I * scale
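# Minimal usage sketch (values are illustrative, nothing here is a tabulated
# result):
#   import numpy as np
#   freqs = np.linspace(30., 300., 10)                     # GHz
#   spec = blackbody(freqs, 2.725, frequency_units='GHz')  # normalized to its peak by default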
wavelength_dict = {'meters':1.0,'m':1.0,
'centimeters':1e-2,'cm':1e-2,
'millimeters':1e-3,'mm':1e-3,
'nanometers':1e-9,'nm':1e-9,
'micrometers':1e-6,'micron':1e-6,'microns':1e-6,'um':1e-6,
'kilometers':1e3,'km':1e3,
'angstroms':1e-10,'A':1e-10,'Angstroms':1e-10,
}
def blackbody_wavelength(lam,temperature, scale=1.0,
units='cgs',wavelength_units='Angstroms', normalize=max, beta=0):
# load constants in desired units
h,k,c = unitdict[units]['h'],unitdict[units]['k'],unitdict[units]['c']
# convert lambda to cm (cgs) or m (mks)
lam = lam * wavelength_dict[wavelength_units] / (1e-2 if units=='cgs' else 1)
I = 2*h*c**2 / lam**5 * (exp(h*c/(k*temperature*lam)) - 1)**-1
if normalize and hasattr(I,'__len__'):
if len(I) > 1:
return I/normalize(I) * scale
else:
return I * scale
else:
return I * scale
def modified_blackbody(nu, temperature, beta=1.75, logN=22, logscale=0.0,
muh2=2.8, units='cgs',frequency_units='Hz', kappa0=4.0,
nu0=505e9, normalize=max, dusttogas=100.):
"""
Snu = 2hnu^3 c^-2 (e^(hnu/kT) - 1)^-1 (1 - e^(-tau_nu) )
Kappa0 and Nu0 are set as per http://arxiv.org/abs/1101.4654 which uses OH94 values.
beta = 1.75 is a reasonable default for Herschel data
N = 1e22 is the column density in cm^-2
nu0 and nu must have same units!
Parameters
----------
nu : float
Frequency in units of `frequency_units`
temperature : float
Temperature in Kelvins
beta : float
The blackbody modification value; the blackbody function is multiplied
by :math:`(1-e^{-\\tau_\\nu})` with :math:`\\tau_\\nu \\propto (\\nu/\\nu_0)^{\\beta}` (see kappa0, dusttogas and logN)
logN : float
The log column density to be fit
logscale : float
An arbitrary logarithmic scale to apply to the blackbody function
before passing it to mpfit; this is meant to prevent numerical
instability when attempting to fit very small numbers.
Can also be used to represent, e.g., steradians
muh2 : float
The mass (in amu) per molecule of H2. Defaults to 2.8.
units : 'cgs' or 'mks'
The unit system to use
frequency_units : string
Hz or some variant (GHz, kHz, etc)
kappa0 : float
The opacity in cm^2/g *for gas* at nu0 (see dusttogas)
nu0 : float
The frequency at which the opacity power law is locked
normalize : function or None
A normalization function for the blackbody. Set to None if you're
interested in the amplitude of the blackbody
dusttogas : float
The dust to gas ratio. The opacity kappa0 is divided by this number to
get the opacity of the dust
"""
h,k,c = unitdict[units]['h'],unitdict[units]['k'],unitdict[units]['c']
mh = unitdict[units]['mh']
kappanu = kappa0 / dusttogas * (nu/nu0)**beta
# numpy apparently can't multiply floats and longs
tau = muh2 * mh * kappanu * 10.0**logN
modification = (1.0 - exp(-1.0 * tau))
I = blackbody(nu, temperature, units=units,
frequency_units=frequency_units,
normalize=normalize)*modification
if normalize and hasattr(I,'__len__'):
if len(I) > 1:
return I/normalize(I) * 10.**logscale
else:
return I * 10.**logscale
else:
return I * 10.**logscale
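# Hedged usage sketch for modified_blackbody (frequencies roughly correspond to
# 500/250/160 micron; parameter values are illustrative, not fitted results):
#   import numpy as np
#   nu = np.array([6.0e11, 1.2e12, 1.9e12])   # Hz
#   sed = modified_blackbody(nu, 20., beta=1.75, logN=22, normalize=False)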
def greybody(nu, temperature, beta, A=1.0, logscale=0.0,
units='cgs', frequency_units='Hz', kappa0=4.0, nu0=3000e9,
normalize=max):
"""
Same as modified blackbody... not sure why I have it at all, though the
normalization constants are different.
"""
h,k,c = unitdict[units]['h'],unitdict[units]['k'],unitdict[units]['c']
modification = (1. - exp(-(nu/nu0)**beta))
I = blackbody(nu,temperature,units=units,frequency_units=frequency_units,normalize=normalize)*modification
if normalize and hasattr(I,'__len__'):
if len(I) > 1:
return I/normalize(I) * 10.**logscale
else:
return I * 10.**logscale
else:
return I * 10.**logscale
def modified_blackbody_wavelength(lam, temperature, beta=1.75, logN=22,
logscale=0.0, muh2=2.8, units='cgs',
wavelength_units='Angstroms', kappa0=4.0,
nu0=505e9, dusttogas=100., normalize=max):
"""
Snu = 2hnu^3 c^-2 (e^(hnu/kT) - 1)^-1 (1 - e^(-tau_nu) )
Kappa0 and Nu0 are set as per http://arxiv.org/abs/1101.4654 which uses OH94 values.
beta = 1.75 is a reasonable default for Herschel data
N = 1e22 is the column density in cm^-2
This is the blackbody function *in frequency units*
nu0 and nu must have same units! But wavelength is converted to frequency
of the right unit anyway
Parameters
----------
lam : float
Wavelength in units of `wavelength_units`
temperature : float
Temperature in Kelvins
beta : float
The blackbody modification value; the blackbody function is multiplied
by :math:`(1-e^{-\\tau_\\nu})` with :math:`\\tau_\\nu \\propto (\\nu/\\nu_0)^{\\beta}` (see kappa0, dusttogas and logN)
logN : float
The log column density to be fit
logscale : float
An arbitrary logarithmic scale to apply to the blackbody function
before passing it to mpfit; this is meant to prevent numerical
instability when attempting to fit very small numbers.
Can also be used to represent, e.g., steradians
muh2 : float
The mass (in amu) per molecule of H2. Defaults to 2.8.
units : 'cgs' or 'mks'
The unit system to use
wavelength_units : string
A valid wavelength (e.g., 'angstroms', 'cm','m')
kappa0 : float
The opacity in cm^2/g *for gas* at nu0 (see dusttogas)
nu0 : float
The frequency at which the opacity power law is locked.
kappa(nu) = kappa0/dusttogas * (nu/nu0)**beta
normalize : function or None
A normalization function for the blackbody. Set to None if you're
interested in the amplitude of the blackbody
dusttogas : float
The dust to gas ratio. The opacity kappa0 is divided by this number to
get the opacity of the dust
"""
h,k,c = unitdict[units]['h'],unitdict[units]['k'],unitdict[units]['c']
mh = unitdict[units]['mh']
nu = c/(lam*wavelength_dict[wavelength_units]/wavelength_dict[unitdict[units]['length']])
#I = modified_blackbody(nu, temperature, beta=beta, frequency_units='Hz',
# normalize=normalize, nu0=nu0, kappa0=kappa0,
# muh2=muh2, logscale=logscale, units=units,
# logN=logN)
kappanu = kappa0/dusttogas * (nu/nu0)**beta
tau = muh2 * mh * kappanu * 10.**logN
modification = (1.0 - exp(-1.0 * tau))
I = blackbody(nu,temperature,units=units,frequency_units='Hz',normalize=normalize)*modification
if normalize and hasattr(I,'__len__'):
if len(I) > 1:
return I/normalize(I) * 10.**logscale
else:
return I * 10.**logscale
else:
return I * 10**logscale
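# Hedged usage sketch, mirroring the call pattern used in the __main__ test
# block further below (wavelengths in microns, column density as log10 N):
#   import numpy as np
#   wavelength = np.array([160., 250., 350., 500.])
#   flux = modified_blackbody_wavelength(wavelength, 20., beta=1.75, logN=22,
#                                        wavelength_units='microns',
#                                        normalize=False, logscale=16)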
try:
import agpy.mpfit as mpfit
def fit_blackbody(xdata, flux, guesses=(0,0), err=None,
blackbody_function=blackbody, quiet=True, **kwargs):
"""
Parameters
----------
xdata : array
Array of the X-values (frequency, wavelength) of the data
flux : array
The fluxes corresponding to the xdata values
guesses : (Temperature,Scale) or (Temperature,Beta,Scale)
The input guesses. 3 parameters are used for greybody
fitting, two for temperature fitting.
blackbody_function: function
Must take x-axis (e.g. frequency), temperature, scale, and then
optionally beta args
quiet : bool
quiet flag passed to mpfit
Returns
-------
mp : mpfit structure
An mpfit structure. Access parameters and errors via
`mp.params` and `mp.perror`. The covariance matrix
is in mp.covar.
Examples
--------
>>> wavelength = array([20,70,160,250,350,500,850,1100])
>>> flux = modified_blackbody_wavelength(wavelength, 15, beta=1.75,
logN=22, wavelength_units='microns', normalize=False,
logscale=16)
>>> err = 0.1 * flux
>>> np.random.seed(0)
>>> flux += np.random.randn(len(wavelength)) * err
>>> tguess, bguess, nguess = 20.,2.,21.5
>>> mp = fit_blackbody(wavelength, flux, err=err,
blackbody_function=modified_blackbody_wavelength,
logscale=16, guesses=(tguess, bguess, nguess),
wavelength_units='microns')
>>> print mp.params
[ 14.99095224 1.78620237 22.05271119]
>>> # T~14.9 K, beta ~1.79, column ~10^22
"""
def mpfitfun(x,y,err):
if err is None:
def f(p,fjac=None): return [0,(y-blackbody_function(x, *p,
normalize=False, **kwargs))]
else:
def f(p,fjac=None): return [0,(y-blackbody_function(x, *p,
normalize=False, **kwargs))/err]
return f
err = err if err is not None else flux*0.0 + 1.0
mp = mpfit.mpfit(mpfitfun(xdata,flux,err), guesses, quiet=quiet)
return mp
except ImportError:
pass
try:
import lmfit
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import numpy as np
def fit_blackbody_lmfit(xdata, flux, guesses=(0,0), err=None,
blackbody_function=blackbody, quiet=True, **kwargs):
"""
Parameters
----------
xdata : array
Array of the X-values (frequency, wavelength) of the data
flux : array
The fluxes corresponding to the xdata values
guesses : (Temperature,Scale) or (Temperature,Beta,Scale)
The input guesses. 3 parameters are used for greybody
fitting, two for temperature fitting.
blackbody_function: function
Must take x-axis (e.g. frequency), temperature, scale, and then
optionally beta args
quiet : bool
quiet flag passed to mpfit
kwargs are passed to the blackbody function
Examples
--------
>>> wavelength = np.array([20,70,160,250,350,500,850,1100])
>>> flux = modified_blackbody_wavelength(wavelength, 15, beta=1.75,
wavelength_units='microns', normalize=False, logN=22, logscale=16)
>>> err = 0.1 * flux
>>> flux += np.random.randn(len(wavelength)) * err
>>> tguess, bguess, nguess = 20.,2.,21.5
>>> lm = fit_blackbody_lmfit(wavelength, flux, err=err,
blackbody_function=modified_blackbody_wavelength, logscale=16,
guesses=(tguess,bguess,nguess),
wavelength_units='microns')
>>> print lm.params
>>> # If you want to fit for a fixed beta, do this:
>>> parameters = lmfit.Parameters(OrderedDict([ (n,lmfit.Parameter(x)) for n,x
in zip(('T','beta','N'),(20.,2.,21.5)) ]))
>>> import lmfit
>>> parameters['beta'].vary = False
>>> lm = fit_blackbody_lmfit(wavelength, flux, err=err,
blackbody_function=modified_blackbody_wavelength, logscale=16,
guesses=parameters,
wavelength_units='microns')
>>> print lm.params
"""
def lmfitfun(x,y,err):
if err is None:
def f(p): return (y-blackbody_function(x, *[p[par].value for par in p],
normalize=False, **kwargs))
else:
def f(p): return (y-blackbody_function(x, *[p[par].value for par in p],
normalize=False, **kwargs))/err
return f
if not isinstance(guesses,lmfit.Parameters):
guesspars = lmfit.Parameters(
OrderedDict([ (n,lmfit.Parameter(value=x,name=n))
for n,x in zip(('T','beta','N'),guesses) ]))
else:
guesspars = guesses
minimizer = lmfit.minimize( lmfitfun(xdata,np.array(flux),err),
guesspars)
return minimizer
except ImportError:
pass
try:
import pymodelfit
# FAILS:
# SyntaxError: can't have kwargs in model function
#class pmf_blackbody(pymodelfit.FunctionModel1DAuto):
# def f(self, x, T=20.0, scale=1.0, beta=1.5,
# blackbody_function=blackbody, **kwargs):
# return blackbody_function(x, T, scale, beta=beta)
except ImportError:
pass
try:
import numpy as np
old_errsettings = np.geterr()
import pymc # pymc breaks np error settings
np.seterr(**old_errsettings)
from scipy.integrate import quad
def fit_blackbody_montecarlo(frequency, flux, err=None,
temperature_guess=10, beta_guess=None,
scale_guess=None,
blackbody_function=blackbody, quiet=True,
return_MC=False, nsamples=5000, burn=1000,
min_temperature=0, max_temperature=100,
scale_keyword='scale', max_scale=1e60,
multivariate=False, **kwargs):
"""
Parameters
----------
frequency : array
Array of frequency values
flux : array
array of flux values
err : array (optional)
Array of error values (1-sigma, normal)
temperature_guess : float
Input / starting point for temperature
min_temperature : float
max_temperature : float
Lower/Upper limits on fitted temperature
beta_guess : float (optional)
Opacity beta value
scale_guess : float
Arbitrary scale value to apply to model to get correct answer
blackbody_function: function
Must take x-axis (e.g. frequency), temperature, then scale and beta
keywords (dependence on beta can be none)
return_MC : bool
Return the pymc.MCMC object?
nsamples : int
Number of samples to use in determining the posterior distribution
(the answer)
burn : int
number of initial samples to ignore
scale_keyword : ['scale','logscale','logN']
What scale keyword to pass to the blackbody function to determine
the amplitude
kwargs : kwargs
passed to blackbody function
"""
d = {}
d['temperature'] = pymc.distributions.Uniform('temperature',
min_temperature, max_temperature, value=temperature_guess)
d['scale'] = pymc.distributions.Uniform('scale',0,max_scale,
value=scale_guess)
if beta_guess is not None:
d['beta'] = pymc.distributions.Uniform('beta',0,10,
value=beta_guess)
else:
d['beta'] = pymc.distributions.Uniform('beta',0,0,
value=0)
@pymc.deterministic
def luminosity(temperature=d['temperature'], beta=d['beta'],
scale=d['scale']):
f = lambda nu: blackbody_function(nu, temperature, logN=scale,
beta=beta, normalize=False)
# integrate over a wide but finite frequency range (1e4 to 1e17 Hz);
# some care should be taken; going from 0 to inf results in failure
return quad(f, 1e4, 1e17)[0]
d['luminosity'] = luminosity
@pymc.deterministic
def bb_model(temperature=d['temperature'], scale=d['scale'],
beta=d['beta']):
kwargs[scale_keyword] = scale
y = blackbody_function(frequency, temperature, beta=beta,
normalize=False, **kwargs)
#print kwargs,beta,temperature,(-((y-flux)**2)).sum()
return y
d['bb_model'] = bb_model
if err is None:
d['err'] = pymc.distributions.Uninformative('error',value=1.)
else:
d['err'] = pymc.distributions.Uninformative('error',value=err,observed=True)
d['flux'] = pymc.distributions.Normal('flux', mu=d['bb_model'],
tau=1./d['err']**2, value=flux,
observed=True)
#print d.keys()
MC = pymc.MCMC(d)
if nsamples > 0:
MC.sample(nsamples, burn=burn)
if return_MC:
return MC
MCfit = pymc.MAP(MC)
MCfit.fit()
T = MCfit.temperature.value
scale = MCfit.scale.value
if beta_guess is not None:
beta = MCfit.beta.value
return T,scale,beta
else:
return T,scale
return MC
except ImportError:
pass
if __name__=="__main__":
print "Fitting tests"
import itertools
import numpy as np
import agpy
import pylab
import matplotlib
temperatures = [5,10,15,20,25]
betas = [1,1.25,1.5,1.75,2.0,2.25,2.5]
columns = [22,23,24]
wavelengths = [#np.array([70,160,250,350,500,850,1100],dtype='float'),
np.array([160,250,350,500,1100],dtype='float'),
np.array([160,250,350,500],dtype='float')]
errlevels = [0.1,0.2,0.05]
temperature = 20.
beta = 1.75
column = 22
wavelength = wavelengths[0]
errlevel = errlevels[0]
#for temperature,beta,column in itertools.product(temperatures,betas,columns):
tguess=20
bguess=1.5
nguess=21
tguess=temperature
bguess=beta
nguess=column
bbmcs = {}
MCtest = False
if MCtest:
for ii,(errlevel,wavelength) in enumerate(itertools.product(errlevels,wavelengths)):
flux = modified_blackbody_wavelength(wavelength, temperature,
beta=beta, wavelength_units='microns', normalize=False, logN=column,
logscale=16)
err = flux*errlevel
bbmc = fit_blackbody_montecarlo(wavelength, flux,
blackbody_function=modified_blackbody_wavelength,
return_MC=True, wavelength_units='microns', nsamples=1,
scale_guess=nguess, beta_guess=bguess, temperature_guess=tguess,
scale_keyword='logN', max_scale=30, err=err, logscale=16,
burn=0)
flux = bbmc.flux.rand()
bbmc = fit_blackbody_montecarlo(wavelength, flux,
blackbody_function=modified_blackbody_wavelength,
return_MC=True, wavelength_units='microns', nsamples=30000,
scale_guess=nguess, beta_guess=bguess, temperature_guess=tguess,
scale_keyword='logN', max_scale=30, err=err, logscale=16,
burn=10000)
bbmcs[errlevel+len(wavelength)] = bbmc
mp = fit_blackbody(wavelength, flux, err=err,
blackbody_function=modified_blackbody_wavelength,
logscale=16, guesses=(tguess, bguess, nguess), wavelength_units='microns')
print
print " %10s %10s %10s %10s %10s %10s %10s %10s %10s S/N=%f wls: %s" % ('T','e(T)','B','e(B)','N','e(N)','T-B','B-N','T-N',1/errlevel,wavelength)
print "input %10.3g %10s %10.3g %10s %10.3g %10s " % (temperature,"",beta,"",column,"")
mppars = [p for a in zip(mp.params,mp.perror) for p in a]
print("chi^2 %10.3g %10.3g %10.3g %10.3g %10.3g %10.3g " % tuple(mppars)+
"%10.3g %10.3g %10.3g " % (mp.covar[1,0],mp.covar[2,1],mp.covar[2,0]))
mcstats = bbmc.stats()
try:
N = pymc.NormApprox(bbmc)
N.fit()
Ncov = N.C[N.temperature,N.beta,N.scale]
mcpars = [p for a in zip(N.mu[N.temperature, N.beta, N.scale],
np.array(Ncov.diagonal())[0]) for p in a]
print("MCMC %10.3g %10.3g %10.3g %10.3g %10.3g %10.3g " % tuple(mcpars)+
"%10.3g %10.3g %10.3g " % (Ncov[1,0],Ncov[2,1],Ncov[2,0]))
except np.linalg.linalg.LinAlgError:
mcpars = (mcstats['temperature']['mean'],mcstats['temperature']['standard deviation'],
mcstats['beta']['mean'],mcstats['beta']['standard deviation'],
mcstats['scale']['mean'],mcstats['scale']['standard deviation'])
print("MCMC %10.3g %10.3g %10.3g %10.3g %10.3g %10.3g " % tuple(mcpars))
except ValueError:
mcpars = (mcstats['temperature']['mean'],mcstats['temperature']['standard deviation'],
mcstats['beta']['mean'],mcstats['beta']['standard deviation'],
mcstats['scale']['mean'],mcstats['scale']['standard deviation'])
print("MCMC %10.3g %10.3g %10.3g %10.3g %10.3g %10.3g " % tuple(mcpars))
pylab.figure(ii)
pylab.clf()
agpy.pymc_plotting.hist2d(bbmc,'temperature','beta',fignum=ii,varslice=(0,None,None),bins=20)
ax=pylab.gca()
ax.add_artist(matplotlib.patches.Ellipse(mp.params[:2], mp.perror[0], mp.perror[1],
mp.covar[1,0]/mp.covar[0,0]*90, edgecolor='green',
facecolor='none'))
pylab.plot(temperature,beta,'kx')
pylab.figure(ii+10)
pylab.clf()
input_flux = modified_blackbody_wavelength(wavelength, temperature,
beta=beta, wavelength_units='microns', normalize=False, logN=column,
logscale=16)
recovered_flux = modified_blackbody_wavelength(wavelength, mcpars[0],
beta=mcpars[2], wavelength_units='microns', normalize=False, logN=mcpars[4],
logscale=16)
pylab.plot(wavelength, flux, marker='o', linestyle='none')
pylab.errorbar(wavelength, input_flux, err, linestyle='none')
pylab.plot(wavelength, recovered_flux)
for kk,errlevel in enumerate(errlevels):
pylab.figure(100+kk)
pylab.clf()
for ii,(wavelength) in enumerate(wavelengths):
flux = modified_blackbody_wavelength(wavelength, temperature,
beta=beta, wavelength_units='microns', normalize=False, logN=column,
logscale=16)
err = flux*errlevel
err[wavelength<1100] = flux[wavelength<1100]*0.02
bbmc = fit_blackbody_montecarlo(wavelength, flux,
blackbody_function=modified_blackbody_wavelength,
return_MC=True, wavelength_units='microns', nsamples=1,
scale_guess=nguess, beta_guess=bguess, temperature_guess=tguess,
scale_keyword='logN', max_scale=30, err=err, logscale=16,
burn=0)
mps = []
pylab.figure(kk)
pylab.clf()
pylab.plot(temperature,beta,'kx')
betas,betaerr,temps,temperr = [],[],[],[]
for jj in xrange(5):
flux = bbmc.flux.rand()
mp = fit_blackbody(wavelength, flux, err=err,
blackbody_function=modified_blackbody_wavelength,
logscale=16, guesses=(tguess, bguess, nguess), wavelength_units='microns')
mps.append(mp)
temps.append(mp.params[0])
betas.append(mp.params[1])
betaerr.append(mp.perror[1])
temperr.append(mp.perror[0])
pylab.errorbar(temps,betas,xerr=temperr, yerr=betaerr, linestyle='none')
pylab.figure(100+kk)
print "%s sn=%f beta=%f+/-%f" % (wavelength[-1],1/errlevel,np.mean(betas),np.std(betas))
pylab.hist(betas,alpha=0.5,label="Longest Wavelength %s $\\mu m$ S/N=%0.1f" % (wavelength[-1],1/errlevel),histtype='stepfilled',bins=20)
if ii==0:
pylab.vlines(beta,*pylab.gca().get_ylim(),linestyle='--',color='k',label="Input value $\\beta=%f$" % beta)
pylab.legend(loc='best')
#pylab.savefig("/Users/adam/agpy/tests/longwav%i_sn%i_Herschelsn50_bb_test.png" % (wavelength[-1],1/errlevel))
|
mit
|
maxalbert/geopandas
|
geopandas/base.py
|
6
|
17504
|
from warnings import warn
from shapely.geometry import MultiPoint, MultiLineString, MultiPolygon
from shapely.geometry.base import BaseGeometry
from shapely.ops import cascaded_union, unary_union
import shapely.affinity as affinity
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, MultiIndex
import geopandas as gpd
try:
from geopandas.sindex import RTreeError, SpatialIndex
HAS_SINDEX = True
except ImportError:
class RTreeError(Exception):
pass
HAS_SINDEX = False
def _geo_op(this, other, op):
"""Operation that returns a GeoSeries"""
if isinstance(other, GeoPandasBase):
this = this.geometry
crs = this.crs
if crs != other.crs:
warn('GeoSeries crs mismatch: {0} and {1}'.format(this.crs,
other.crs))
this, other = this.align(other.geometry)
return gpd.GeoSeries([getattr(this_elem, op)(other_elem)
for this_elem, other_elem in zip(this, other)],
index=this.index, crs=crs)
else:
return gpd.GeoSeries([getattr(s, op)(other)
for s in this.geometry],
index=this.index, crs=this.crs)
# TODO: think about merging with _geo_op
def _series_op(this, other, op, **kwargs):
"""Geometric operation that returns a pandas Series"""
null_val = False if op != 'distance' else np.nan
if isinstance(other, GeoPandasBase):
this = this.geometry
this, other = this.align(other.geometry)
return Series([getattr(this_elem, op)(other_elem, **kwargs)
if not this_elem.is_empty | other_elem.is_empty else null_val
for this_elem, other_elem in zip(this, other)],
index=this.index)
else:
return Series([getattr(s, op)(other, **kwargs) if s else null_val
for s in this.geometry], index=this.index)
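# Note for readers: the binary methods further below (contains, intersects,
# distance, ...) all route through this helper, e.g. GeoSeries.distance(other)
# is _series_op(self, other, 'distance'); when *other* is itself a GeoSeries,
# the two series are first aligned on their index and the shapely method is
# then applied element-wise.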
def _geo_unary_op(this, op):
"""Unary operation that returns a GeoSeries"""
return gpd.GeoSeries([getattr(geom, op) for geom in this.geometry],
index=this.index, crs=this.crs)
def _series_unary_op(this, op, null_value=False):
"""Unary operation that returns a Series"""
return Series([getattr(geom, op, null_value) for geom in this.geometry],
index=this.index)
class GeoPandasBase(object):
def _generate_sindex(self):
self._sindex = None
if not HAS_SINDEX:
warn("Cannot generate spatial index: Missing package `rtree`.")
else:
stream = ((i, item.bounds, idx) for i, (idx, item) in
enumerate(self.geometry.iteritems()) if
pd.notnull(item) and not item.is_empty)
try:
self._sindex = SpatialIndex(stream)
# What we really want here is an empty generator error, or
# for the bulk loader to log that the generator was empty
# and move on. See https://github.com/Toblerity/rtree/issues/20.
except RTreeError:
pass
def _invalidate_sindex(self):
"""
Indicates that the spatial index should be re-built next
time it's requested.
"""
self._sindex = None
self._sindex_valid = False
@property
def area(self):
"""Return the area of each geometry in the GeoSeries"""
return _series_unary_op(self, 'area', null_value=np.nan)
@property
def geom_type(self):
"""Return the geometry type of each geometry in the GeoSeries"""
return _series_unary_op(self, 'geom_type', null_value=None)
@property
def type(self):
"""Return the geometry type of each geometry in the GeoSeries"""
return self.geom_type
@property
def length(self):
"""Return the length of each geometry in the GeoSeries"""
return _series_unary_op(self, 'length', null_value=np.nan)
@property
def is_valid(self):
"""Return True for each valid geometry, else False"""
return _series_unary_op(self, 'is_valid', null_value=False)
@property
def is_empty(self):
"""Return True for each empty geometry, False for non-empty"""
return _series_unary_op(self, 'is_empty', null_value=False)
@property
def is_simple(self):
"""Return True for each simple geometry, else False"""
return _series_unary_op(self, 'is_simple', null_value=False)
@property
def is_ring(self):
"""Return True for each geometry that is a closed ring, else False"""
# operates on the exterior, so can't use _series_unary_op()
return Series([geom.exterior.is_ring for geom in self.geometry],
index=self.index)
#
# Unary operations that return a GeoSeries
#
@property
def boundary(self):
"""Return the set-theoretic boundary of each geometry"""
return _geo_unary_op(self, 'boundary')
@property
def centroid(self):
"""Return the centroid of each geometry in the GeoSeries"""
return _geo_unary_op(self, 'centroid')
@property
def convex_hull(self):
"""Return the convex hull of each geometry"""
return _geo_unary_op(self, 'convex_hull')
@property
def envelope(self):
"""Return a bounding rectangle for each geometry"""
return _geo_unary_op(self, 'envelope')
@property
def exterior(self):
"""Return the outer boundary of each polygon"""
# TODO: return empty geometry for non-polygons
return _geo_unary_op(self, 'exterior')
@property
def interiors(self):
"""Return the interior rings of each polygon"""
# TODO: return empty list or None for non-polygons
return _series_unary_op(self, 'interiors', null_value=False)
def representative_point(self):
"""Return a GeoSeries of points guaranteed to be in each geometry"""
return gpd.GeoSeries([geom.representative_point()
for geom in self.geometry],
index=self.index)
#
# Reduction operations that return a Shapely geometry
#
@property
def cascaded_union(self):
"""Deprecated: Return the unary_union of all geometries"""
return cascaded_union(self.values)
@property
def unary_union(self):
"""Return the union of all geometries"""
return unary_union(self.geometry.values)
#
# Binary operations that return a pandas Series
#
def contains(self, other):
"""Return True for all geometries that contain *other*, else False"""
return _series_op(self, other, 'contains')
def geom_equals(self, other):
"""Return True for all geometries that equal *other*, else False"""
return _series_op(self, other, 'equals')
def geom_almost_equals(self, other, decimal=6):
"""Return True for all geometries that are approximately equal to *other*, else False"""
# TODO: pass precision argument
return _series_op(self, other, 'almost_equals', decimal=decimal)
def geom_equals_exact(self, other, tolerance):
"""Return True for all geometries that equal *other* to a given tolerance, else False"""
# TODO: pass tolerance argument.
return _series_op(self, other, 'equals_exact', tolerance=tolerance)
def crosses(self, other):
"""Return True for all geometries that cross *other*, else False"""
return _series_op(self, other, 'crosses')
def disjoint(self, other):
"""Return True for all geometries that are disjoint with *other*, else False"""
return _series_op(self, other, 'disjoint')
def intersects(self, other):
"""Return True for all geometries that intersect *other*, else False"""
return _series_op(self, other, 'intersects')
def overlaps(self, other):
"""Return True for all geometries that overlap *other*, else False"""
return _series_op(self, other, 'overlaps')
def touches(self, other):
"""Return True for all geometries that touch *other*, else False"""
return _series_op(self, other, 'touches')
def within(self, other):
"""Return True for all geometries that are within *other*, else False"""
return _series_op(self, other, 'within')
def distance(self, other):
"""Return distance of each geometry to *other*"""
return _series_op(self, other, 'distance')
#
# Binary operations that return a GeoSeries
#
def difference(self, other):
"""Return the set-theoretic difference of each geometry with *other*"""
return _geo_op(self, other, 'difference')
def symmetric_difference(self, other):
"""Return the symmetric difference of each geometry with *other*"""
return _geo_op(self, other, 'symmetric_difference')
def union(self, other):
"""Return the set-theoretic union of each geometry with *other*"""
return _geo_op(self, other, 'union')
def intersection(self, other):
"""Return the set-theoretic intersection of each geometry with *other*"""
return _geo_op(self, other, 'intersection')
#
# Other operations
#
@property
def bounds(self):
"""Return a DataFrame of minx, miny, maxx, maxy values of geometry objects"""
bounds = np.array([geom.bounds for geom in self.geometry])
return DataFrame(bounds,
columns=['minx', 'miny', 'maxx', 'maxy'],
index=self.index)
@property
def total_bounds(self):
"""Return a single bounding box (minx, miny, maxx, maxy) for all geometries
This is a shortcut for calculating the min/max x and y bounds individually.
"""
b = self.bounds
return (b['minx'].min(),
b['miny'].min(),
b['maxx'].max(),
b['maxy'].max())
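# Small worked example (hypothetical data): for a GeoSeries holding the points
# (0, 0) and (2, 3), `bounds` returns one row per geometry, while
# `total_bounds` collapses them into the single tuple (0.0, 0.0, 2.0, 3.0).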
@property
def sindex(self):
if not self._sindex_valid:
self._generate_sindex()
self._sindex_valid = True
return self._sindex
def buffer(self, distance, resolution=16):
return gpd.GeoSeries([geom.buffer(distance, resolution)
for geom in self.geometry],
index=self.index, crs=self.crs)
def simplify(self, *args, **kwargs):
return gpd.GeoSeries([geom.simplify(*args, **kwargs)
for geom in self.geometry],
index=self.index, crs=self.crs)
def relate(self, other):
raise NotImplementedError
def project(self, other, normalized=False):
"""
Return the distance along each geometry nearest to *other*
Parameters
----------
other : BaseGeometry or GeoSeries
The *other* geometry from which the projected point is computed.
normalized : boolean
If normalized is True, return the distance normalized to
the length of the object.
The project method is the inverse of interpolate.
"""
return _series_op(self, other, 'project', normalized=normalized)
def interpolate(self, distance, normalized=False):
"""
Return a point at the specified distance along each geometry
Parameters
----------
distance : float or Series of floats
Distance(s) along the geometries at which a point should be returned
normalized : boolean
If normalized is True, distance will be interpreted as a fraction
of the geometric object's length.
"""
return gpd.GeoSeries([s.interpolate(distance, normalized)
for s in self.geometry],
index=self.index, crs=self.crs)
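# Sketch (hypothetical data, assuming shapely's Point and LineString are
# imported) of project/interpolate being inverse operations:
#   lines = gpd.GeoSeries([LineString([(0, 0), (10, 0)])])
#   lines.project(gpd.GeoSeries([Point(4, 0)]))   # Series with value 4.0
#   lines.interpolate(4.0)                        # POINT (4 0)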
def translate(self, xoff=0.0, yoff=0.0, zoff=0.0):
"""
Shift the coordinates of the GeoSeries.
Parameters
----------
xoff, yoff, zoff : float, float, float
Amount of offset along each dimension.
xoff, yoff, and zoff for translation along the x, y, and z
dimensions respectively.
See shapely manual for more information:
http://toblerity.org/shapely/manual.html#affine-transformations
"""
return gpd.GeoSeries([affinity.translate(s, xoff, yoff, zoff)
for s in self.geometry],
index=self.index, crs=self.crs)
def rotate(self, angle, origin='center', use_radians=False):
"""
Rotate the coordinates of the GeoSeries.
Parameters
----------
angle : float
The angle of rotation can be specified in either degrees (default)
or radians by setting use_radians=True. Positive angles are
counter-clockwise and negative are clockwise rotations.
origin : string, Point, or tuple (x, y)
The point of origin can be a keyword 'center' for the bounding box
center (default), 'centroid' for the geometry's centroid, a Point
object or a coordinate tuple (x, y).
use_radians : boolean
Whether to interpret the angle of rotation as degrees or radians
See shapely manual for more information:
http://toblerity.org/shapely/manual.html#affine-transformations
"""
return gpd.GeoSeries([affinity.rotate(s, angle, origin=origin,
use_radians=use_radians) for s in self.geometry],
index=self.index, crs=self.crs)
def scale(self, xfact=1.0, yfact=1.0, zfact=1.0, origin='center'):
"""
Scale the geometries of the GeoSeries along each (x, y, z) dimension.
Parameters
----------
xfact, yfact, zfact : float, float, float
Scaling factors for the x, y, and z dimensions respectively.
origin : string, Point, or tuple
The point of origin can be a keyword 'center' for the 2D bounding
box center (default), 'centroid' for the geometry's 2D centroid, a
Point object or a coordinate tuple (x, y, z).
Note: Negative scale factors will mirror or reflect coordinates.
See shapely manual for more information:
http://toblerity.org/shapely/manual.html#affine-transformations
"""
return gpd.GeoSeries([affinity.scale(s, xfact, yfact, zfact,
origin=origin) for s in self.geometry], index=self.index,
crs=self.crs)
def skew(self, xs=0.0, ys=0.0, origin='center', use_radians=False):
"""
Shear/Skew the geometries of the GeoSeries by angles along x and y dimensions.
Parameters
----------
xs, ys : float, float
The shear angle(s) for the x and y axes respectively. These can be
specified in either degrees (default) or radians by setting
use_radians=True.
origin : string, Point, or tuple (x, y)
The point of origin can be a keyword 'center' for the bounding box
center (default), 'centroid' for the geometry's centroid, a Point
object or a coordinate tuple (x, y).
use_radians : boolean
Whether to interpret the shear angle(s) as degrees or radians
See shapely manual for more information:
http://toblerity.org/shapely/manual.html#affine-transformations
"""
return gpd.GeoSeries([affinity.skew(s, xs, ys, origin=origin,
use_radians=use_radians) for s in self.geometry],
index=self.index, crs=self.crs)
def explode(self):
"""
Explode multi-part geometries into multiple single geometries.
Single rows can become multiple rows.
This is analogous to PostGIS's ST_Dump(). The 'path' index is the
second level of the returned MultiIndex
Returns
------
A GeoSeries with a MultiIndex. The levels of the MultiIndex are the
original index and an integer.
Example
-------
>>> gdf # gdf is GeoSeries of MultiPoints
0 (POINT (0 0), POINT (1 1))
1 (POINT (2 2), POINT (3 3), POINT (4 4))
>>> gdf.explode()
0 0 POINT (0 0)
1 POINT (1 1)
1 0 POINT (2 2)
1 POINT (3 3)
2 POINT (4 4)
dtype: object
"""
index = []
geometries = []
for idx, s in self.geometry.iteritems():
if s.type.startswith('Multi') or s.type == 'GeometryCollection':
geoms = s.geoms
idxs = [(idx, i) for i in range(len(geoms))]
else:
geoms = [s]
idxs = [(idx, 0)]
index.extend(idxs)
geometries.extend(geoms)
return gpd.GeoSeries(geometries,
index=MultiIndex.from_tuples(index)).__finalize__(self)
def _array_input(arr):
if isinstance(arr, (MultiPoint, MultiLineString, MultiPolygon)):
# Prevent against improper length detection when input is a
# Multi*
geom = arr
arr = np.empty(1, dtype=object)
arr[0] = geom
return arr
|
bsd-3-clause
|
maxlikely/scikit-learn
|
sklearn/linear_model/tests/test_sparse_coordinate_descent.py
|
4
|
9158
|
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
ElasticNetCV)
def test_sparse_coef():
""" Check that the sparse_coef property works """
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.todense().tolist()[0], clf.coef_)
def test_normalize_option():
""" Check that the normalize option in enet works """
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
"""Check that the sparse lasso can handle zero data without crashing"""
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
"""Test ElasticNet for various values of alpha and l1_ratio with list X"""
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
with warnings.catch_warnings(record=True):
# catch warning about alpha=0.
# this is discouraged but should work.
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
"""Test ElasticNet for various values of alpha and l1_ratio with sparse
X"""
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples / 2:], X[:n_samples / 2]
y_train, y_test = y[n_samples / 2:], y[:n_samples / 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.todense(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
# check that warm restart leads to the same result with
# sparse and dense versions
rng = np.random.RandomState(seed=0)
coef_init = rng.randn(n_features)
d_clf.fit(X_train.todense(), y_train, coef_init=coef_init)
s_clf.fit(X_train, y_train, coef_init=coef_init)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples / 2:], X[:n_samples / 2]
y_train, y_test = y[n_samples / 2:], y[:n_samples / 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.todense(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap, eps = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_, estimator.eps_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
assert_array_almost_equal(eps[k], estimator.eps_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
|
bsd-3-clause
|
petebachant/PXL
|
pxl/io.py
|
1
|
3887
|
# -*- coding: utf-8 -*-
"""
This is a collection of useful I/O functions.
"""
from __future__ import division, print_function
import numpy as np
import json
import pandas as _pd
import h5py as _h5py
def savejson(filename, datadict):
"""Save data from a dictionary in JSON format. Note that Numpy arrays
are only converted to lists down to the second level of the dictionary.
"""
for key, value in datadict.items():
if type(value) == np.ndarray:
datadict[key] = value.tolist()
if type(value) == dict:
for key2, value2 in value.items():
if type(value2) == np.ndarray:
datadict[key][key2] = value2.tolist()
with open(filename, "w") as f:
f.write(json.dumps(datadict, indent=4))
def loadjson(filename, asnparrays=False):
"""Load data from text file in JSON format.
Numpy arrays are converted if specified with the `asnparrays` keyword
argument. Note that this only works to the second level of the dictionary.
Returns a single dict.
"""
with open(filename) as f:
data = json.load(f)
if asnparrays:
for key, value in data.items():
if type(value) is list:
data[key] = np.asarray(value)
if type(value) is dict:
for key2, value2 in value.items():
if type(value2) is list:
data[key][key2] = np.asarray(value2)
return data
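def _example_json_roundtrip(path="example.json"):
    """Illustrative sketch (not part of the original module): round-trip a
    nested dictionary of NumPy arrays through JSON. The file name is
    hypothetical.
    """
    data = {"x": np.arange(3), "nested": {"y": np.ones(2)}}
    savejson(path, data)
    # With asnparrays=True, lists are converted back to NumPy arrays.
    return loadjson(path, asnparrays=True)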
def savecsv(filename, datadict, mode="w"):
"""Save a dictionary of data to CSV."""
if mode == "a" :
header = False
else:
header = True
with open(filename, mode) as f:
_pd.DataFrame(datadict).to_csv(f, index=False, header=header)
def loadcsv(filename):
"""Load data from CSV file.
Returns a single dict with column names as keys.
"""
dataframe = _pd.read_csv(filename)
data = {}
for key, value in dataframe.items():
data[key] = value.values
return data
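def _example_csv_roundtrip(path="example.csv"):
    """Illustrative sketch (not part of the original module): write a dict of
    equal-length arrays to CSV, append more rows, then read the columns back.
    The file name is hypothetical.
    """
    savecsv(path, {"t": np.arange(3), "u": np.zeros(3)})
    # mode="a" appends rows without repeating the header
    savecsv(path, {"t": np.arange(3, 6), "u": np.ones(3)}, mode="a")
    return loadcsv(path)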
def savehdf(filename, datadict, groupname="data", mode="a", metadata=None,
as_dataframe=False, append=False):
"""Save a dictionary of arrays to file--similar to how `scipy.io.savemat`
works. If `datadict` is a DataFrame, it will be converted automatically.
"""
if as_dataframe:
df = _pd.DataFrame(datadict)
df.to_hdf(filename, groupname)
else:
if isinstance(datadict, _pd.DataFrame):
datadict = datadict.to_dict("list")
with _h5py.File(filename, mode) as f:
for key, value in datadict.items():
if append:
try:
f[groupname + "/" + key] = np.append(f[groupname + "/" + key], value)
except KeyError:
f[groupname + "/" + key] = value
else:
f[groupname + "/" + key] = value
if metadata:
for key, value in metadata.items():
f[groupname].attrs[key] = value
def loadhdf(filename, groupname="data", to_dataframe=False):
"""Load all data from top level of HDF5 file--similar to how
`scipy.io.loadmat` works.
"""
data = {}
with _h5py.File(filename, "r") as f:
for key, value in f[groupname].items():
data[key] = np.array(value)
if to_dataframe:
return _pd.DataFrame(data)
else:
return data
def save_hdf_metadata(filename, metadata, groupname="data", mode="a"):
""""Save a dictionary of metadata to a group's attrs."""
with _h5py.File(filename, mode) as f:
for key, val in metadata.items():
f[groupname].attrs[key] = val
def load_hdf_metadata(filename, groupname="data"):
""""Load attrs of the desired group into a dictionary."""
with _h5py.File(filename, "r") as f:
data = dict(f[groupname].attrs)
return data
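def _example_hdf_roundtrip(path="example.h5"):
    """Illustrative sketch (not part of the original module): save a dict of
    arrays plus metadata to an HDF5 group and load both back. The file name
    and group name are hypothetical.
    """
    savehdf(path, {"x": np.arange(5), "y": np.linspace(0.0, 1.0, 5)},
            groupname="run0", mode="w", metadata={"fs_hz": 100})
    data = loadhdf(path, groupname="run0")
    meta = load_hdf_metadata(path, groupname="run0")
    return data, meta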
|
gpl-3.0
|
pythonvietnam/scikit-learn
|
sklearn/utils/estimator_checks.py
|
3
|
51821
|
from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
    # Check that all estimators yield informative messages when
    # trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
    This function will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
    name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
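def _example_check_estimator():
    """Illustrative usage sketch (not part of the original module): run the
    common checks on an estimator class. LogisticRegression is only an
    example choice; any sklearn-compatible class could be passed instead.
    """
    from sklearn.linear_model import LogisticRegression
    check_estimator(LogisticRegression)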
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
# NMF
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
        # Due to the Johnson-Lindenstrauss lemma and the often very small
        # number of samples, the number of components of the random matrix
        # projection will probably be greater than the number of features.
        # So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
        # which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
    # check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
try:
assert_warns(DeprecationWarning,
getattr(estimator, method), X[0])
except ValueError:
pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
    # check fitting a 2d array with only one sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
    # check fitting a 2d array with only one feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
    # check fitting a 1d X with a one-sample y
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
    except ValueError:
pass
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required."
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
    # some estimators can't handle negative feature values
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_fast_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
    # check that an error is raised if the number of features
    # changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
                    # the one-vs-one strategy of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
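def _example_not_fitted_estimator():
    """Illustrative sketch (not part of the original module): a minimal
    estimator whose predict raises NotFittedError before fit. This satisfies
    check_estimators_unfitted because NotFittedError inherits from both
    ValueError and AttributeError.
    """
    from sklearn.utils.validation import NotFittedError
    class _TinyRegressor(BaseEstimator, RegressorMixin):
        def fit(self, X, y):
            # remember the number of features seen during fit
            self.coef_ = np.zeros(np.asarray(X).shape[1])
            return self
        def predict(self, X):
            if not hasattr(self, "coef_"):
                raise NotFittedError("This _TinyRegressor instance is not "
                                     "fitted yet.")
            return np.asarray(X).dot(self.coef_)
    X, y = _boston_subset()
    # an unfitted instance must raise before predict is usable
    assert_raises((AttributeError, ValueError), _TinyRegressor().predict, X)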
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non-negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
    # check that regressors issue a DeprecationWarning if they still expose
    # decision_function, predict_proba or predict_log_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
        # This is a very small dataset; the default n_iter is likely too
        # small for convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
assert_in(type(default), [str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    # The MultiTask* estimators listed below raise ValueError if y is 1-D.
    # Convert y into a 2-D array for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
|
bsd-3-clause
|
gsnyder206/synthetic-image-morph
|
panstarrs/mock_panstarrs.py
|
1
|
8060
|
import cProfile
import pstats
import math
import string
import sys
import struct
import matplotlib
matplotlib.use('Agg')
import numpy as np
import scipy.ndimage
import scipy.stats as ss
import scipy.signal
import scipy as sp
import scipy.odr as odr
import glob
import os
import gzip
import tarfile
import shutil
import congrid
import astropy.io.ascii as ascii
import warnings
import subprocess
import photutils
from astropy.stats import gaussian_fwhm_to_sigma
from astropy.convolution import Gaussian2DKernel
from astropy.visualization.mpl_normalize import ImageNormalize
from astropy.visualization import *
import astropy.io.fits as pyfits
#import statmorph
import datetime
import setup_synthetic_images_mp as ssimp
# Based on candelize.py
def process_snapshot(subdirpath='.', clobber=False, galaxy=None,
seg_filter_label='ps1_i', magsb_limits=[21, 22, 23, 24, 25],
camindices=[0,1,2,3], do_idl=False, analyze=True, use_nonscatter=True, Np=4):
cwd = os.path.abspath(os.curdir)
os.chdir(subdirpath)
bbfile_list = np.sort(np.asarray(glob.glob('broadbandz.fits*'))) #enable reading .fits.gz files
print('bbfile_list =')
print(bbfile_list)
if galaxy is not None:
thisbb = np.where(bbfile_list==galaxy)[0]
bbfile_list= bbfile_list[thisbb]
test_file = bbfile_list[0]
tf = pyfits.open(test_file)
print(tf.info())
print(tf['BROADBAND'].header.cards)
print(tf['SFRHIST'].header.get('star_adaptive_smoothing'))
print(tf['SFRHIST'].header.get('star_radius_factor'))
#this is critical for later
fils = tf['FILTERS'].data.field('filter')
print(fils)
tf.close()
# Ignore GALEX for now.
# Data for later: 1.5 arcsec per pixel, ~25 sbmag limit,
# fwhm = 4.0, 5.6 arcsec for fuv and nuv, respectively
filters_to_analyze = [
'panstarrs/panstarrs_ps1_g',
'panstarrs/panstarrs_ps1_r',
'panstarrs/panstarrs_ps1_w',
'panstarrs/panstarrs_ps1_open',
'panstarrs/panstarrs_ps1_i',
'panstarrs/panstarrs_ps1_z',
'panstarrs/panstarrs_ps1_y']
skip_filter_boolean = [
False,
False,
False,
False,
False,
False,
False]
print(filters_to_analyze)
# Pixel size in arcsec.
pixsize_arcsec = [
0.262,
0.262,
0.262,
0.262,
0.262,
0.262,
0.262]
filter_labels = [
'ps1_g',
'ps1_r',
'ps1_w',
'ps1_open',
'ps1_i',
'ps1_z',
'ps1_y']
filter_indices = []
print(len(filters_to_analyze), len(skip_filter_boolean), len(filter_labels))
for i,f in enumerate(filters_to_analyze):
fi = np.where(fils==f)
print(fi[0][0], f, fils[fi[0][0]], filter_labels[i]) #, filters_to_analyze[fi]
filter_indices.append(fi[0][0])
filter_indices = np.asarray(filter_indices)
print(filter_indices)
# order of filter_labels in wavelength space
filter_lambda_order = [0, 1, 2, 3, 4, 5, 6]
    # photfnu units: Jy; flux density corresponding to 1 ct/s
photfnu_Jy = [
2.14856e-07,
1.77931e-07,
5.49429e-08,
4.06004e-08,
1.81461e-07,
2.65602e-07,
6.62502e-07]
morphcode_dir = "/Users/gsnyder/Documents/pro/morph_december2013/morph_pro/"
morphcode_files = np.asarray(glob.glob(os.path.join(morphcode_dir,"*.*")))
se_dir = '/Users/gsnyder/Documents/Projects/Illustris_Morphology/Illustris-CANDELS/SE_scripts'
se_files = np.asarray(glob.glob(os.path.join(se_dir,"*.*")))
    # Use a custom-made Gaussian PSF (cannot find the actual PSF)
psf_dir = '/home/vrg/filter_data/psf'
psf_names = ['%s.fits' % (f) for f in filters_to_analyze]
# A bit of oversampling:
psf_pix_arcsec = [0.262, 0.262, 0.262, 0.262, 0.262, 0.262, 0.262]
psf_truncate = [None, None, None, None, None, None, None]
psf_hdu_num = [0, 0, 0, 0, 0, 0, 0]
psf_fwhm = [1.31, 1.19, 1.31, 1.31, 1.11, 1.07, 1.02] # in arcsec
psf_files = []
for pname in psf_names:
psf_file = os.path.join(psf_dir,pname)
psf_files.append(psf_file)
print(psf_file, os.path.lexists(psf_file))
### PSFSTD; WFC3 = 0.06 arcsec, ACS = 0.03 arcsec... I think
### NIRCAM in header with keyword 'PIXELSCL'; short 0.07925 long 0.0162
## acs wfc 0.05 arcsec pixels... PSFSTD x4 oversample?
## wfc3 ir 0.13 arcsec
## wfc3 uv 0.04 arcsec
mockimage_parameters = ssimp.analysis_parameters('mockimage_default')
mockimage_parameters.filter_indices = filter_indices
mockimage_parameters.filter_labels = filter_labels
mockimage_parameters.pixsize_arcsec = pixsize_arcsec
mockimage_parameters.morphcode_base = morphcode_dir
mockimage_parameters.morphcode_files = morphcode_files
mockimage_parameters.se_base = se_dir
mockimage_parameters.se_files = se_files
mockimage_parameters.camera_indices = camindices #None #by default, do all
mockimage_parameters.psf_files = psf_files
mockimage_parameters.psf_pix_arcsec = psf_pix_arcsec
mockimage_parameters.psf_truncate = psf_truncate
mockimage_parameters.psf_hdu_num = psf_hdu_num
mockimage_parameters.magsb_limits = magsb_limits
mockimage_parameters.psf_fwhm_arcsec = psf_fwhm
mockimage_parameters.photfnu_Jy = photfnu_Jy
mockimage_parameters.filter_lambda_order = filter_lambda_order
mockimage_parameters.skip_filters = skip_filter_boolean
mockimage_parameters.use_nonscatter = use_nonscatter
    # use exactly one detection and segmentation per object, depending on redshift
    # (an enormous simplification)
    # observationally, one would go with the deepest filter. here... ?
print(seg_filter_label)
print(mockimage_parameters.filter_labels)
mockimage_parameters.segment_filter_label = seg_filter_label
mockimage_parameters.segment_filter_index = np.where(np.asarray(mockimage_parameters.filter_labels) == seg_filter_label)[0][0]
print(mockimage_parameters.segment_filter_label)
print(mockimage_parameters.segment_filter_index)
assert(len(psf_pix_arcsec)==len(pixsize_arcsec))
assert(len(filter_labels)==len(mockimage_parameters.psf_files))
bbdirs = []
for i,bbfile in enumerate(bbfile_list):
bbdir = ssimp.process_single_broadband(bbfile, mockimage_parameters,
clobber=clobber, do_idl=do_idl, analyze=analyze,
bbase="broadbandz", Np=Np, zip_after=False)
bbdirs.append(bbdir)
#~ try:
#~ bbdir = ssimp.process_single_broadband(bbfile, mockimage_parameters,
#~ clobber=clobber, do_idl=do_idl, analyze=analyze,
#~ bbase="broadbandz", Np=Np)
#~ bbdirs.append(bbdir)
#~ except (KeyboardInterrupt,NameError,AttributeError,KeyError,TypeError,IndexError) as e:
#~ print(e)
#~ raise
#~ except:
#~ print("Exception while processing broadband: ", bbfile)
#~ print("Error:", sys.exc_info()[0])
#~ else:
#~ print("Successfully processed broadband: ", bbfile)
os.chdir(cwd)
return bbdirs
if __name__=="__main__":
# The 5 sigma depths in ABmags are 23.3, 23.2, 23.1, 22.3, 21.3 (grizy filters).
# For consistency with the rest of the code, we round these numbers.
# We also include a value of 24, for comparison purposes.
# Without dust
res = process_snapshot(subdirpath='.', seg_filter_label='ps1_i',
magsb_limits=[21, 22, 23, 24, 25], camindices=[0,1,2,3],
do_idl=False, analyze=True, use_nonscatter=True, Np=4)
#~ # Include dust
#~ res = process_snapshot(subdirpath='.', seg_filter_label='ps1_g',
#~ magsb_limits=[23.3, 23.2, 23.1, 22.3, 21.3], camindices=[0,1,2,3],
#~ do_idl=False, analyze=True, use_nonscatter=False, Np=4)
|
gpl-2.0
|
YinongLong/scikit-learn
|
sklearn/datasets/tests/test_svmlight_format.py
|
53
|
13398
|
from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import scipy.sparse as sp
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
    # test X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
X_sparse, y_dense = load_svmlight_file(datafile)
X_dense = X_sparse.toarray()
y_sparse = sp.csr_matrix(y_dense)
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
X_sliced = X_sparse[np.arange(X_sparse.shape[0])]
y_sliced = y_sparse[np.arange(y_sparse.shape[0])]
for X in (X_sparse, X_dense, X_sliced):
for y in (y_sparse, y_dense, y_sliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
if (sp.issparse(y) and y.shape[0] == 1):
# make sure y's shape is: (n_samples, n_labels)
# when it is sparse
y = y.T
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
X2_dense = X2.toarray()
if dtype == np.float32:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 4)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 4)
else:
# allow a rounding error at the last decimal place
assert_array_almost_equal(
X_dense.astype(dtype), X2_dense, 15)
assert_array_almost_equal(
y_dense.astype(dtype), y2, 15)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
y_sparse = sp.csr_matrix(y_dense)
for y in [y_dense, y_sparse]:
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
def test_load_with_long_qid():
# load svmfile with longint qid attribute
data = b("""
1 qid:0 0:1 1:2 2:3
0 qid:72048431380967004 0:1440446648 1:72048431380967004 2:236784985
0 qid:-9223372036854775807 0:1440446648 1:72048431380967004 2:236784985
3 qid:9223372036854775807 0:1440446648 1:72048431380967004 2:236784985""")
X, y, qid = load_svmlight_file(BytesIO(data), query_id=True)
true_X = [[1, 2, 3],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985],
[1440446648, 72048431380967004, 236784985]]
true_y = [1, 0, 0, 3]
trueQID = [0, 72048431380967004, -9223372036854775807, 9223372036854775807]
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=qid, zero_based=True)
f.seek(0)
X, y, qid = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
assert_array_equal(qid, trueQID)
f.seek(0)
X, y = load_svmlight_file(f, query_id=False, zero_based=True)
assert_array_equal(y, true_y)
assert_array_equal(X.toarray(), true_X)
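# A minimal round-trip sketch (illustrative only, not one of the tests above),
# using the same helpers already imported in this module (np, BytesIO,
# dump_svmlight_file, load_svmlight_file); kept commented out on purpose:
#     X = np.array([[0.0, 2.5, 0.0], [1.0, 0.0, -3.0]])
#     y = np.array([1, 2])
#     f = BytesIO()
#     dump_svmlight_file(X, y, f, zero_based=True)
#     f.seek(0)
#     X2, y2 = load_svmlight_file(f, zero_based=True)
#     # X2 is a scipy CSR matrix; X2.toarray() matches X and y2 matches y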
|
bsd-3-clause
|
WafaaT/spark-tk
|
regression-tests/sparktkregtests/testcases/graph/graph_triangle_count_test.py
|
10
|
2503
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests triangle count for ATK against the networkx implementation"""
import unittest
import networkx as nx
from sparktkregtests.lib import sparktk_test
class TriangleCount(sparktk_test.SparkTKTestCase):
def test_triangle_counts(self):
"""Build frames and graphs to exercise"""
super(TriangleCount, self).setUp()
graph_data = self.get_file("clique_10.csv")
schema = [('src', str),
('dst', str)]
# set up the vertex frame, which is the union of the src and
# the dst columns of the edges
self.frame = self.context.frame.import_csv(graph_data, schema=schema)
self.vertices = self.frame.copy()
self.vertices2 = self.frame.copy()
self.vertices.rename_columns({"src": "id"})
self.vertices.drop_columns(["dst"])
self.vertices2.rename_columns({"dst": "id"})
self.vertices2.drop_columns(["src"])
self.vertices.append(self.vertices2)
self.vertices.drop_duplicates()
self.graph = self.context.graph.create(self.vertices, self.frame)
result = self.graph.triangle_count()
triangles = result.to_pandas(result.count())
        # Create a dictionary of triangle count per vertex:
dictionary_of_triangle_count = {vertex['id']: (vertex['count'])
for (index, vertex) in triangles.iterrows()}
edge_list = self.frame.take(
n=self.frame.count(), columns=['src', 'dst'])
# build the network x result
g = nx.Graph()
g.add_edges_from(edge_list)
triangle_counts_from_networkx = nx.triangles(g)
self.assertEqual(
dictionary_of_triangle_count, triangle_counts_from_networkx)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
davidpng/FCS_Database
|
FlowAnal/Feature_IO.py
|
1
|
11280
|
# -*- coding: utf-8 -*-
"""
Created on Wed 31 Dec 2014 05:54:41 AM PST
This file describes an HDF5 interface class for pushing and pulling 'binned' histograms
to an HDF5 file format
@author: David Ng, MD
"""
__author__ = "David Ng, MD"
__copyright__ = "Copyright 2014"
__license__ = "GPL v3"
__version__ = "1.0"
__maintainer__ = "David Ng"
__email__ = "[email protected]"
__status__ = "Production"
from scipy.sparse import csr_matrix
from HDF5_subroutines.HDF5_IO import HDF5_IO
import numpy as np
import pandas as pd
import h5py
import os
import logging
log = logging.getLogger(__name__)
class Feature_IO(HDF5_IO):
def __init__(self, filepath, clobber=False):
""" HDF5 input/output inferface
This class provides an inferface for pushing feature extracted sparse
histograms from an FCS object to an HDF5 file object and pulling this
data from an HDF5 object to an dense 'feature' dataframe for input to
Machine Learning Algorithms
Keyword Arguements:
filepath -- <str> Absolute filepath to an HDF5 file for reading and
writing
clobber -- <bool> Flag to overwrite a HDF5 file object
"""
HDF5_IO.__init__(self,filepath)
#self.filepath = filepath
if clobber is True and os.path.exists(filepath):
os.remove(filepath)
def make_single_tube_analysis(self, case_tube_list):
"""
        This function takes the listed case_tube_idx values and merges their
        sparse histograms into a dense array over the union of the sparse
        matrix columns. It outputs a tuple of:
merged features <pd.DataFrame>
not_in_data <list>
merge_failure <list>
"""
fh = h5py.File(self.filepath, 'a')
# error checking
not_in_data = set([str(x) for x in case_tube_list]) - set(fh['data'].keys())
not_in_data = [int(i) for i in not_in_data]
if not_in_data:
log.info("Some of the listed case_tubes are not in the dataset: {}".format(not_in_data))
cleaned_up_cti_list = [i for i in case_tube_list if i not in not_in_data]
if cleaned_up_cti_list == []:
raise ValueError("Aborting single tube analysis creation; provide cases do not \
exist in the data file")
# get union of indices
index_union = self.__get_check_merge_indices(cleaned_up_cti_list)
        # initialize bin number dataframe AND merge dataframe
bin_num_df = pd.DataFrame(index_union,columns=['bin_num'])
merged = bin_num_df
merge_failure = []
for case in cleaned_up_cti_list:
#load FCS Feature Dataframe
try:
FCS_ft_df = self.__translate_FCS_feature(case)
                # do a relational-algebra-style left join on bin_num between the
                # union of bin numbers and the index/bin_number of the FCS_features
merged = pd.merge(merged,FCS_ft_df,how='left', \
left_on='bin_num',right_index=True)
except:
merge_failure.append(case)
merged.fillna(0,inplace=True) # clear out nan to zero
return merged, not_in_data, merge_failure
def push_fcs_features(self, case_tube_idx, FCS, db):
"""
This function will push the fcs features stored in CSR matrix form
to a given case_tube_idx as well as associated meta information
"""
self.schema = self.__make_schema(str(case_tube_idx))
fh = h5py.File(self.filepath, 'a')
self.__push_check_version(hdf_fh=fh, FCS=FCS, db=db)
# push sparse data into dir named for case_tube_idx
fh[self.schema['sdat']] = FCS.FCS_features.histogram.data
fh[self.schema['sidx']] = FCS.FCS_features.histogram.indices
fh[self.schema['sind']] = FCS.FCS_features.histogram.indptr
fh[self.schema['sshp']] = FCS.FCS_features.histogram.shape
fh.close()
def push_failed_cti_list(self, cti_list):
"""
This function will push a dataframe containing the case_num,
        case_tube_idx and error message associated with each input case_tube_idx
that failed feature extraction and/or push into HDF5 object
"""
meta_schema = self.__make_schema("MetaData")
self.push_DataFrame(DF = cti_list,
path = meta_schema["Case_Tube_Failures_DF"])
def get_fcs_features(self, case_tube_idx):
"""
This function will return the CSR matrix for a given case_tube_idx
"""
self.schema = self.__make_schema(str(case_tube_idx))
fh = h5py.File(self.filepath, 'r')
        # get individual components of the sparse array
d = fh[self.schema['sdat']].value
i = fh[self.schema['sidx']].value
p = fh[self.schema['sind']].value
s = fh[self.schema['sshp']].value
fh.close()
return csr_matrix((d,i,p),shape=s)
def get_case_tube_idxs(self):
"""This function returns the case_tube_indices present in the file
RETURN list of int()'s
"""
fh = h5py.File(self.filepath, 'r')
cti = [int(i) for i in fh['data'].keys()]
fh.close()
return cti
def get_meta_data(self):
"""
this function will load meta information into memory via a dictionary
keyed on the information name and values
"""
meta_schema = self.__make_schema("MetaData")
        # create dictionary with meta info; sparse matrix keys are excluded from "MetaData"
csr_keys = ['sdat','sidx','sind','sshp']
#these are the sparse matrix keys to remove
meta_keys = [k for k in meta_schema.keys() if k not in csr_keys]
fh = h5py.File(self.filepath, 'r')
        meta_data = {}  # initialize empty dictionary and fill it in the loop below
for k in meta_keys:
try:
meta_data[k] = fh[meta_schema[k]].value
except AttributeError: #if the meta_schema name failed, try extracting with DF
try:
meta_data[k] = self.pull_DataFrame(meta_schema[k],ext_filehandle=fh)
except KeyError:
meta_data[k] = self.pull_Series(meta_schema[k],ext_filehandle=fh)
except:
raise ValueError("{} is undefined in the hdf5 object {}".format(
k, fh))
fh.close()
return meta_data
def __translate_FCS_feature(self,case_tube_idx):
"""
makes a dataframe containing the index and data information of the
original sparse matrix
"""
sparse_mtx = self.get_fcs_features(case_tube_idx)
return pd.DataFrame(data=sparse_mtx.data,
index=sparse_mtx.indices,
columns=[str(case_tube_idx)])
def __get_check_merge_indices(self, case_tube_list):
"""
This will return a list of the union of index positions for all
listed case_tube
"""
fh = h5py.File(self.filepath, 'a')
index = []
shape = []
for i in case_tube_list:
schema = self.__make_schema(str(i))
index.extend(fh[schema['sidx']].value.tolist())
shape.append(fh[schema['sshp']].value)
fh.close()
        # check that all shapes match
        areTrue = [shape[i] == shape[i-1] for i in range(1, len(shape))]
        if not np.all(areTrue):
            print np.array(shape)
            raise ValueError("The length/shape of one case does not match the others")
else:
return np.sort(np.unique(np.array(index)))
def __push_check_version(self, hdf_fh, FCS, db):
"""
        This internal function checks that the header info in the
        hdf5 object/file is correct, per the following logic:
        if it exists and is equal -> good
        if it exists and is not equal -> fail
        if it does not exist -> create it and set it equal
Items used: FCS.version, FCS.FCS_features.type, db.date, db.db_file
"""
if self.schema['database_filepath'] in hdf_fh:
if hdf_fh[self.schema['database_filepath']].value != db.db_file:
raise ValueError('Filepaths do not match: %s <==> %s' %
(hdf_fh[self.schema['database_filepath']].value,
db.db_file))
else:
hdf_fh[self.schema['database_filepath']] = db.db_file
db_creation_date = db.creation_date.strftime("%Y-%m-%d") # HDF5 does not handle datetime
if self.schema['database_datetime'] in hdf_fh:
if hdf_fh[self.schema['database_datetime']].value != db_creation_date:
raise ValueError('DB dates do not match')
else:
hdf_fh[self.schema['database_datetime']] = db_creation_date
if self.schema['enviroment_version'] in hdf_fh:
if hdf_fh[self.schema['enviroment_version']].value != FCS.version:
                raise ValueError('Env versions do not match')
else:
hdf_fh[self.schema['enviroment_version']] = FCS.version
        # check/add extraction type
if self.schema['extraction_type'] in hdf_fh:
if hdf_fh[self.schema['extraction_type']].value != FCS.FCS_features.type:
                raise ValueError('Extraction types do not match')
else:
hdf_fh[self.schema['extraction_type']] = FCS.FCS_features.type
#check/add bin_descriptions
bin_desc = FCS.FCS_features.bin_description
if self.schema['bin_description'] in hdf_fh:
pass
#Error handling for these things is not working well, the pull returns objects
#rather than integers, will likely see this error with pull_DataFrame as well
#punt this problem for now!
#fh_bin_desc = self.pull_Series(path=self.schema['bin_description'],ext_filehandle=hdf_fh)
#print fh_bin_desc
#if not bin_desc.equals(fh_bin_desc):
# raise ValueError('Bin Descriptions do not match')
else:
self.push_Series(SR=bin_desc, path=self.schema['bin_description'],ext_filehandle=hdf_fh)
log.debug('Schema: %s' % ', '.join([i + '=' + str(hdf_fh[self.schema[i]].value)
for i in ['extraction_type', 'enviroment_version',
'database_datetime', 'database_filepath']]))
def __make_schema(self, case_tube_idx):
"""
makes a dictionary containing the storage schema
"""
schema = {"database_filepath": "/database_version/filepath",
"database_datetime": "/database_version/date",
"enviroment_version": "/enviroment_version",
"extraction_type": "/extraction_type",
"Case_Tube_Failures_DF": "/failed_cti",
"bin_description": "/bin_description/",
"sdat": "/data/"+case_tube_idx+"/data",
"sidx": "/data/"+case_tube_idx+"/indices",
"sind": "/data/"+case_tube_idx+"/indptr",
"sshp": "/data/"+case_tube_idx+"/shape"}
return schema
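# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of how this interface might be driven; `fcs_obj` and `db`
# are assumed stand-ins for the FCS and database objects described in the
# docstrings above (fcs_obj.FCS_features.histogram is a scipy CSR matrix and
# db exposes db_file and creation_date):
#     fio = Feature_IO("/tmp/features.hdf5", clobber=True)
#     fio.push_fcs_features(case_tube_idx=1, FCS=fcs_obj, db=db)
#     csr = fio.get_fcs_features(1)                      # sparse histogram back out
#     merged, missing, failed = fio.make_single_tube_analysis([1])
#     meta = fio.get_meta_data()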
|
gpl-3.0
|
chreman/dramavis
|
superposter.py
|
2
|
8984
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# dramavis by frank fischer (@umblaetterer) & christopher kittel (@chris_kittel)
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
import math
import numpy as np
from tqdm import tqdm
def plot_superposter(corpus, outputdir, debug=False):
"""
    Plot harmonically laid-out drama network subplots in 16:9 format.
Node size by degree centrality,
edge size by log(weight+1).
"""
size = corpus.size
y = int(math.sqrt(size/2)*(16/9))
x = int(size/y)+1
fig = plt.figure(figsize=(160, 90))
gs = gridspec.GridSpec(x, y)
gs.update(wspace=0.0, hspace=0.00) # set the spacing between axes.
i = 0
# build rectangle in axis coords for text plotting
left, width = .25, .5
bottom, height = .25, .5
right = left + width
top = bottom + height
dramas = {drama.ID: drama
for drama in corpus.analyze_dramas(action=None)}
id2date = {drama.ID: drama.metadata.get("date_definite")
for drama in corpus.analyze_dramas(action=None)}
if debug:
print(id2date)
# http://pythoncentral.io/how-to-sort-python-dictionaries-by-key-or-value/
sorted_by_date = sorted(id2date, key=id2date.__getitem__)
for ID in sorted_by_date:
drama = dramas.get(ID)
if debug:
print(drama.metadata)
G = drama.G
try:
# for networks with only one node
d = nx.degree_centrality(G)
nodesize = [v * 110 for v in d.values()]
except:
nodesize = [1 * 110 for n in G.nodes()]
layout = nx.spring_layout
pos = layout(G)
ax = plt.subplot(gs[i])
ax.tick_params(color='white', labelcolor='white')
ax.spines['bottom'].set_color('white')
ax.spines['top'].set_color('white')
ax.spines['left'].set_color('white')
ax.spines['right'].set_color('white')
if "Goethe" in drama.metadata.get("author"):
ax.patch.set_facecolor('firebrick')
ax.patch.set_alpha(0.2)
if "Hebbel" in drama.metadata.get("author"):
ax.patch.set_facecolor('purple')
ax.patch.set_alpha(0.2)
if "Weißenthurn" in drama.metadata.get("author"):
ax.patch.set_facecolor('darkgreen')
ax.patch.set_alpha(0.2)
if "Schiller" in drama.metadata.get("author"):
ax.patch.set_facecolor('darkslategrey')
ax.patch.set_alpha(0.2)
if "Wedekind" in drama.metadata.get("author"):
ax.patch.set_facecolor('darkslateblue')
ax.patch.set_alpha(0.2)
if "Schnitzler" in drama.metadata.get("author"):
ax.patch.set_facecolor('tomato')
ax.patch.set_alpha(0.2)
node_color = "steelblue"
nx.draw_networkx_nodes(G, pos,
nodelist=G.nodes(),
node_color=node_color,
node_size=nodesize,
alpha=0.8)
weights = [math.log(G[u][v]['weight']+1)
for u, v in G.edges()]
edge_color = "grey"
nx.draw_networkx_edges(G, pos,
with_labels=False,
edge_color=edge_color,
width=weights)
title_bark = "".join([w[0] for w in drama.title.split()])
caption = ", ".join([drama.metadata.get("author").split(",")[0],
title_bark,
str(drama.metadata.get("date_definite"))])
ax.text(0.5*(left+right), 0*bottom, caption,
horizontalalignment='center',
verticalalignment='bottom',
fontsize=20, color='black',
transform=ax.transAxes)
ax.set_frame_on(True)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
i += 1
fig.savefig(os.path.join(outputdir, "superposter.svg"))
plt.close(fig)
def plot_quartett_poster(corpus, outputdir):
dramas = {drama.ID: drama
for drama in corpus.analyze_dramas(action="both")}
id2date = {drama.ID: drama.metadata.get("date_definite")
for drama in corpus.analyze_dramas(action=None)}
sorted_by_date = sorted(id2date, key=id2date.__getitem__)
x = 4
y = 8
fig = plt.figure(figsize=(80, 80))
outer = gridspec.GridSpec(x, y)
outer.update(wspace=0.06, hspace=0.06) # set the spacing between axes.
i = 0
for ID in tqdm(sorted_by_date, desc="Plotting"):
drama = dramas.get(ID)
inner = gridspec.GridSpecFromSubplotSpec(2, 1,
subplot_spec=outer[i],
wspace=0.0, hspace=0.0)
# inner = outer.new_subplotspec(i, rowspan=2)
# inner.update(wspace=0.125, hspace=0.125)
G = drama.G
# PLOT NETWORK
ax = plt.subplot(inner[0])
try:
# for networks with only one node
d = nx.degree_centrality(G)
nodesize = [v * 110 for v in d.values()]
except:
nodesize = [1 * 110 for n in G.nodes()]
layout = nx.spring_layout
pos = layout(G)
ax.tick_params(color='white', labelcolor='white')
ax.spines['bottom'].set_color('white')
ax.spines['top'].set_color('white')
ax.spines['left'].set_color('white')
ax.spines['right'].set_color('white')
node_color = "steelblue"
nx.draw_networkx_nodes(G, pos,
nodelist=G.nodes(),
node_color=node_color,
node_size=nodesize,
alpha=0.8)
weights = [math.log(G[u][v]['weight']+1)
for u, v in G.edges()]
edge_color = "grey"
nx.draw_networkx_edges(G, pos,
with_labels=False,
edge_color=edge_color,
width=weights)
title_bark = "".join([w[0] for w in drama.title.split()])
caption = ", ".join([drama.metadata.get("author").split(",")[0],
title_bark,
str(drama.metadata.get("date_definite"))])
ax.text(0.5, 0.0, caption,
horizontalalignment='center',
verticalalignment='top',
fontsize=30, color='black',
transform=ax.transAxes)
ax.set_frame_on(True)
ax.axes.get_yaxis().set_visible(False)
ax.axes.get_xaxis().set_visible(False)
fig.add_subplot(ax)
# PLOT TEXTBOX
        text_ax = plt.subplot(inner[1])
        # author, title, subtitle, year
metadata = ['author', 'title', 'subtitle', 'date_definite']
metadata = [": ".join([md, str(drama.metadata.get(md))])
for md in metadata]
metadata = "\n".join(metadata)
        # number of subgraphs, network size, network density,
        # clustering coefficient, average path length,
        # highest degree value and name of the corresponding character, all-in index
metrics = ['charcount', 'density', 'connected_components',
'clustering_coefficient', 'avgpathlength', 'diameter',
'all_in_index']
metric_strings = []
for metric in metrics:
v = drama.graph_metrics.loc[drama.ID][metric]
if type(v) == int:
value = "%.d" % v
if type(v) == float:
value = "%.2f" % v
if metric == "all_in_index":
v = int(v*100)
value = "%.d" % v +"%"
else:
value = str(value)
metric_strings.append(": ".join([metric, value]))
max_degree = drama.graph_metrics.loc[drama.ID]['maxdegree']
cent_max = drama.centralities['degree'].max()
top_char = drama.centralities[drama.centralities['degree'] == cent_max].index.tolist()
if len(top_char) != 1:
max_degree_char = "SEVERAL"
else:
max_degree_char = top_char[0]
metric_strings.append('max_degree: %.d (%s)' % (max_degree, max_degree_char))
metric_strings = "\n".join(metric_strings)
text_ax.text(0, -0.15, metadata+"\n"+"\n"+metric_strings,
ha='left', va="top",
wrap=True, transform=text_ax.transAxes,
fontsize=20)
text_ax.set_frame_on(True)
text_ax.axes.get_yaxis().set_visible(False)
text_ax.axes.get_xaxis().set_visible(False)
fig.add_subplot(text_ax)
i += 1
# plt.tight_layout()
fig.savefig(os.path.join(outputdir, "quartettposter.svg"))
plt.close(fig)
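# --- Illustrative note on the grid arithmetic above (not original code) ---
# plot_superposter() sizes its subplot grid from the corpus size so that the
# poster is roughly 16:9. For an assumed corpus of 200 dramas:
#     y = int(math.sqrt(200 / 2) * (16 / 9))   # int(10 * 1.78) -> 17 columns
#     x = int(200 / y) + 1                     # int(11.76) + 1 -> 12 rows
# which yields 12 * 17 = 204 cells, enough for all 200 networks.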
|
mit
|
wrobstory/seaborn
|
seaborn/categorical.py
|
19
|
102299
|
from __future__ import division
from textwrap import dedent
import colorsys
import numpy as np
from scipy import stats
import pandas as pd
from pandas.core.series import remove_na
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
from .external.six import string_types
from .external.six.moves import range
from . import utils
from .utils import desaturate, iqr, categorical_order
from .algorithms import bootstrap
from .palettes import color_palette, husl_palette, light_palette
from .axisgrid import FacetGrid, _facet_docs
class _CategoricalPlotter(object):
width = .8
def establish_variables(self, x=None, y=None, hue=None, data=None,
orient=None, order=None, hue_order=None,
units=None):
"""Convert input specification into a common representation."""
# Option 1:
# We are plotting a wide-form dataset
# -----------------------------------
if x is None and y is None:
# Do a sanity check on the inputs
if hue is not None:
error = "Cannot use `hue` without `x` or `y`"
raise ValueError(error)
# No hue grouping with wide inputs
plot_hues = None
hue_title = None
hue_names = None
# No statistical units with wide inputs
plot_units = None
            # We also won't get axes labels here
value_label = None
group_label = None
# Option 1a:
# The input data is a Pandas DataFrame
# ------------------------------------
if isinstance(data, pd.DataFrame):
# Order the data correctly
if order is None:
order = []
# Reduce to just numeric columns
for col in data:
try:
data[col].astype(np.float)
order.append(col)
except ValueError:
pass
plot_data = data[order]
group_names = order
group_label = data.columns.name
# Convert to a list of arrays, the common representation
iter_data = plot_data.iteritems()
plot_data = [np.asarray(s, np.float) for k, s in iter_data]
# Option 1b:
# The input data is an array or list
# ----------------------------------
else:
# We can't reorder the data
if order is not None:
error = "Input data must be a pandas object to reorder"
raise ValueError(error)
# The input data is an array
if hasattr(data, "shape"):
if len(data.shape) == 1:
if np.isscalar(data[0]):
plot_data = [data]
else:
plot_data = list(data)
elif len(data.shape) == 2:
nr, nc = data.shape
if nr == 1 or nc == 1:
plot_data = [data.ravel()]
else:
plot_data = [data[:, i] for i in range(nc)]
else:
error = ("Input `data` can have no "
"more than 2 dimensions")
raise ValueError(error)
# Check if `data` is None to let us bail out here (for testing)
elif data is None:
plot_data = [[]]
# The input data is a flat list
elif np.isscalar(data[0]):
plot_data = [data]
# The input data is a nested list
# This will catch some things that might fail later
# but exhaustive checks are hard
else:
plot_data = data
# Convert to a list of arrays, the common representation
plot_data = [np.asarray(d, np.float) for d in plot_data]
# The group names will just be numeric indices
group_names = list(range((len(plot_data))))
# Figure out the plotting orientation
orient = "h" if str(orient).startswith("h") else "v"
# Option 2:
# We are plotting a long-form dataset
# -----------------------------------
else:
# See if we need to get variables from `data`
if data is not None:
x = data.get(x, x)
y = data.get(y, y)
hue = data.get(hue, hue)
units = data.get(units, units)
# Validate the inputs
for input in [x, y, hue, units]:
if isinstance(input, string_types):
err = "Could not interpret input '{}'".format(input)
raise ValueError(err)
# Figure out the plotting orientation
orient = self.infer_orient(x, y, orient)
# Option 2a:
# We are plotting a single set of data
# ------------------------------------
if x is None or y is None:
# Determine where the data are
vals = y if x is None else x
# Put them into the common representation
plot_data = [np.asarray(vals)]
# Get a label for the value axis
if hasattr(vals, "name"):
value_label = vals.name
else:
value_label = None
# This plot will not have group labels or hue nesting
groups = None
group_label = None
group_names = []
plot_hues = None
hue_names = None
hue_title = None
plot_units = None
# Option 2b:
# We are grouping the data values by another variable
# ---------------------------------------------------
else:
# Determine which role each variable will play
if orient == "v":
vals, groups = y, x
else:
vals, groups = x, y
# Get the categorical axis label
group_label = None
if hasattr(groups, "name"):
group_label = groups.name
# Get the order on the categorical axis
group_names = categorical_order(groups, order)
# Group the numeric data
plot_data, value_label = self._group_longform(vals, groups,
group_names)
# Now handle the hue levels for nested ordering
if hue is None:
plot_hues = None
hue_title = None
hue_names = None
else:
# Get the order of the hue levels
hue_names = categorical_order(hue, hue_order)
# Group the hue data
plot_hues, hue_title = self._group_longform(hue, groups,
group_names)
# Now handle the units for nested observations
if units is None:
plot_units = None
else:
plot_units, _ = self._group_longform(units, groups,
group_names)
# Assign object attributes
# ------------------------
self.orient = orient
self.plot_data = plot_data
self.group_label = group_label
self.value_label = value_label
self.group_names = group_names
self.plot_hues = plot_hues
self.hue_title = hue_title
self.hue_names = hue_names
self.plot_units = plot_units
def _group_longform(self, vals, grouper, order):
"""Group a long-form variable by another with correct order."""
# Ensure that the groupby will work
if not isinstance(vals, pd.Series):
vals = pd.Series(vals)
# Group the val data
grouped_vals = vals.groupby(grouper)
out_data = []
for g in order:
try:
g_vals = np.asarray(grouped_vals.get_group(g))
except KeyError:
g_vals = np.array([])
out_data.append(g_vals)
# Get the vals axis label
label = vals.name
return out_data, label
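    # Illustrative sketch (not part of the original class): for
    # vals = pd.Series([1, 2, 3]), grouper = pd.Series(["a", "a", "b"]) and
    # order = ["a", "b", "c"], _group_longform returns
    # ([array([1, 2]), array([3]), array([])], vals.name); levels missing
    # from the data come back as empty arrays so positions stay aligned.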
def establish_colors(self, color, palette, saturation):
"""Get a list of colors for the main component of the plots."""
if self.hue_names is None:
n_colors = len(self.plot_data)
else:
n_colors = len(self.hue_names)
# Determine the main colors
if color is None and palette is None:
# Determine whether the current palette will have enough values
# If not, we'll default to the husl palette so each is distinct
current_palette = mpl.rcParams["axes.color_cycle"]
if n_colors <= len(current_palette):
colors = color_palette(n_colors=n_colors)
else:
colors = husl_palette(n_colors, l=.7)
elif palette is None:
# When passing a specific color, the interpretation depends
# on whether there is a hue variable or not.
# If so, we will make a blend palette so that the different
# levels have some amount of variation.
if self.hue_names is None:
colors = [color] * n_colors
else:
colors = light_palette(color, n_colors)
else:
# Let `palette` be a dict mapping level to color
if isinstance(palette, dict):
if self.hue_names is None:
levels = self.group_names
else:
levels = self.hue_names
palette = [palette[l] for l in levels]
colors = color_palette(palette, n_colors)
        # Convert the colors to a common rgb representation
colors = [mpl.colors.colorConverter.to_rgb(c) for c in colors]
# Desaturate a bit because these are patches
if saturation < 1:
colors = [desaturate(c, saturation) for c in colors]
# Determine the gray color to use for the lines framing the plot
light_vals = [colorsys.rgb_to_hls(*c)[1] for c in colors]
l = min(light_vals) * .6
gray = (l, l, l)
# Assign object attributes
self.colors = colors
self.gray = gray
def infer_orient(self, x, y, orient=None):
"""Determine how the plot should be oriented based on the data."""
orient = str(orient)
def is_categorical(s):
try:
                # Correct way, but doesn't exist in older Pandas
return pd.core.common.is_categorical_dtype(s)
except AttributeError:
# Also works, but feels hackier
return str(s.dtype) == "categorical"
def is_not_numeric(s):
try:
np.asarray(s, dtype=np.float)
except ValueError:
return True
return False
no_numeric = "Neither the `x` nor `y` variable appears to be numeric."
if orient.startswith("v"):
return "v"
elif orient.startswith("h"):
return "h"
elif x is None:
return "v"
elif y is None:
return "h"
elif is_categorical(y):
if is_categorical(x):
raise ValueError(no_numeric)
else:
return "h"
elif is_not_numeric(y):
if is_not_numeric(x):
raise ValueError(no_numeric)
else:
return "h"
else:
return "v"
@property
def hue_offsets(self):
"""A list of center positions for plots when hue nesting is used."""
n_levels = len(self.hue_names)
each_width = self.width / n_levels
offsets = np.linspace(0, self.width - each_width, n_levels)
offsets -= offsets.mean()
return offsets
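    # Worked example (illustrative): with the default width of .8 and two hue
    # levels, each_width is .4, np.linspace(0, .4, 2) gives [0, .4], and after
    # centering the offsets are [-.2, +.2]; with three levels they become
    # approximately [-.267, 0, +.267].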
@property
def nested_width(self):
"""A float with the width of plot elements when hue nesting is used."""
return self.width / len(self.hue_names) * .98
def annotate_axes(self, ax):
"""Add descriptive labels to an Axes object."""
if self.orient == "v":
xlabel, ylabel = self.group_label, self.value_label
else:
xlabel, ylabel = self.value_label, self.group_label
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if self.orient == "v":
ax.set_xticks(np.arange(len(self.plot_data)))
ax.set_xticklabels(self.group_names)
else:
ax.set_yticks(np.arange(len(self.plot_data)))
ax.set_yticklabels(self.group_names)
if self.orient == "v":
ax.xaxis.grid(False)
ax.set_xlim(-.5, len(self.plot_data) - .5)
else:
ax.yaxis.grid(False)
ax.set_ylim(-.5, len(self.plot_data) - .5)
if self.hue_names is not None:
leg = ax.legend(loc="best")
if self.hue_title is not None:
leg.set_title(self.hue_title)
# Set the title size a roundabout way to maintain
            # compatibility with matplotlib 1.1
try:
title_size = mpl.rcParams["axes.labelsize"] * .85
except TypeError: # labelsize is something like "large"
title_size = mpl.rcParams["axes.labelsize"]
prop = mpl.font_manager.FontProperties(size=title_size)
leg._legend_title_box._text.set_font_properties(prop)
def add_legend_data(self, ax, color, label):
"""Add a dummy patch object so we can get legend data."""
rect = plt.Rectangle([0, 0], 0, 0,
linewidth=self.linewidth / 2,
edgecolor=self.gray,
facecolor=color,
label=label)
ax.add_patch(rect)
class _BoxPlotter(_CategoricalPlotter):
def __init__(self, x, y, hue, data, order, hue_order,
orient, color, palette, saturation,
width, fliersize, linewidth):
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, saturation)
self.width = width
self.fliersize = fliersize
if linewidth is None:
linewidth = mpl.rcParams["lines.linewidth"]
self.linewidth = linewidth
def draw_boxplot(self, ax, kws):
"""Use matplotlib to draw a boxplot on an Axes."""
vert = self.orient == "v"
for i, group_data in enumerate(self.plot_data):
if self.plot_hues is None:
                # Handle case where there is no data at this level
if group_data.size == 0:
continue
# Draw a single box or a set of boxes
# with a single level of grouping
box_data = remove_na(group_data)
# Handle case where there is no non-null data
if box_data.size == 0:
continue
artist_dict = ax.boxplot(box_data,
vert=vert,
patch_artist=True,
positions=[i],
widths=self.width,
**kws)
color = self.colors[i]
self.restyle_boxplot(artist_dict, color, kws)
else:
# Draw nested groups of boxes
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
hue_mask = self.plot_hues[i] == hue_level
# Add a legend for this hue level
if not i:
self.add_legend_data(ax, self.colors[j], hue_level)
                    # Handle case where there is no data at this level
if group_data.size == 0:
continue
box_data = remove_na(group_data[hue_mask])
# Handle case where there is no non-null data
if box_data.size == 0:
continue
center = i + offsets[j]
artist_dict = ax.boxplot(box_data,
vert=vert,
patch_artist=True,
positions=[center],
widths=self.nested_width,
**kws)
self.restyle_boxplot(artist_dict, self.colors[j], kws)
# Add legend data, but just for one set of boxes
def restyle_boxplot(self, artist_dict, color, kws):
"""Take a drawn matplotlib boxplot and make it look nice."""
for box in artist_dict["boxes"]:
box.update(dict(color=color,
zorder=.9,
edgecolor=self.gray,
linewidth=self.linewidth))
box.update(kws.get("boxprops", {}))
for whisk in artist_dict["whiskers"]:
whisk.update(dict(color=self.gray,
linewidth=self.linewidth,
linestyle="-"))
whisk.update(kws.get("whiskerprops", {}))
for cap in artist_dict["caps"]:
cap.update(dict(color=self.gray,
linewidth=self.linewidth))
cap.update(kws.get("capprops", {}))
for med in artist_dict["medians"]:
med.update(dict(color=self.gray,
linewidth=self.linewidth))
med.update(kws.get("medianprops", {}))
for fly in artist_dict["fliers"]:
fly.update(dict(color=self.gray,
marker="d",
markeredgecolor=self.gray,
markersize=self.fliersize))
fly.update(kws.get("flierprops", {}))
def plot(self, ax, boxplot_kws):
"""Make the plot."""
self.draw_boxplot(ax, boxplot_kws)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _ViolinPlotter(_CategoricalPlotter):
def __init__(self, x, y, hue, data, order, hue_order,
bw, cut, scale, scale_hue, gridsize,
width, inner, split, orient, linewidth,
color, palette, saturation):
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, saturation)
self.estimate_densities(bw, cut, scale, scale_hue, gridsize)
self.gridsize = gridsize
self.width = width
if inner is not None:
if not any([inner.startswith("quart"),
inner.startswith("box"),
inner.startswith("stick"),
inner.startswith("point")]):
err = "Inner style '{}' not recognized".format(inner)
raise ValueError(err)
self.inner = inner
if split and self.hue_names is not None and len(self.hue_names) != 2:
raise ValueError("Cannot use `split` with more than 2 hue levels.")
self.split = split
if linewidth is None:
linewidth = mpl.rcParams["lines.linewidth"]
self.linewidth = linewidth
def estimate_densities(self, bw, cut, scale, scale_hue, gridsize):
"""Find the support and density for all of the data."""
# Initialize data structures to keep track of plotting data
if self.hue_names is None:
support = []
density = []
counts = np.zeros(len(self.plot_data))
max_density = np.zeros(len(self.plot_data))
else:
support = [[] for _ in self.plot_data]
density = [[] for _ in self.plot_data]
size = len(self.group_names), len(self.hue_names)
counts = np.zeros(size)
max_density = np.zeros(size)
for i, group_data in enumerate(self.plot_data):
# Option 1: we have a single level of grouping
# --------------------------------------------
if self.plot_hues is None:
# Strip missing datapoints
kde_data = remove_na(group_data)
# Handle special case of no data at this level
if kde_data.size == 0:
support.append(np.array([]))
density.append(np.array([1.]))
counts[i] = 0
max_density[i] = 0
continue
# Handle special case of a single unique datapoint
elif np.unique(kde_data).size == 1:
support.append(np.unique(kde_data))
density.append(np.array([1.]))
counts[i] = 1
max_density[i] = 0
continue
# Fit the KDE and get the used bandwidth size
kde, bw_used = self.fit_kde(kde_data, bw)
# Determine the support grid and get the density over it
support_i = self.kde_support(kde_data, bw_used, cut, gridsize)
density_i = kde.evaluate(support_i)
# Update the data structures with these results
support.append(support_i)
density.append(density_i)
counts[i] = kde_data.size
max_density[i] = density_i.max()
# Option 2: we have nested grouping by a hue variable
# ---------------------------------------------------
else:
for j, hue_level in enumerate(self.hue_names):
# Handle special case of no data at this category level
if not group_data.size:
support[i].append(np.array([]))
density[i].append(np.array([1.]))
counts[i, j] = 0
max_density[i, j] = 0
continue
# Select out the observations for this hue level
hue_mask = self.plot_hues[i] == hue_level
# Strip missing datapoints
kde_data = remove_na(group_data[hue_mask])
# Handle special case of no data at this level
if kde_data.size == 0:
support[i].append(np.array([]))
density[i].append(np.array([1.]))
counts[i, j] = 0
max_density[i, j] = 0
continue
# Handle special case of a single unique datapoint
elif np.unique(kde_data).size == 1:
support[i].append(np.unique(kde_data))
density[i].append(np.array([1.]))
counts[i, j] = 1
max_density[i, j] = 0
continue
# Fit the KDE and get the used bandwidth size
kde, bw_used = self.fit_kde(kde_data, bw)
# Determine the support grid and get the density over it
support_ij = self.kde_support(kde_data, bw_used,
cut, gridsize)
density_ij = kde.evaluate(support_ij)
# Update the data structures with these results
support[i].append(support_ij)
density[i].append(density_ij)
counts[i, j] = kde_data.size
max_density[i, j] = density_ij.max()
# Scale the height of the density curve.
# For a violinplot the density is non-quantitative.
# The objective here is to scale the curves relative to 1 so that
# they can be multiplied by the width parameter during plotting.
if scale == "area":
self.scale_area(density, max_density, scale_hue)
elif scale == "width":
self.scale_width(density)
elif scale == "count":
self.scale_count(density, counts, scale_hue)
else:
raise ValueError("scale method '{}' not recognized".format(scale))
# Set object attributes that will be used while plotting
self.support = support
self.density = density
def fit_kde(self, x, bw):
"""Estimate a KDE for a vector of data with flexible bandwidth."""
# Allow for the use of old scipy where `bw` is fixed
try:
kde = stats.gaussian_kde(x, bw)
except TypeError:
kde = stats.gaussian_kde(x)
if bw != "scott": # scipy default
msg = ("Ignoring bandwidth choice, "
"please upgrade scipy to use a different bandwidth.")
warnings.warn(msg, UserWarning)
# Extract the numeric bandwidth from the KDE object
bw_used = kde.factor
# At this point, bw will be a numeric scale factor.
        # To get the actual bandwidth of the kernel, we multiply by the
# unbiased standard deviation of the data, which we will use
# elsewhere to compute the range of the support.
bw_used = bw_used * x.std(ddof=1)
return kde, bw_used
def kde_support(self, x, bw, cut, gridsize):
"""Define a grid of support for the violin."""
support_min = x.min() - bw * cut
support_max = x.max() + bw * cut
return np.linspace(support_min, support_max, gridsize)
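    # Worked example (illustrative): for data spanning [0, 1] with a used
    # bandwidth of 0.2 and cut=2, the support runs from 0 - 0.2*2 = -0.4 to
    # 1 + 0.2*2 = 1.4, sampled at `gridsize` evenly spaced points.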
def scale_area(self, density, max_density, scale_hue):
"""Scale the relative area under the KDE curve.
This essentially preserves the "standard" KDE scaling, but the
resulting maximum density will be 1 so that the curve can be
properly multiplied by the violin width.
"""
if self.hue_names is None:
for d in density:
if d.size > 1:
d /= max_density.max()
else:
for i, group in enumerate(density):
for d in group:
if scale_hue:
max = max_density[i].max()
else:
max = max_density.max()
if d.size > 1:
d /= max
def scale_width(self, density):
"""Scale each density curve to the same height."""
if self.hue_names is None:
for d in density:
d /= d.max()
else:
for group in density:
for d in group:
d /= d.max()
def scale_count(self, density, counts, scale_hue):
"""Scale each density curve by the number of observations."""
if self.hue_names is None:
for count, d in zip(counts, density):
d /= d.max()
d *= count / counts.max()
else:
for i, group in enumerate(density):
for j, d in enumerate(group):
count = counts[i, j]
if scale_hue:
scaler = count / counts[i].max()
else:
scaler = count / counts.max()
d /= d.max()
d *= scaler
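    # Worked example (illustrative): without hue nesting and counts of
    # [10, 40], each curve is first normalized to a peak of 1 and then
    # multiplied by count / counts.max(), so the first violin peaks at 0.25
    # while the largest group keeps the full width of 1.0.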
@property
def dwidth(self):
if self.hue_names is None:
return self.width / 2
elif self.split:
return self.width / 2
else:
return self.width / (2 * len(self.hue_names))
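    # Illustrative values: with the default width of .8, dwidth is .4 for a
    # plain or split violin; with two (unsplit) hue levels it shrinks to
    # .8 / (2 * 2) = .2 so that nested violins fit side by side.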
def draw_violins(self, ax):
"""Draw the violins onto `ax`."""
fill_func = ax.fill_betweenx if self.orient == "v" else ax.fill_between
for i, group_data in enumerate(self.plot_data):
kws = dict(edgecolor=self.gray, linewidth=self.linewidth)
# Option 1: we have a single level of grouping
# --------------------------------------------
if self.plot_hues is None:
support, density = self.support[i], self.density[i]
# Handle special case of no observations in this bin
if support.size == 0:
continue
# Handle special case of a single observation
elif support.size == 1:
val = np.asscalar(support)
d = np.asscalar(density)
self.draw_single_observation(ax, i, val, d)
continue
# Draw the violin for this group
grid = np.ones(self.gridsize) * i
fill_func(support,
grid - density * self.dwidth,
grid + density * self.dwidth,
color=self.colors[i],
**kws)
# Draw the interior representation of the data
if self.inner is None:
continue
# Get a nan-free vector of datapoints
violin_data = remove_na(group_data)
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data, support, density, i)
# Draw quartile lines
elif self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data, support, density, i)
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data, support, density, i)
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i)
# Option 2: we have nested grouping by a hue variable
# ---------------------------------------------------
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
support, density = self.support[i][j], self.density[i][j]
kws["color"] = self.colors[j]
# Add legend data, but just for one set of violins
if not i:
self.add_legend_data(ax, self.colors[j], hue_level)
# Handle the special case where we have no observations
if support.size == 0:
continue
# Handle the special case where we have one observation
elif support.size == 1:
val = np.asscalar(support)
d = np.asscalar(density)
if self.split:
d = d / 2
at_group = i + offsets[j]
self.draw_single_observation(ax, at_group, val, d)
continue
# Option 2a: we are drawing a single split violin
# -----------------------------------------------
if self.split:
grid = np.ones(self.gridsize) * i
if j:
fill_func(support,
grid,
grid + density * self.dwidth,
**kws)
else:
fill_func(support,
grid - density * self.dwidth,
grid,
**kws)
# Draw the interior representation of the data
if self.inner is None:
continue
# Get a nan-free vector of datapoints
hue_mask = self.plot_hues[i] == hue_level
violin_data = remove_na(group_data[hue_mask])
# Draw quartile lines
if self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data,
support, density, i,
["left", "right"][j])
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data,
support, density, i,
["left", "right"][j])
# The box and point interior plots are drawn for
# all data at the group level, so we just do that once
if not j:
continue
# Get the whole vector for this group level
violin_data = remove_na(group_data)
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data,
support, density, i)
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i)
# Option 2b: we are drawing full nested violins
# -----------------------------------------------
else:
grid = np.ones(self.gridsize) * (i + offsets[j])
fill_func(support,
grid - density * self.dwidth,
grid + density * self.dwidth,
**kws)
# Draw the interior representation
if self.inner is None:
continue
# Get a nan-free vector of datapoints
hue_mask = self.plot_hues[i] == hue_level
violin_data = remove_na(group_data[hue_mask])
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data,
support, density,
i + offsets[j])
# Draw quartile lines
elif self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data,
support, density,
i + offsets[j])
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data,
support, density,
i + offsets[j])
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i + offsets[j])
def draw_single_observation(self, ax, at_group, at_quant, density):
"""Draw a line to mark a single observation."""
d_width = density * self.dwidth
if self.orient == "v":
ax.plot([at_group - d_width, at_group + d_width],
[at_quant, at_quant],
color=self.gray,
linewidth=self.linewidth)
else:
ax.plot([at_quant, at_quant],
[at_group - d_width, at_group + d_width],
color=self.gray,
linewidth=self.linewidth)
def draw_box_lines(self, ax, data, support, density, center):
"""Draw boxplot information at center of the density."""
# Compute the boxplot statistics
q25, q50, q75 = np.percentile(data, [25, 50, 75])
whisker_lim = 1.5 * iqr(data)
h1 = np.min(data[data >= (q25 - whisker_lim)])
h2 = np.max(data[data <= (q75 + whisker_lim)])
# Draw a boxplot using lines and a point
if self.orient == "v":
ax.plot([center, center], [h1, h2],
linewidth=self.linewidth,
color=self.gray)
ax.plot([center, center], [q25, q75],
linewidth=self.linewidth * 3,
color=self.gray)
ax.scatter(center, q50,
zorder=3,
color="white",
edgecolor=self.gray,
s=np.square(self.linewidth * 2))
else:
ax.plot([h1, h2], [center, center],
linewidth=self.linewidth,
color=self.gray)
ax.plot([q25, q75], [center, center],
linewidth=self.linewidth * 3,
color=self.gray)
ax.scatter(q50, center,
zorder=3,
color="white",
edgecolor=self.gray,
s=np.square(self.linewidth * 2))
def draw_quartiles(self, ax, data, support, density, center, split=False):
"""Draw the quartiles as lines at width of density."""
q25, q50, q75 = np.percentile(data, [25, 50, 75])
self.draw_to_density(ax, center, q25, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 1.5] * 2)
self.draw_to_density(ax, center, q50, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 3] * 2)
self.draw_to_density(ax, center, q75, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 1.5] * 2)
def draw_points(self, ax, data, center):
"""Draw individual observations as points at middle of the violin."""
kws = dict(s=np.square(self.linewidth * 2),
c=self.gray,
edgecolor=self.gray)
grid = np.ones(len(data)) * center
if self.orient == "v":
ax.scatter(grid, data, **kws)
else:
ax.scatter(data, grid, **kws)
def draw_stick_lines(self, ax, data, support, density,
center, split=False):
"""Draw individual observations as sticks at width of density."""
for val in data:
self.draw_to_density(ax, center, val, support, density, split,
linewidth=self.linewidth * .5)
def draw_to_density(self, ax, center, val, support, density, split, **kws):
"""Draw a line orthogonal to the value axis at width of density."""
idx = np.argmin(np.abs(support - val))
width = self.dwidth * density[idx] * .99
kws["color"] = self.gray
if self.orient == "v":
if split == "left":
ax.plot([center - width, center], [val, val], **kws)
elif split == "right":
ax.plot([center, center + width], [val, val], **kws)
else:
ax.plot([center - width, center + width], [val, val], **kws)
else:
if split == "left":
ax.plot([val, val], [center - width, center], **kws)
elif split == "right":
ax.plot([val, val], [center, center + width], **kws)
else:
ax.plot([val, val], [center - width, center + width], **kws)
def plot(self, ax):
"""Make the violin plot."""
self.draw_violins(ax)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _StripPlotter(_CategoricalPlotter):
"""1-d scatterplot with categorical organization."""
def __init__(self, x, y, hue, data, order, hue_order,
jitter, split, orient, color, palette):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, 1)
# Set object attributes
self.split = split
self.width = .8
if jitter == 1: # Use a good default for `jitter = True`
jlim = 0.1
else:
jlim = float(jitter)
if self.hue_names is not None and split:
jlim /= len(self.hue_names)
self.jitterer = stats.uniform(-jlim, jlim * 2).rvs
def draw_stripplot(self, ax, kws):
"""Draw the points onto `ax`."""
# Set the default zorder to 2.1, so that the points
# will be drawn on top of line elements (like in a boxplot)
kws.setdefault("zorder", 2.1)
for i, group_data in enumerate(self.plot_data):
if self.plot_hues is None:
# Determine the positions of the points
strip_data = remove_na(group_data)
jitter = self.jitterer(len(strip_data))
kws["color"] = self.colors[i]
# Draw the plot
if self.orient == "v":
ax.scatter(i + jitter, strip_data, **kws)
else:
ax.scatter(strip_data, i + jitter, **kws)
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
hue_mask = self.plot_hues[i] == hue_level
if not hue_mask.any():
continue
# Determine the positions of the points
strip_data = remove_na(group_data[hue_mask])
pos = i + offsets[j] if self.split else i
jitter = self.jitterer(len(strip_data))
kws["color"] = self.colors[j]
# Only label one set of plots
if i:
kws.pop("label", None)
else:
kws["label"] = hue_level
# Draw the plot
if self.orient == "v":
ax.scatter(pos + jitter, strip_data, **kws)
else:
ax.scatter(strip_data, pos + jitter, **kws)
def plot(self, ax, kws):
"""Make the plot."""
self.draw_stripplot(ax, kws)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _SwarmPlotter(_BoxPlotter):
def __init__(self):
pass
def plot(self, ax):
pass
class _CategoricalStatPlotter(_CategoricalPlotter):
@property
def nested_width(self):
"""A float with the width of plot elements when hue nesting is used."""
return self.width / len(self.hue_names)
def estimate_statistic(self, estimator, ci, n_boot):
if self.hue_names is None:
statistic = []
confint = []
else:
statistic = [[] for _ in self.plot_data]
confint = [[] for _ in self.plot_data]
for i, group_data in enumerate(self.plot_data):
# Option 1: we have a single layer of grouping
# --------------------------------------------
if self.plot_hues is None:
if self.plot_units is None:
stat_data = remove_na(group_data)
unit_data = None
else:
unit_data = self.plot_units[i]
have = pd.notnull(np.c_[group_data, unit_data]).all(axis=1)
stat_data = group_data[have]
unit_data = unit_data[have]
# Estimate a statistic from the vector of data
if not stat_data.size:
statistic.append(np.nan)
else:
statistic.append(estimator(stat_data))
# Get a confidence interval for this estimate
if ci is not None:
if stat_data.size < 2:
confint.append([np.nan, np.nan])
continue
boots = bootstrap(stat_data, func=estimator,
n_boot=n_boot,
units=unit_data)
confint.append(utils.ci(boots, ci))
# Option 2: we are grouping by a hue layer
# ----------------------------------------
else:
for j, hue_level in enumerate(self.hue_names):
if not self.plot_hues[i].size:
statistic[i].append(np.nan)
if ci is not None:
confint[i].append((np.nan, np.nan))
continue
hue_mask = self.plot_hues[i] == hue_level
if self.plot_units is None:
stat_data = remove_na(group_data[hue_mask])
unit_data = None
else:
group_units = self.plot_units[i]
have = pd.notnull(
np.c_[group_data, group_units]
).all(axis=1)
stat_data = group_data[hue_mask & have]
unit_data = group_units[hue_mask & have]
# Estimate a statistic from the vector of data
if not stat_data.size:
statistic[i].append(np.nan)
else:
statistic[i].append(estimator(stat_data))
# Get a confidence interval for this estimate
if ci is not None:
if stat_data.size < 2:
confint[i].append([np.nan, np.nan])
continue
boots = bootstrap(stat_data, func=estimator,
n_boot=n_boot,
units=unit_data)
confint[i].append(utils.ci(boots, ci))
# Save the resulting values for plotting
self.statistic = np.array(statistic)
self.confint = np.array(confint)
# Rename the value label to reflect the estimation
if self.value_label is not None:
self.value_label = "{}({})".format(estimator.__name__,
self.value_label)
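    # Shape note (illustrative): after estimate_statistic runs, self.statistic
    # is an array of length n_groups (or n_groups x n_hues with hue nesting)
    # and self.confint has an extra trailing dimension of 2 holding the low
    # and high confidence-interval bounds; missing cells are filled with NaN.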
def draw_confints(self, ax, at_group, confint, colors, **kws):
kws.setdefault("lw", mpl.rcParams["lines.linewidth"] * 1.8)
for at, (ci_low, ci_high), color in zip(at_group,
confint,
colors):
if self.orient == "v":
ax.plot([at, at], [ci_low, ci_high], color=color, **kws)
else:
ax.plot([ci_low, ci_high], [at, at], color=color, **kws)
class _BarPlotter(_CategoricalStatPlotter):
"""Show point estimates and confidence intervals with bars."""
def __init__(self, x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
orient, color, palette, saturation, errcolor):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient,
order, hue_order, units)
self.establish_colors(color, palette, saturation)
self.estimate_statistic(estimator, ci, n_boot)
self.errcolor = errcolor
def draw_bars(self, ax, kws):
"""Draw the bars onto `ax`."""
# Get the right matplotlib function depending on the orientation
barfunc = ax.bar if self.orient == "v" else ax.barh
barpos = np.arange(len(self.statistic))
if self.plot_hues is None:
# Draw the bars
barfunc(barpos, self.statistic, self.width,
color=self.colors, align="center", **kws)
# Draw the confidence intervals
errcolors = [self.errcolor] * len(barpos)
self.draw_confints(ax, barpos, self.confint, errcolors)
else:
for j, hue_level in enumerate(self.hue_names):
# Draw the bars
offpos = barpos + self.hue_offsets[j]
barfunc(offpos, self.statistic[:, j], self.nested_width,
color=self.colors[j], align="center",
label=hue_level, **kws)
# Draw the confidence intervals
if self.confint.size:
confint = self.confint[:, j]
errcolors = [self.errcolor] * len(offpos)
self.draw_confints(ax, offpos, confint, errcolors)
def plot(self, ax, bar_kws):
"""Make the plot."""
self.draw_bars(ax, bar_kws)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _PointPlotter(_CategoricalStatPlotter):
"""Show point estimates and confidence intervals with (joined) points."""
def __init__(self, x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
markers, linestyles, dodge, join, scale,
orient, color, palette):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient,
order, hue_order, units)
self.establish_colors(color, palette, 1)
self.estimate_statistic(estimator, ci, n_boot)
# Override the default palette for single-color plots
if hue is None and color is None and palette is None:
self.colors = [color_palette()[0]] * len(self.colors)
# Don't join single-layer plots with different colors
if hue is None and palette is not None:
join = False
# Use a good default for `dodge=True`
if dodge is True and self.hue_names is not None:
dodge = .025 * len(self.hue_names)
# Make sure we have a marker for each hue level
if isinstance(markers, string_types):
markers = [markers] * len(self.colors)
self.markers = markers
# Make sure we have a line style for each hue level
if isinstance(linestyles, string_types):
linestyles = [linestyles] * len(self.colors)
self.linestyles = linestyles
# Set the other plot components
self.dodge = dodge
self.join = join
self.scale = scale
@property
def hue_offsets(self):
"""Offsets relative to the center position for each hue level."""
offset = np.linspace(0, self.dodge, len(self.hue_names))
offset -= offset.mean()
return offset
def draw_points(self, ax):
"""Draw the main data components of the plot."""
# Get the center positions on the categorical axis
pointpos = np.arange(len(self.statistic))
# Get the size of the plot elements
lw = mpl.rcParams["lines.linewidth"] * 1.8 * self.scale
mew = lw * .75
markersize = np.pi * np.square(lw) * 2
if self.plot_hues is None:
# Draw lines joining each estimate point
if self.join:
color = self.colors[0]
ls = self.linestyles[0]
if self.orient == "h":
ax.plot(self.statistic, pointpos,
color=color, ls=ls, lw=lw)
else:
ax.plot(pointpos, self.statistic,
color=color, ls=ls, lw=lw)
# Draw the confidence intervals
self.draw_confints(ax, pointpos, self.confint, self.colors, lw=lw)
# Draw the estimate points
marker = self.markers[0]
if self.orient == "h":
ax.scatter(self.statistic, pointpos,
linewidth=mew, marker=marker, s=markersize,
c=self.colors, edgecolor=self.colors)
else:
ax.scatter(pointpos, self.statistic,
linewidth=mew, marker=marker, s=markersize,
c=self.colors, edgecolor=self.colors)
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
# Determine the values to plot for this level
statistic = self.statistic[:, j]
# Determine the position on the categorical and z axes
offpos = pointpos + offsets[j]
z = j + 1
# Draw lines joining each estimate point
if self.join:
color = self.colors[j]
ls = self.linestyles[j]
if self.orient == "h":
ax.plot(statistic, offpos, color=color,
zorder=z, ls=ls, lw=lw)
else:
ax.plot(offpos, statistic, color=color,
zorder=z, ls=ls, lw=lw)
# Draw the confidence intervals
if self.confint.size:
confint = self.confint[:, j]
errcolors = [self.colors[j]] * len(offpos)
self.draw_confints(ax, offpos, confint, errcolors,
zorder=z, lw=lw)
# Draw the estimate points
marker = self.markers[j]
if self.orient == "h":
ax.scatter(statistic, offpos, label=hue_level,
c=[self.colors[j]] * len(offpos),
linewidth=mew, marker=marker, s=markersize,
edgecolor=self.colors[j], zorder=z)
else:
ax.scatter(offpos, statistic, label=hue_level,
c=[self.colors[j]] * len(offpos),
linewidth=mew, marker=marker, s=markersize,
edgecolor=self.colors[j], zorder=z)
def plot(self, ax):
"""Make the plot."""
self.draw_points(ax)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
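# A small illustration of the hue-offset computation shared by the plotters
# above: evenly spaced positions centered on zero, so nested elements straddle
# each group position.  The function name ``_hue_offsets_sketch`` is
# illustrative; ``_PointPlotter.hue_offsets`` applies the same
# ``np.linspace`` / mean-centering idea with ``self.dodge`` as the total spread.
def _hue_offsets_sketch(n_levels, spread):
    import numpy as np
    offsets = np.linspace(0, spread, n_levels)
    return offsets - offsets.mean()
# For example, three hue levels with a total spread of .3 land at roughly
# [-0.15, 0.0, 0.15] relative to the group's center position.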
_categorical_docs = dict(
# Shared narrative docs
main_api_narrative=dedent("""\
Input data can be passed in a variety of formats, including:
- Vectors of data represented as lists, numpy arrays, or pandas Series
objects passed directly to the ``x``, ``y``, and/or ``hue`` parameters.
- A "long-form" DataFrame, in which case the ``x``, ``y``, and ``hue``
variables will determine how the data are plotted.
- A "wide-form" DataFrame, such that each numeric column will be plotted.
    - Anything accepted by ``plt.boxplot`` (e.g. a 2d array or list of vectors).
In most cases, it is possible to use numpy or Python objects, but pandas
objects are preferable because the associated names will be used to
annotate the axes. Additionally, you can use Categorical types for the
grouping variables to control the order of plot elements.\
"""),
# Shared function parameters
input_params=dedent("""\
x, y, hue : names of variables in ``data`` or vector data, optional
Inputs for plotting long-form data. See examples for interpretation.\
"""),
string_input_params=dedent("""\
x, y, hue : names of variables in ``data``
Inputs for plotting long-form data. See examples for interpretation.\
"""),
categorical_data=dedent("""\
data : DataFrame, array, or list of arrays, optional
Dataset for plotting. If ``x`` and ``y`` are absent, this is
interpreted as wide-form. Otherwise it is expected to be long-form.\
"""),
long_form_data=dedent("""\
data : DataFrame
Long-form (tidy) dataset for plotting. Each column should correspond
to a variable, and each row should correspond to an observation.\
"""),
order_vars=dedent("""\
order, hue_order : lists of strings, optional
Order to plot the categorical levels in, otherwise the levels are
inferred from the data objects.\
"""),
stat_api_params=dedent("""\
estimator : callable that maps vector -> scalar, optional
Statistical function to estimate within each categorical bin.
ci : float or None, optional
Size of confidence intervals to draw around estimated values. If
``None``, no bootstrapping will be performed, and error bars will
not be drawn.
n_boot : int, optional
Number of bootstrap iterations to use when computing confidence
intervals.
units : name of variable in ``data`` or vector data, optional
Identifier of sampling units, which will be used to perform a
multilevel bootstrap and account for repeated measures design.\
"""),
orient=dedent("""\
orient : "v" | "h", optional
Orientation of the plot (vertical or horizontal). This is usually
inferred from the dtype of the input variables, but can be used to
        specify when the "categorical" variable is numeric or when plotting
wide-form data.\
"""),
color=dedent("""\
color : matplotlib color, optional
Color for all of the elements, or seed for :func:`light_palette` when
using hue nesting.\
"""),
palette=dedent("""\
palette : palette name, list, or dict, optional
Color palette that maps either the grouping variable or the hue
variable. If the palette is a dictionary, keys should be names of
levels and values should be matplotlib colors.\
"""),
saturation=dedent("""\
saturation : float, optional
Proportion of the original saturation to draw colors at. Large patches
often look better with slightly desaturated colors, but set this to
``1`` if you want the plot colors to perfectly match the input color
spec.\
"""),
width=dedent("""\
width : float, optional
Width of a full element when not using hue nesting, or width of all the
elements for one level of the major grouping variable.\
"""),
linewidth=dedent("""\
linewidth : float, optional
Width of the gray lines that frame the plot elements.\
"""),
ax_in=dedent("""\
ax : matplotlib Axes, optional
Axes object to draw the plot onto, otherwise uses the current Axes.\
"""),
ax_out=dedent("""\
ax : matplotlib Axes
Returns the Axes object with the boxplot drawn onto it.\
"""),
# Shared see also
boxplot=dedent("""\
boxplot : A traditional box-and-whisker plot with a similar API.\
"""),
violinplot=dedent("""\
violinplot : A combination of boxplot and kernel density estimation.\
"""),
stripplot=dedent("""\
stripplot : A scatterplot where one variable is categorical. Can be used
                in conjunction with other plots to show each observation.\
"""),
barplot=dedent("""\
barplot : Show point estimates and confidence intervals using bars.\
"""),
countplot=dedent("""\
countplot : Show the counts of observations in each categorical bin.\
"""),
pointplot=dedent("""\
pointplot : Show point estimates and confidence intervals using scatterplot
glyphs.\
"""),
factorplot=dedent("""\
    factorplot : Combine a categorical plot with a :class:`FacetGrid`.\
"""),
)
_categorical_docs.update(_facet_docs)
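# The ``_categorical_docs`` mapping above is a docstring-templating device:
# each public function below assembles its ``__doc__`` by calling
# ``dedent(...).format(**_categorical_docs)`` so that shared parameter text
# lives in one place.  A minimal sketch of the same idea (the names
# ``_doc_snippets`` and ``_templating_demo`` are hypothetical, not seaborn API):
def _templating_demo():
    from textwrap import dedent
    _doc_snippets = dict(ax_in="ax : matplotlib Axes, optional")
    return dedent("""\
        Demo docstring.
        Parameters
        ----------
        {ax_in}
        """).format(**_doc_snippets)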
def boxplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
orient=None, color=None, palette=None, saturation=.75,
width=.8, fliersize=5, linewidth=None, whis=1.5, notch=False,
ax=None, **kwargs):
    # Try to handle broken backwards compatibility
# This should help with the lack of a smooth deprecation,
# but won't catch everything
warn = False
if isinstance(x, pd.DataFrame):
data = x
x = None
warn = True
if "vals" in kwargs:
x = kwargs.pop("vals")
warn = True
if "groupby" in kwargs:
y = x
x = kwargs.pop("groupby")
warn = True
if "vert" in kwargs:
vert = kwargs.pop("vert", True)
if not vert:
x, y = y, x
orient = "v" if vert else "h"
warn = True
if "names" in kwargs:
kwargs.pop("names")
warn = True
if "join_rm" in kwargs:
kwargs.pop("join_rm")
warn = True
msg = ("The boxplot API has been changed. Attempting to adjust your "
"arguments for the new API (which might not work). Please update "
"your code. See the version 0.6 release notes for more info.")
if warn:
warnings.warn(msg, UserWarning)
plotter = _BoxPlotter(x, y, hue, data, order, hue_order,
orient, color, palette, saturation,
width, fliersize, linewidth)
if ax is None:
ax = plt.gca()
kwargs.update(dict(whis=whis, notch=notch))
plotter.plot(ax, kwargs)
return ax
boxplot.__doc__ = dedent("""\
Draw a box plot to show distributions with respect to categories.
A box plot (or box-and-whisker plot) shows the distribution of quantitative
data in a way that facilitates comparisons between variables or across
levels of a categorical variable. The box shows the quartiles of the
dataset while the whiskers extend to show the rest of the distribution,
except for points that are determined to be "outliers" using a method
that is a function of the inter-quartile range.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{orient}
{color}
{palette}
{saturation}
{width}
fliersize : float, optional
Size of the markers used to indicate outlier observations.
{linewidth}
whis : float, optional
Proportion of the IQR past the low and high quartiles to extend the
plot whiskers. Points outside this range will be identified as
outliers.
notch : boolean, optional
Whether to "notch" the box to indicate a confidence interval for the
median. There are several other parameters that can control how the
notches are drawn; see the ``plt.boxplot`` help for more information
on them.
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed through to ``plt.boxplot`` at draw
time.
Returns
-------
{ax_out}
See Also
--------
{violinplot}
{stripplot}
Examples
--------
Draw a single horizontal boxplot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.boxplot(x=tips["total_bill"])
Draw a vertical boxplot grouped by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", data=tips)
Draw a boxplot with nested grouping by two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="Set3")
Draw a boxplot with nested grouping when some bins are empty:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", hue="time",
... data=tips, linewidth=2.5)
Control box order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="size", y="tip", data=tips.sort("size"))
Control box order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Draw a boxplot for each numeric variable in a DataFrame:
.. plot::
:context: close-figs
>>> iris = sns.load_dataset("iris")
>>> ax = sns.boxplot(data=iris, orient="h", palette="Set2")
Use :func:`stripplot` to show the datapoints on top of the boxes:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", data=tips)
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips,
... size=4, jitter=True, edgecolor="gray")
Draw a box plot on to a :class:`FacetGrid` to group within an additional
categorical variable:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", size=4, aspect=.7)
>>> (g.map(sns.boxplot, "sex", "total_bill", "smoker")
... .despine(left=True)
... .add_legend(title="smoker")) #doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
def violinplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
bw="scott", cut=2, scale="area", scale_hue=True, gridsize=100,
width=.8, inner="box", split=False, orient=None, linewidth=None,
color=None, palette=None, saturation=.75, ax=None, **kwargs):
    # Try to handle broken backwards compatibility
# This should help with the lack of a smooth deprecation,
# but won't catch everything
warn = False
if isinstance(x, pd.DataFrame):
data = x
x = None
warn = True
if "vals" in kwargs:
x = kwargs.pop("vals")
warn = True
if "groupby" in kwargs:
y = x
x = kwargs.pop("groupby")
warn = True
if "vert" in kwargs:
vert = kwargs.pop("vert", True)
if not vert:
x, y = y, x
orient = "v" if vert else "h"
warn = True
msg = ("The violinplot API has been changed. Attempting to adjust your "
"arguments for the new API (which might not work). Please update "
"your code. See the version 0.6 release notes for more info.")
if warn:
warnings.warn(msg, UserWarning)
plotter = _ViolinPlotter(x, y, hue, data, order, hue_order,
bw, cut, scale, scale_hue, gridsize,
width, inner, split, orient, linewidth,
color, palette, saturation)
if ax is None:
ax = plt.gca()
plotter.plot(ax)
return ax
violinplot.__doc__ = dedent("""\
Draw a combination of boxplot and kernel density estimate.
A violin plot plays a similar role as a box and whisker plot. It shows the
distribution of quantitative data across several levels of one (or more)
categorical variables such that those distributions can be compared. Unlike
a box plot, in which all of the plot components correspond to actual
datapoints, the violin plot features a kernel density estimation of the
underlying distribution.
This can be an effective and attractive way to show multiple distributions
of data at once, but keep in mind that the estimation procedure is
influenced by the sample size, and violins for relatively small samples
might look misleadingly smooth.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
bw : {{'scott', 'silverman', float}}, optional
Either the name of a reference rule or the scale factor to use when
computing the kernel bandwidth. The actual kernel size will be
determined by multiplying the scale factor by the standard deviation of
the data within each bin.
cut : float, optional
Distance, in units of bandwidth size, to extend the density past the
extreme datapoints. Set to 0 to limit the violin range within the range
of the observed data (i.e., to have the same effect as ``trim=True`` in
        ``ggplot``).
scale : {{"area", "count", "width"}}, optional
The method used to scale the width of each violin. If ``area``, each
violin will have the same area. If ``count``, the width of the violins
will be scaled by the number of observations in that bin. If ``width``,
each violin will have the same width.
scale_hue : bool, optional
When nesting violins using a ``hue`` variable, this parameter
determines whether the scaling is computed within each level of the
major grouping variable (``scale_hue=True``) or across all the violins
on the plot (``scale_hue=False``).
gridsize : int, optional
Number of points in the discrete grid used to compute the kernel
density estimate.
{width}
inner : {{"box", "quartile", "point", "stick", None}}, optional
Representation of the datapoints in the violin interior. If ``box``,
        draw a miniature boxplot. If ``quartile``, draw the quartiles of the
distribution. If ``point`` or ``stick``, show each underlying
datapoint. Using ``None`` will draw unadorned violins.
split : bool, optional
When using hue nesting with a variable that takes two levels, setting
``split`` to True will draw half of a violin for each level. This can
make it easier to directly compare the distributions.
{orient}
{linewidth}
{color}
{palette}
{saturation}
{ax_in}
Returns
-------
{ax_out}
See Also
--------
{boxplot}
{stripplot}
Examples
--------
Draw a single horizontal violinplot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.violinplot(x=tips["total_bill"])
Draw a vertical violinplot grouped by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", data=tips)
Draw a violinplot with nested grouping by two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="muted")
    Draw split violins to compare across the hue variable:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="muted", split=True)
Control violin order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="size", y="tip", data=tips.sort("size"))
Control violin order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Scale the violin width by the number of observations in each bin:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count")
Draw the quartiles as horizontal lines instead of a mini-box:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="quartile")
Show each observation with a stick inside the violin:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick")
Scale the density relative to the counts across all bins:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick", scale_hue=False)
Use a narrow bandwidth to reduce the amount of smoothing:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick",
... scale_hue=False, bw=.2)
Draw horizontal violins:
.. plot::
:context: close-figs
>>> planets = sns.load_dataset("planets")
>>> ax = sns.violinplot(x="orbital_period", y="method",
... data=planets[planets.orbital_period < 1000],
... scale="width", palette="Set3")
Draw a violin plot on to a :class:`FacetGrid` to group within an additional
categorical variable:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", size=4, aspect=.7)
>>> (g.map(sns.violinplot, "sex", "total_bill", "smoker", split=True)
... .despine(left=True)
... .add_legend(title="smoker")) # doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
def stripplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
jitter=False, split=True, orient=None, color=None, palette=None,
size=7, edgecolor="w", linewidth=1, ax=None, **kwargs):
plotter = _StripPlotter(x, y, hue, data, order, hue_order,
jitter, split, orient, color, palette)
if ax is None:
ax = plt.gca()
kwargs.update(dict(s=size ** 2, edgecolor=edgecolor, linewidth=linewidth))
if edgecolor == "gray":
kwargs["edgecolor"] = plotter.gray
plotter.plot(ax, kwargs)
return ax
stripplot.__doc__ = dedent("""\
Draw a scatterplot where one variable is categorical.
A strip plot can be drawn on its own, but it is also a good complement
to a box or violin plot in cases where you want to show all observations
along with some representation of the underlying distribution.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
jitter : float, ``True``/``1`` is special-cased, optional
Amount of jitter (only along the categorical axis) to apply. This
can be useful when you have many points and they overlap, so that
it is easier to see the distribution. You can specify the amount
of jitter (half the width of the uniform random variable support),
or just use ``True`` for a good default.
split : bool, optional
When using ``hue`` nesting, setting this to ``True`` will separate
the strips for different hue levels along the categorical axis.
Otherwise, the points for each level will be plotted on top of
each other.
{orient}
{color}
{palette}
size : float, optional
Diameter of the markers, in points. (Although ``plt.scatter`` is used
to draw the points, the ``size`` argument here takes a "normal"
        markersize and not size^2 like ``plt.scatter``).
edgecolor : matplotlib color, "gray" is special-cased, optional
Color of the lines around each point. If you pass ``"gray"``, the
brightness is determined by the color palette used for the body
of the points.
{linewidth}
{ax_in}
Returns
-------
{ax_out}
See Also
--------
{boxplot}
{violinplot}
Examples
--------
Draw a single horizontal strip plot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.stripplot(x=tips["total_bill"])
Group the strips by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips)
Add jitter to bring out the distribution of values:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips, jitter=True)
Use a smaller amount of jitter:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips, jitter=0.05)
Draw horizontal strips:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="total_bill", y="day", data=tips,
... jitter=True)
Nest the strips within a second categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="sex", y="total_bill", hue="day",
... data=tips, jitter=True)
Draw each level of the ``hue`` variable at the same location on the
major categorical axis:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", hue="smoker",
... data=tips, jitter=True,
... palette="Set2", split=False)
Control strip order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="size", y="tip", data=tips.sort("size"))
Control strip order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Draw strips with large points and different aesthetics:
.. plot::
:context: close-figs
>>> ax = sns.stripplot("day", "total_bill", "smoker", data=tips,
... palette="Set2", size=20, marker="D",
... edgecolor="gray", alpha=.25)
Draw strips of observations on top of a box plot:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="tip", y="day", data=tips, whis=np.inf)
>>> ax = sns.stripplot(x="tip", y="day", data=tips, jitter=True)
Draw strips of observations on top of a violin plot:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", data=tips, inner=None)
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips,
... jitter=True, color="white", edgecolor="gray")
""").format(**_categorical_docs)
def barplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
estimator=np.mean, ci=95, n_boot=1000, units=None,
orient=None, color=None, palette=None, saturation=.75,
errcolor=".26", ax=None, **kwargs):
# Handle some deprecated arguments
if "hline" in kwargs:
kwargs.pop("hline")
warnings.warn("The `hline` parameter has been removed", UserWarning)
if "dropna" in kwargs:
kwargs.pop("dropna")
warnings.warn("The `dropna` parameter has been removed", UserWarning)
if "x_order" in kwargs:
order = kwargs.pop("x_order")
warnings.warn("The `x_order` parameter has been renamed `order`",
UserWarning)
plotter = _BarPlotter(x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
orient, color, palette, saturation,
errcolor)
if ax is None:
ax = plt.gca()
plotter.plot(ax, kwargs)
return ax
barplot.__doc__ = dedent("""\
Show point estimates and confidence intervals as rectangular bars.
A bar plot represents an estimate of central tendency for a numeric
variable with the height of each rectangle and provides some indication of
the uncertainty around that estimate using error bars. Bar plots include 0
in the quantitative axis range, and they are a good choice when 0 is a
meaningful value for the quantitative variable, and you want to make
comparisons against it.
For datasets where 0 is not a meaningful value, a point plot will allow you
to focus on differences between levels of one or more categorical
variables.
It is also important to keep in mind that a bar plot shows only the mean
(or other estimator) value, but in many cases it may be more informative to
show the distribution of values at each level of the categorical variables.
In that case, other approaches such as a box or violin plot may be more
appropriate.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{stat_api_params}
{orient}
{color}
{palette}
{saturation}
errcolor : matplotlib color
Color for the lines that represent the confidence interval.
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed through to ``plt.bar`` at draw
time.
Returns
-------
{ax_out}
See Also
--------
{countplot}
{pointplot}
{factorplot}
Examples
--------
Draw a set of vertical bar plots grouped by a categorical variable:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.barplot(x="day", y="total_bill", data=tips)
    Draw a set of vertical bars with nested grouping by two variables:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="day", y="total_bill", hue="sex", data=tips)
Draw a set of horizontal bars:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="tip", y="day", data=tips)
Control bar order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="size", y="tip", data=tips.sort("size"))
Control bar order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Use median as the estimate of central tendency:
.. plot::
:context: close-figs
>>> from numpy import median
>>> ax = sns.barplot(x="day", y="tip", data=tips, estimator=median)
Show the standard error of the mean with the error bars:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="day", y="tip", data=tips, ci=68)
Use a different color palette for the bars:
.. plot::
:context: close-figs
>>> ax = sns.barplot("size", y="total_bill", data=tips.sort("size"),
... palette="Blues_d")
Plot all bars in a single color:
.. plot::
:context: close-figs
>>> ax = sns.barplot("size", y="total_bill", data=tips.sort("size"),
... color="salmon", saturation=.5)
Use ``plt.bar`` keyword arguments to further change the aesthetic:
.. plot::
:context: close-figs
>>> ax = sns.barplot("day", "total_bill", data=tips,
... linewidth=2.5, facecolor=(1, 1, 1, 0),
... errcolor=".2", edgecolor=".2")
""").format(**_categorical_docs)
def pointplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
estimator=np.mean, ci=95, n_boot=1000, units=None,
markers="o", linestyles="-", dodge=False, join=True, scale=1,
orient=None, color=None, palette=None, ax=None, **kwargs):
# Handle some deprecated arguments
if "hline" in kwargs:
kwargs.pop("hline")
warnings.warn("The `hline` parameter has been removed", UserWarning)
if "dropna" in kwargs:
kwargs.pop("dropna")
warnings.warn("The `dropna` parameter has been removed", UserWarning)
if "x_order" in kwargs:
order = kwargs.pop("x_order")
warnings.warn("The `x_order` parameter has been renamed `order`",
UserWarning)
plotter = _PointPlotter(x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
markers, linestyles, dodge, join, scale,
orient, color, palette)
if ax is None:
ax = plt.gca()
plotter.plot(ax)
return ax
pointplot.__doc__ = dedent("""\
Show point estimates and confidence intervals using scatter plot glyphs.
A point plot represents an estimate of central tendency for a numeric
variable by the position of scatter plot points and provides some
indication of the uncertainty around that estimate using error bars.
Point plots can be more useful than bar plots for focusing comparisons
between different levels of one or more categorical variables. They are
particularly adept at showing interactions: how the relationship between
levels of one categorical variable changes across levels of a second
categorical variable. The lines that join each point from the same ``hue``
level allow interactions to be judged by differences in slope, which is
easier for the eyes than comparing the heights of several groups of points
or bars.
It is important to keep in mind that a point plot shows only the mean (or
other estimator) value, but in many cases it may be more informative to
show the distribution of values at each level of the categorical variables.
In that case, other approaches such as a box or violin plot may be more
appropriate.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{stat_api_params}
markers : string or list of strings, optional
Markers to use for each of the ``hue`` levels.
linestyles : string or list of strings, optional
Line styles to use for each of the ``hue`` levels.
dodge : bool or float, optional
Amount to separate the points for each level of the ``hue`` variable
along the categorical axis.
join : bool, optional
If ``True``, lines will be drawn between point estimates at the same
``hue`` level.
scale : float, optional
Scale factor for the plot elements.
{orient}
{color}
{palette}
{ax_in}
Returns
-------
{ax_out}
See Also
--------
{barplot}
{factorplot}
Examples
--------
Draw a set of vertical point plots grouped by a categorical variable:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("darkgrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.pointplot(x="time", y="total_bill", data=tips)
    Draw a set of vertical points with nested grouping by two variables:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips)
Separate the points for different hue levels along the categorical axis:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips, dodge=True)
Use a different marker and line style for the hue levels:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips,
... markers=["o", "x"],
... linestyles=["-", "--"])
Draw a set of horizontal points:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="tip", y="day", data=tips)
Don't draw a line connecting each point:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="tip", y="day", data=tips, join=False)
Use a different color for a single-layer plot:
.. plot::
:context: close-figs
>>> ax = sns.pointplot("time", y="total_bill", data=tips,
... color="#bb3f3f")
Use a different color palette for the points:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips, palette="Set2")
Control point order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="size", y="tip", data=tips.sort("size"))
Control point order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Use median as the estimate of central tendency:
.. plot::
:context: close-figs
>>> from numpy import median
>>> ax = sns.pointplot(x="day", y="tip", data=tips, estimator=median)
Show the standard error of the mean with the error bars:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="day", y="tip", data=tips, ci=68)
""").format(**_categorical_docs)
def countplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
orient=None, color=None, palette=None, saturation=.75,
ax=None, **kwargs):
estimator = len
ci = None
n_boot = 0
units = None
errcolor = None
if x is None and y is not None:
orient = "h"
x = y
elif y is None and x is not None:
orient = "v"
y = x
elif x is not None and y is not None:
raise TypeError("Cannot pass values for both `x` and `y`")
else:
raise TypeError("Must pass valus for either `x` or `y`")
plotter = _BarPlotter(x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
orient, color, palette, saturation,
errcolor)
plotter.value_label = "count"
if ax is None:
ax = plt.gca()
plotter.plot(ax, kwargs)
return ax
countplot.__doc__ = dedent("""\
Show the counts of observations in each categorical bin using bars.
A count plot can be thought of as a histogram across a categorical, instead
of quantitative, variable. The basic API and options are identical to those
for :func:`barplot`, so you can compare counts across nested variables.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{orient}
{color}
{palette}
{saturation}
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed to ``plt.bar``.
Returns
-------
{ax_out}
See Also
--------
{barplot}
{factorplot}
Examples
--------
Show value counts for a single categorical variable:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set(style="darkgrid")
>>> titanic = sns.load_dataset("titanic")
>>> ax = sns.countplot(x="class", data=titanic)
Show value counts for two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="class", hue="who", data=titanic)
Plot the bars horizontally:
.. plot::
:context: close-figs
>>> ax = sns.countplot(y="class", hue="who", data=titanic)
Use a different color palette:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="who", data=titanic, palette="Set3")
Use ``plt.bar`` keyword arguments for a different look:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="who", data=titanic,
... facecolor=(0, 0, 0, 0),
... linewidth=5,
... edgecolor=sns.color_palette("dark", 3))
""").format(**_categorical_docs)
def factorplot(x=None, y=None, hue=None, data=None, row=None, col=None,
col_wrap=None, estimator=np.mean, ci=95, n_boot=1000,
units=None, order=None, hue_order=None, row_order=None,
col_order=None, kind="point", size=4, aspect=1,
orient=None, color=None, palette=None,
legend=True, legend_out=True, sharex=True, sharey=True,
margin_titles=False, facet_kws=None, **kwargs):
# Handle some deprecated arguments
if "hline" in kwargs:
kwargs.pop("hline")
warnings.warn("The `hline` parameter has been removed", UserWarning)
if "dropna" in kwargs:
kwargs.pop("dropna")
warnings.warn("The `dropna` parameter has been removed", UserWarning)
if "x_order" in kwargs:
order = kwargs.pop("x_order")
warnings.warn("The `x_order` parameter has been renamed `order`",
UserWarning)
# Determine the plotting function
try:
plot_func = globals()[kind + "plot"]
except KeyError:
err = "Plot kind '{}' is not recognized".format(kind)
raise ValueError(err)
# Alias the input variables to determine categorical order and palette
# correctly in the case of a count plot
if kind == "count":
if x is None and y is not None:
x_, y_, orient = y, y, "h"
elif y is None and x is not None:
x_, y_, orient = x, x, "v"
else:
raise ValueError("Either `x` or `y` must be None for count plots")
else:
x_, y_ = x, y
# Determine the order for the whole dataset, which will be used in all
# facets to ensure representation of all data in the final plot
p = _CategoricalPlotter()
p.establish_variables(x_, y_, hue, data, orient, order, hue_order)
order = p.group_names
hue_order = p.hue_names
# Determine the palette to use
# (FacetGrid will pass a value for ``color`` to the plotting function
# so we need to define ``palette`` to get default behavior for the
    # categorical functions)
p.establish_colors(color, palette, 1)
if kind != "point" or hue is not None:
palette = p.colors
# Determine keyword arguments for the facets
facet_kws = {} if facet_kws is None else facet_kws
facet_kws.update(
data=data, row=row, col=col,
row_order=row_order, col_order=col_order,
col_wrap=col_wrap, size=size, aspect=aspect,
sharex=sharex, sharey=sharey,
legend_out=legend_out, margin_titles=margin_titles,
dropna=False,
)
# Determine keyword arguments for the plotting function
plot_kws = dict(
order=order, hue_order=hue_order,
orient=orient, color=color, palette=palette,
)
plot_kws.update(kwargs)
if kind in ["bar", "point"]:
plot_kws.update(
estimator=estimator, ci=ci, n_boot=n_boot, units=units,
)
# Initialize the facets
g = FacetGrid(**facet_kws)
# Draw the plot onto the facets
g.map_dataframe(plot_func, x, y, hue, **plot_kws)
# Special case axis labels for a count type plot
if kind == "count":
if x is None:
g.set_axis_labels(x_var="count")
if y is None:
g.set_axis_labels(y_var="count")
if legend and (hue is not None) and (hue not in [x, row, col]):
hue_order = list(map(str, hue_order))
g.add_legend(title=hue, label_order=hue_order)
return g
factorplot.__doc__ = dedent("""\
Draw a categorical plot onto a FacetGrid.
The default plot that is shown is a point plot, but other seaborn
categorical plots can be chosen with the ``kind`` parameter, including
box plots, violin plots, bar plots, or strip plots.
It is important to choose how variables get mapped to the plot structure
such that the most important comparisons are easiest to make. As a general
rule, it is easier to compare positions that are closer together, so the
``hue`` variable should be used for the most important comparisons. For
secondary comparisons, try to share the quantitative axis (so, use ``col``
for vertical plots and ``row`` for horizontal plots). Note that, although
it is possible to make rather complex plots using this function, in many
    cases you may be better served by creating several smaller and more focused
plots than by trying to stuff many comparisons into one figure.
After plotting, the :class:`FacetGrid` with the plot is returned and can
be used directly to tweak supporting plot details or add other layers.
Note that, unlike when using the underlying plotting functions directly,
data must be passed in a long-form DataFrame with variables specified by
passing strings to ``x``, ``y``, ``hue``, and other parameters.
As in the case with the underlying plot functions, if variables have a
``categorical`` data type, the correct orientation of the plot elements,
the levels of the categorical variables, and their order will be inferred
from the objects. Otherwise you may have to use the function parameters
(``orient``, ``order``, ``hue_order``, etc.) to set up the plot correctly.
Parameters
----------
{string_input_params}
{long_form_data}
row, col : names of variables in ``data``, optional
Categorical variables that will determine the faceting of the grid.
{col_wrap}
{stat_api_params}
{order_vars}
row_order, col_order : lists of strings, optional
Order to organize the rows and/or columns of the grid in, otherwise the
orders are inferred from the data objects.
kind : {{``point``, ``bar``, ``count``, ``box``, ``violin``, ``strip``}}
The kind of plot to draw.
{size}
{aspect}
{orient}
{color}
{palette}
legend : bool, optional
If ``True`` and there is a ``hue`` variable, draw a legend on the plot.
{legend_out}
{share_xy}
{margin_titles}
facet_kws : dict, optional
Dictionary of other keyword arguments to pass to :class:`FacetGrid`.
kwargs : key, value pairings
Other keyword arguments are passed through to the underlying plotting
function.
Returns
-------
g : :class:`FacetGrid`
Returns the :class:`FacetGrid` object with the plot on it for further
tweaking.
Examples
--------
Draw a single facet to use the :class:`FacetGrid` legend placement:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set(style="ticks")
>>> exercise = sns.load_dataset("exercise")
>>> g = sns.factorplot(x="time", y="pulse", hue="kind", data=exercise)
Use a different plot kind to visualize the same data:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... data=exercise, kind="violin")
Facet along the columns to show a third categorical variable:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... col="diet", data=exercise)
Use a different size and aspect ratio for the facets:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... col="diet", data=exercise,
... size=5, aspect=.8)
Make many column facets and wrap them into the rows of the grid:
.. plot::
:context: close-figs
>>> titanic = sns.load_dataset("titanic")
>>> g = sns.factorplot("alive", col="deck", col_wrap=4,
... data=titanic[titanic.deck.notnull()],
... kind="count", size=2.5, aspect=.8)
Plot horizontally and pass other keyword arguments to the plot function:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="age", y="embark_town",
... hue="sex", row="class",
... data=titanic[titanic.embark_town.notnull()],
... orient="h", size=2, aspect=3.5, palette="Set3",
... kind="violin", split=True, cut=0, bw=.2)
Use methods on the returned :class:`FacetGrid` to tweak the presentation:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="who", y="survived", col="class",
... data=titanic, saturation=.5,
... kind="bar", ci=None, aspect=.6)
>>> (g.set_axis_labels("", "Survival Rate")
... .set_xticklabels(["Men", "Women", "Children"])
... .set_titles("{{col_name}} {{col_var}}")
... .set(ylim=(0, 1))
... .despine(left=True)) #doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
|
bsd-3-clause
|
idlead/scikit-learn
|
sklearn/cluster/__init__.py
|
364
|
1228
|
"""
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
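# A minimal usage sketch for one of the estimators re-exported above, wrapped
# in a function so that importing the package stays side-effect free (the toy
# data is illustrative):
def _kmeans_usage_sketch():
    import numpy as np
    from sklearn.cluster import KMeans
    X = np.array([[1., 2.], [1., 4.], [1., 0.],
                  [4., 2.], [4., 4.], [4., 0.]])
    km = KMeans(n_clusters=2, random_state=0).fit(X)
    return km.labels_, km.cluster_centers_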
|
bsd-3-clause
|
tdhopper/scikit-learn
|
examples/svm/plot_svm_kernels.py
|
329
|
1971
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
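# A hedged follow-up (not part of the original example): after the loop, clf
# holds the last fit (the RBF kernel), so new points can be classified
# directly.  Wrapped in a function so it only runs when called explicitly.
def _predict_origin_sketch():
    # Predicted class label for the point (0, 0) under the RBF-kernel model.
    return clf.predict([[0., 0.]])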
|
bsd-3-clause
|
ilo10/scikit-learn
|
sklearn/gaussian_process/gaussian_process.py
|
83
|
34544
|
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
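# A small worked example of the helper above (the function name is
# illustrative and not part of the public API).  For the three 1-D samples
# [[0.], [1.], [3.]] the index pairs (0, 1), (0, 2), (1, 2) give componentwise
# L1 distances [[1.], [3.], [2.]].
def _l1_cross_distances_example():
    X = np.array([[0.], [1.], [3.]])
    D, ij = l1_cross_distances(X)
    # D  -> array([[ 1.], [ 3.], [ 2.]])
    # ij -> array([[0, 1], [0, 2], [1, 2]])
    return D, ij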
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
        MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
        Input X and observations y are centered and reduced with respect to
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
            Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
        y : array_like, shape (n_eval, ) or (n_eval, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
                for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
                for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = check_array(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = check_array(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = check_array(self.thetaL)
self.thetaU = check_array(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
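# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module). It
# assumes the estimator defined above is the legacy ``GaussianProcess`` class
# of this module and that numpy is available here as ``np``; the data and
# hyperparameters below are arbitrary examples.
def _gaussian_process_usage_demo():
    X = np.atleast_2d(np.linspace(0., 10., 20)).T    # training inputs
    y = (X * np.sin(X)).ravel()                      # noiseless observations
    gp = GaussianProcess(theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                         random_start=5)
    gp.fit(X, y)                                     # MLE of theta within [thetaL, thetaU]
    x_eval = np.atleast_2d(np.linspace(0., 10., 100)).T
    y_pred, mse = gp.predict(x_eval, eval_MSE=True)  # BLUP mean and its MSE
    return y_pred, np.sqrt(mse)                      # prediction and pointwise std. dev.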
|
bsd-3-clause
|
njoubert/ardupilot
|
Tools/LogAnalyzer/tests/TestOptFlow.py
|
21
|
14959
|
from LogAnalyzer import Test,TestResult
import DataflashLog
import math
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
class TestFlow(Test):
'''test optical flow sensor scale factor calibration'''
#
    # Use the following procedure to log the calibration data. It is assumed that the optical flow sensor has been
    # correctly aligned, is focussed and the test is performed over a textured surface with adequate lighting.
    # Note that the strobing effect from non-incandescent artificial lighting can produce poor optical flow measurements.
#
# 1) Set LOG_DISARMED and FLOW_ENABLE to 1 and verify that ATT and OF messages are being logged onboard
# 2) Place on level ground, apply power and wait for EKF to complete attitude alignment
# 3) Keeping the copter level, lift it to shoulder height and rock between +-20 and +-30 degrees
# in roll about an axis that passes through the flow sensor lens assembly. The time taken to rotate from
# maximum left roll to maximum right roll should be about 1 second.
# 4) Repeat 3) about the pitch axis
# 5) Holding the copter level, lower it to the ground and remove power
# 6) Transfer the logfile from the sdcard.
# 7) Open a terminal and cd to the ardupilot/Tools/LogAnalyzer directory
    # 8) To run the analysis, enter 'python LogAnalyzer.py <log file name including full path>'
# 9) Check the OpticalFlow test status printed to the screen. The analysis plots are saved to
# flow_calibration.pdf and the recommended scale factors to flow_calibration.param
def __init__(self):
Test.__init__(self)
self.name = "OpticalFlow"
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
def FAIL():
self.result.status = TestResult.StatusType.FAIL
def WARN():
if self.result.status != TestResult.StatusType.FAIL:
self.result.status = TestResult.StatusType.WARN
try:
# tuning parameters used by the algorithm
tilt_threshold = 15 # roll and pitch threshold used to start and stop calibration (deg)
quality_threshold = 124 # minimum flow quality required for data to be used by the curve fit (N/A)
min_rate_threshold = 0.0 # if the gyro rate is less than this, the data will not be used by the curve fit (rad/sec)
            max_rate_threshold = 2.0 # if the gyro rate is greater than this, the data will not be used by the curve fit (rad/sec)
param_std_threshold = 5.0 # maximum allowable 1-std uncertainty in scaling parameter (scale factor * 1000)
param_abs_threshold = 200 # max/min allowable scale factor parameter. Values of FLOW_FXSCALER and FLOW_FYSCALER outside the range of +-param_abs_threshold indicate a sensor configuration problem.
            min_num_points = 100 # minimum number of points required for a curve fit - this is a necessary, but not sufficient, condition - the standard deviation estimate of the fit gradient is also important.
# get the existing scale parameters
flow_fxscaler = logdata.parameters["FLOW_FXSCALER"]
flow_fyscaler = logdata.parameters["FLOW_FYSCALER"]
# load required optical flow data
if "OF" in logdata.channels:
flowX = np.zeros(len(logdata.channels["OF"]["flowX"].listData))
for i in range(len(logdata.channels["OF"]["flowX"].listData)):
(line, flowX[i]) = logdata.channels["OF"]["flowX"].listData[i]
bodyX = np.zeros(len(logdata.channels["OF"]["bodyX"].listData))
for i in range(len(logdata.channels["OF"]["bodyX"].listData)):
(line, bodyX[i]) = logdata.channels["OF"]["bodyX"].listData[i]
flowY = np.zeros(len(logdata.channels["OF"]["flowY"].listData))
for i in range(len(logdata.channels["OF"]["flowY"].listData)):
(line, flowY[i]) = logdata.channels["OF"]["flowY"].listData[i]
bodyY = np.zeros(len(logdata.channels["OF"]["bodyY"].listData))
for i in range(len(logdata.channels["OF"]["bodyY"].listData)):
(line, bodyY[i]) = logdata.channels["OF"]["bodyY"].listData[i]
flow_time_us = np.zeros(len(logdata.channels["OF"]["TimeUS"].listData))
for i in range(len(logdata.channels["OF"]["TimeUS"].listData)):
(line, flow_time_us[i]) = logdata.channels["OF"]["TimeUS"].listData[i]
flow_qual = np.zeros(len(logdata.channels["OF"]["Qual"].listData))
for i in range(len(logdata.channels["OF"]["Qual"].listData)):
(line, flow_qual[i]) = logdata.channels["OF"]["Qual"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no optical flow data\n"
return
# load required attitude data
if "ATT" in logdata.channels:
Roll = np.zeros(len(logdata.channels["ATT"]["Roll"].listData))
for i in range(len(logdata.channels["ATT"]["Roll"].listData)):
(line, Roll[i]) = logdata.channels["ATT"]["Roll"].listData[i]
Pitch = np.zeros(len(logdata.channels["ATT"]["Pitch"].listData))
for i in range(len(logdata.channels["ATT"]["Pitch"].listData)):
(line, Pitch[i]) = logdata.channels["ATT"]["Pitch"].listData[i]
att_time_us = np.zeros(len(logdata.channels["ATT"]["TimeUS"].listData))
for i in range(len(logdata.channels["ATT"]["TimeUS"].listData)):
(line, att_time_us[i]) = logdata.channels["ATT"]["TimeUS"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no attitude data\n"
return
# calculate the start time for the roll calibration
startTime = int(0)
startRollIndex = int(0)
for i in range(len(Roll)):
if abs(Roll[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startRollIndex = i
break
# calculate the end time for the roll calibration
endTime = int(0)
endRollIndex = int(0)
for i in range(len(Roll)-1,-1,-1):
if abs(Roll[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endRollIndex = i
break
# check we have enough roll data points
if (endRollIndex - startRollIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient roll data pointsa\n"
return
# resample roll test data excluding data before first movement and after last movement
# also exclude data where there is insufficient angular rate
flowX_resampled = []
bodyX_resampled = []
flowX_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startRollIndex) and (i <= endRollIndex) and (abs(bodyX[i]) > min_rate_threshold) and (abs(bodyX[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowX_resampled.append(flowX[i])
bodyX_resampled.append(bodyX[i])
flowX_time_us_resampled.append(flow_time_us[i])
# calculate the start time for the pitch calibration
startTime = 0
startPitchIndex = int(0)
for i in range(len(Pitch)):
if abs(Pitch[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startPitchIndex = i
break
# calculate the end time for the pitch calibration
endTime = 0
endPitchIndex = int(0)
for i in range(len(Pitch)-1,-1,-1):
if abs(Pitch[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endPitchIndex = i
break
# check we have enough pitch data points
if (endPitchIndex - startPitchIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient pitch data pointsa\n"
return
# resample pitch test data excluding data before first movement and after last movement
# also exclude data where there is insufficient or too much angular rate
flowY_resampled = []
bodyY_resampled = []
flowY_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startPitchIndex) and (i <= endPitchIndex) and (abs(bodyY[i]) > min_rate_threshold) and (abs(bodyY[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowY_resampled.append(flowY[i])
bodyY_resampled.append(bodyY[i])
flowY_time_us_resampled.append(flow_time_us[i])
# fit a straight line to the flow vs body rate data and calculate the scale factor parameter required to achieve a slope of 1
coef_flow_x , cov_x = np.polyfit(bodyX_resampled,flowX_resampled,1,rcond=None, full=False, w=None, cov=True)
coef_flow_y , cov_y = np.polyfit(bodyY_resampled,flowY_resampled,1,rcond=None, full=False, w=None, cov=True)
            # taking the existing scale factor parameters into account, calculate the parameter values required to achieve a unity slope
flow_fxscaler_new = int(1000 * (((1 + 0.001 * float(flow_fxscaler))/coef_flow_x[0] - 1)))
flow_fyscaler_new = int(1000 * (((1 + 0.001 * float(flow_fyscaler))/coef_flow_y[0] - 1)))
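            # Worked example of the correction above (illustrative numbers, not
            # taken from a real log): with an existing FLOW_FXSCALER of 0 and a
            # fitted slope of 1.25, the correction works out to
            # 1000 * (1.0 / 1.25 - 1) = -200, i.e. the reported flow is scaled
            # down so that a refit would give a slope of 1.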
# Do a sanity check on the scale factor variance
if sqrt(cov_x[0][0]) > param_std_threshold or sqrt(cov_y[0][0]) > param_std_threshold:
FAIL()
self.result.statusMessage = "FAIL: inaccurate fit - poor quality or insufficient data\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# Do a sanity check on the scale factors
if abs(flow_fxscaler_new) > param_abs_threshold or abs(flow_fyscaler_new) > param_abs_threshold:
FAIL()
self.result.statusMessage = "FAIL: required scale factors are excessive\nFLOW_FXSCALER=%i\nFLOW_FYSCALER=%i\n" % (flow_fxscaler,flow_fyscaler)
# display recommended scale factors
self.result.statusMessage = "Set FLOW_FXSCALER to %i\nSet FLOW_FYSCALER to %i\n\nCal plots saved to flow_calibration.pdf\nCal parameters saved to flow_calibration.param\n\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (flow_fxscaler_new,flow_fyscaler_new,round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# calculate fit display data
body_rate_display = [-max_rate_threshold,max_rate_threshold]
fit_coef_x = np.poly1d(coef_flow_x)
flowX_display = fit_coef_x(body_rate_display)
fit_coef_y = np.poly1d(coef_flow_y)
flowY_display = fit_coef_y(body_rate_display)
# plot and save calibration test points to PDF
from matplotlib.backends.backend_pdf import PdfPages
output_plot_filename = "flow_calibration.pdf"
pp = PdfPages(output_plot_filename)
plt.figure(1,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(bodyX_resampled,flowX_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowX_display,'r',linewidth=2.5,label="linear fit")
plt.title('X axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(bodyY_resampled,flowY_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowY_display,'r',linewidth=2.5,label="linear fit")
plt.title('Y axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
plt.figure(2,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(flow_time_us,flowX,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyX,'r',label="gyro rate - all")
plt.plot(flowX_time_us_resampled,flowX_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowX_time_us_resampled,bodyX_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('X axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(flow_time_us,flowY,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyY,'r',label="gyro rate - all")
plt.plot(flowY_time_us_resampled,flowY_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowY_time_us_resampled,bodyY_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('Y axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
# close the pdf file
pp.close()
# close all figures
plt.close("all")
# write correction parameters to file
test_results_filename = "flow_calibration.param"
file = open(test_results_filename,"w")
file.write("FLOW_FXSCALER"+" "+str(flow_fxscaler_new)+"\n")
file.write("FLOW_FYSCALER"+" "+str(flow_fyscaler_new)+"\n")
file.close()
except KeyError as e:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = str(e) + ' not found'
|
gpl-3.0
|
mandarup/multi-object-tracking
|
sort.py
|
1
|
12062
|
"""
SORT: A Simple, Online and Realtime Tracker
Copyright (C) 2016 Alex Bewley [email protected]
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function
from numba import jit
import os.path
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from skimage import io
from sklearn.utils.linear_assignment_ import linear_assignment
import glob
import time
import argparse
from filterpy.kalman import KalmanFilter
@jit
def iou(bb_test, bb_gt):
"""
    Computes IOU between two bboxes in the form [x1,y1,x2,y2]
"""
xx1 = np.maximum(bb_test[0], bb_gt[0])
yy1 = np.maximum(bb_test[1], bb_gt[1])
xx2 = np.minimum(bb_test[2], bb_gt[2])
yy2 = np.minimum(bb_test[3], bb_gt[3])
w = np.maximum(0., xx2 - xx1)
h = np.maximum(0., yy2 - yy1)
wh = w * h
o = wh / ((bb_test[2] - bb_test[0]) * (bb_test[3] - bb_test[1])
+ (bb_gt[2] - bb_gt[0]) * (bb_gt[3] - bb_gt[1]) - wh)
return(o)
def convert_bbox_to_z(bbox):
"""
Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
[x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is
the aspect ratio
"""
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
x = bbox[0] + w / 2.
y = bbox[1] + h / 2.
s = w * h # scale is just area
r = w / float(h)
return np.array([x, y, s, r]).reshape((4, 1))
def convert_x_to_bbox(x, score=None):
"""
Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
[x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
"""
w = np.sqrt(x[2] * x[3])
h = x[2] / w
if(score is None):
return np.array([x[0] - w / 2., x[1] - h / 2., x[0] +
w / 2., x[1] + h / 2.]).reshape((1, 4))
else:
return np.array([x[0] - w / 2., x[1] - h / 2., x[0] +
w / 2., x[1] + h / 2., score]).reshape((1, 5))
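# Hedged sanity check (illustrative, not part of the original tracker): the two
# conversion helpers above should round-trip a box up to floating point error.
def _bbox_roundtrip_demo():
    bbox = np.array([100., 50., 180., 210.])    # corner form [x1, y1, x2, y2]
    z = convert_bbox_to_z(bbox)                 # centre form [x, y, s, r]
    recovered = convert_x_to_bbox(z).ravel()    # back to corner form
    assert np.allclose(bbox, recovered)
    return recovered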
class KalmanBoxTracker(object):
"""
    This class represents the internal state of individual tracked objects observed as bbox.
"""
count = 0
def __init__(self, bbox):
"""
Initialises a tracker using initial bounding box.
"""
# define constant velocity model
self.kf = KalmanFilter(dim_x=7, dim_z=4)
self.kf.F = np.array([[1, 0, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1]])
self.kf.H = np.array([[1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0]])
self.kf.R[2:, 2:] *= 10.
self.kf.P[4:, 4:] *= 1000. # give high uncertainty to the unobservable initial velocities
self.kf.P *= 10.
self.kf.Q[-1, -1] *= 0.01
self.kf.Q[4:, 4:] *= 0.01
self.kf.x[:4] = convert_bbox_to_z(bbox)
self.time_since_update = 0
self.id = KalmanBoxTracker.count
KalmanBoxTracker.count += 1
self.history = []
self.hits = 0
self.hit_streak = 0
self.age = 0
def update(self, bbox):
"""
Updates the state vector with observed bbox.
"""
self.time_since_update = 0
self.history = []
self.hits += 1
self.hit_streak += 1
self.kf.update(convert_bbox_to_z(bbox))
def predict(self):
"""
Advances the state vector and returns the predicted bounding box estimate.
"""
if((self.kf.x[6] + self.kf.x[2]) <= 0):
self.kf.x[6] *= 0.0
self.kf.predict()
self.age += 1
if(self.time_since_update > 0):
self.hit_streak = 0
self.time_since_update += 1
self.history.append(convert_x_to_bbox(self.kf.x))
return self.history[-1]
def get_state(self):
"""
Returns the current bounding box estimate.
"""
return convert_x_to_bbox(self.kf.x)
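# Hedged usage sketch (illustrative only, not part of the original tracker):
# a single predict/update cycle of the constant-velocity box tracker defined
# above, driven by made-up detections.
def _kalman_box_tracker_demo():
    trk = KalmanBoxTracker(np.array([100., 50., 180., 210., 0.9]))
    predicted_bbox = trk.predict()                       # propagate one frame ahead
    trk.update(np.array([102., 51., 182., 212., 0.9]))   # correct with a new detection
    return predicted_bbox, trk.get_state()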
def associate_detections_to_trackers(detections, trackers, iou_threshold=0.3):
"""
Assigns detections to tracked object (both represented as bounding boxes)
Returns 3 lists of matches, unmatched_detections and unmatched_trackers
"""
if(len(trackers) == 0):
return np.empty((0, 2), dtype=int), np.arange(
len(detections)), np.empty((0, 5), dtype=int)
iou_matrix = np.zeros((len(detections), len(trackers)), dtype=np.float32)
for d, det in enumerate(detections):
for t, trk in enumerate(trackers):
iou_matrix[d, t] = iou(det, trk)
matched_indices = linear_assignment(-iou_matrix)
unmatched_detections = []
for d, det in enumerate(detections):
if(d not in matched_indices[:, 0]):
unmatched_detections.append(d)
unmatched_trackers = []
for t, trk in enumerate(trackers):
if(t not in matched_indices[:, 1]):
unmatched_trackers.append(t)
# filter out matched with low IOU
matches = []
for m in matched_indices:
if(iou_matrix[m[0], m[1]] < iou_threshold):
unmatched_detections.append(m[0])
unmatched_trackers.append(m[1])
else:
matches.append(m.reshape(1, 2))
if(len(matches) == 0):
matches = np.empty((0, 2), dtype=int)
else:
matches = np.concatenate(matches, axis=0)
return matches, np.array(
unmatched_detections), np.array(unmatched_trackers)
class Sort(object):
def __init__(self, max_age=1, min_hits=3):
"""
Sets key parameters for SORT
"""
self.max_age = max_age
self.min_hits = min_hits
self.trackers = []
self.frame_count = 0
def update(self, dets):
"""
Params:
        dets - a numpy array of detections in the format [[x1,y1,x2,y2,score],[x1,y1,x2,y2,score],...]
Requires: this method must be called once for each frame even with empty detections.
        Returns a similar array, where the last column is the object ID.
NOTE: The number of objects returned may differ from the number of detections provided.
"""
self.frame_count += 1
# get predicted locations from existing trackers.
trks = np.zeros((len(self.trackers), 5))
to_del = []
ret = []
for t, trk in enumerate(trks):
pos = self.trackers[t].predict()[0]
trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
if(np.any(np.isnan(pos))):
to_del.append(t)
trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
for t in reversed(to_del):
self.trackers.pop(t)
matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(
dets, trks)
# update matched trackers with assigned detections
for t, trk in enumerate(self.trackers):
if(t not in unmatched_trks):
d = matched[np.where(matched[:, 1] == t)[0], 0]
trk.update(dets[d, :][0])
# create and initialise new trackers for unmatched detections
for i in unmatched_dets:
trk = KalmanBoxTracker(dets[i, :])
self.trackers.append(trk)
i = len(self.trackers)
for trk in reversed(self.trackers):
d = trk.get_state()[0]
if((trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits)):
# +1 as MOT benchmark requires positive
ret.append(np.concatenate((d, [trk.id + 1])).reshape(1, -1))
i -= 1
# remove dead tracklet
if(trk.time_since_update > self.max_age):
self.trackers.pop(i)
if(len(ret) > 0):
return np.concatenate(ret)
return np.empty((0, 5))
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='SORT demo')
parser.add_argument(
'--display',
dest='display',
help='Display online tracker output (slow) [False]',
action='store_true')
args = parser.parse_args()
return args
if __name__ == '__main__':
# all train
sequences = [
'PETS09-S2L1',
'TUD-Campus',
'TUD-Stadtmitte',
'ETH-Bahnhof',
'ETH-Sunnyday',
'ETH-Pedcross2',
'KITTI-13',
'KITTI-17',
'ADL-Rundle-6',
'ADL-Rundle-8',
'Venice-2']
args = parse_args()
display = args.display
phase = 'train'
total_time = 0.0
total_frames = 0
colours = np.random.rand(32, 3) # used only for display
if(display):
if not os.path.exists('mot_benchmark'):
print('\n\tERROR: mot_benchmark link not found!\n\n Create a symbolic link to the MOT benchmark\n (https://motchallenge.net/data/2D_MOT_2015/#download). E.g.:\n\n $ ln -s /path/to/MOT2015_challenge/2DMOT2015 mot_benchmark\n\n')
exit()
plt.ion()
fig = plt.figure()
if not os.path.exists('output'):
os.makedirs('output')
for seq in sequences:
mot_tracker = Sort() # create instance of the SORT tracker
seq_dets = np.loadtxt(
'data/%s/det.txt' %
(seq), delimiter=',') # load detections
with open('output/%s.txt' % (seq), 'w') as out_file:
print("Processing %s." % (seq))
for frame in range(int(seq_dets[:, 0].max())):
frame += 1 # detection and frame numbers begin at 1
dets = seq_dets[seq_dets[:, 0] == frame, 2:7]
                # convert from [x1,y1,w,h] to [x1,y1,x2,y2]
dets[:, 2:4] += dets[:, 0:2]
total_frames += 1
if(display):
ax1 = fig.add_subplot(111, aspect='equal')
fn = 'mot_benchmark/%s/%s/img1/%06d.jpg' % (
phase, seq, frame)
im = io.imread(fn)
ax1.imshow(im)
plt.title(seq + ' Tracked Targets')
start_time = time.time()
trackers = mot_tracker.update(dets)
cycle_time = time.time() - start_time
total_time += cycle_time
for d in trackers:
print(
'%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1' %
(frame, d[4], d[0], d[1], d[2] - d[0], d[3] - d[1]), file=out_file)
if(display):
d = d.astype(np.int32)
ax1.add_patch(patches.Rectangle(
(d[0], d[1]), d[2] - d[0], d[3] - d[1], fill=False, lw=3, ec=colours[d[4] % 32, :]))
ax1.set_adjustable('box-forced')
if(display):
fig.canvas.flush_events()
plt.draw()
ax1.cla()
print("Total Tracking took: %.3f for %d frames or %.1f FPS" %
(total_time, total_frames, total_frames / total_time))
if(display):
print("Note: to get real runtime results run without the option: --display")
|
gpl-3.0
|
ltiao/scikit-learn
|
examples/semi_supervised/plot_label_propagation_digits_active_learning.py
|
294
|
3417
|
"""
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points, remote from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
|
bsd-3-clause
|
Eric89GXL/mne-python
|
tutorials/preprocessing/plot_30_filtering_resampling.py
|
7
|
13908
|
# -*- coding: utf-8 -*-
"""
.. _tut-filter-resample:
Filtering and resampling data
=============================
This tutorial covers filtering and resampling, and gives examples of how
filtering can be used for artifact repair.
.. contents:: Page contents
:local:
:depth: 2
We begin as always by importing the necessary Python modules and loading some
:ref:`example data <sample-dataset>`. We'll also crop the data to 60 seconds
(to save memory on the documentation server):
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(0, 60).load_data() # use just 60 seconds of data, to save memory
###############################################################################
# Background on filtering
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# A filter removes or attenuates parts of a signal. Usually, filters act on
# specific *frequency ranges* of a signal — for example, suppressing all
# frequency components above or below a certain cutoff value. There are *many*
# ways of designing digital filters; see :ref:`disc-filtering` for a longer
# discussion of the various approaches to filtering physiological signals in
# MNE-Python.
#
#
# Repairing artifacts by filtering
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Artifacts that are restricted to a narrow frequency range can sometimes
# be repaired by filtering the data. Two examples of frequency-restricted
# artifacts are slow drifts and power line noise. Here we illustrate how each
# of these can be repaired by filtering.
#
#
# Slow drifts
# ~~~~~~~~~~~
#
# Low-frequency drifts in raw data can usually be spotted by plotting a fairly
# long span of data with the :meth:`~mne.io.Raw.plot` method, though it is
# helpful to disable channel-wise DC shift correction to make slow drifts
# more readily visible. Here we plot 60 seconds, showing all the magnetometer
# channels:
mag_channels = mne.pick_types(raw.info, meg='mag')
raw.plot(duration=60, order=mag_channels, proj=False,
n_channels=len(mag_channels), remove_dc=False)
###############################################################################
# A half-period of this slow drift appears to last around 10 seconds, so a full
# period would be 20 seconds, i.e., :math:`\frac{1}{20} \mathrm{Hz}`. To be
# sure those components are excluded, we want our highpass to be *higher* than
# that, so let's try :math:`\frac{1}{10} \mathrm{Hz}` and :math:`\frac{1}{5}
# \mathrm{Hz}` filters to see which works best:
for cutoff in (0.1, 0.2):
raw_highpass = raw.copy().filter(l_freq=cutoff, h_freq=None)
fig = raw_highpass.plot(duration=60, order=mag_channels, proj=False,
n_channels=len(mag_channels), remove_dc=False)
fig.subplots_adjust(top=0.9)
fig.suptitle('High-pass filtered at {} Hz'.format(cutoff), size='xx-large',
weight='bold')
###############################################################################
# Looks like 0.1 Hz was not quite high enough to fully remove the slow drifts.
# Notice that the text output summarizes the relevant characteristics of the
# filter that was created. If you want to visualize the filter, you can pass
# the same arguments used in the call to :meth:`raw.filter()
# <mne.io.Raw.filter>` above to the function :func:`mne.filter.create_filter`
# to get the filter parameters, and then pass the filter parameters to
# :func:`mne.viz.plot_filter`. :func:`~mne.filter.create_filter` also requires
# parameters ``data`` (a :class:`NumPy array <numpy.ndarray>`) and ``sfreq``
# (the sampling frequency of the data), so we'll extract those from our
# :class:`~mne.io.Raw` object:
filter_params = mne.filter.create_filter(raw.get_data(), raw.info['sfreq'],
l_freq=0.2, h_freq=None)
###############################################################################
# Notice that the output is the same as when we applied this filter to the data
# using :meth:`raw.filter() <mne.io.Raw.filter>`. You can now pass the filter
# parameters (and the sampling frequency) to :func:`~mne.viz.plot_filter` to
# plot the filter:
mne.viz.plot_filter(filter_params, raw.info['sfreq'], flim=(0.01, 5))
###############################################################################
# .. _tut-section-line-noise:
#
# Power line noise
# ~~~~~~~~~~~~~~~~
#
# Power line noise is an environmental artifact that manifests as persistent
# oscillations centered around the `AC power line frequency`_. Power line
# artifacts are easiest to see on plots of the spectrum, so we'll use
# :meth:`~mne.io.Raw.plot_psd` to illustrate. We'll also write a little
# function that adds arrows to the spectrum plot to highlight the artifacts:
def add_arrows(axes):
# add some arrows at 60 Hz and its harmonics
for ax in axes:
freqs = ax.lines[-1].get_xdata()
psds = ax.lines[-1].get_ydata()
for freq in (60, 120, 180, 240):
idx = np.searchsorted(freqs, freq)
# get ymax of a small region around the freq. of interest
y = psds[(idx - 4):(idx + 5)].max()
ax.arrow(x=freqs[idx], y=y + 18, dx=0, dy=-12, color='red',
width=0.1, head_width=3, length_includes_head=True)
fig = raw.plot_psd(fmax=250, average=True)
add_arrows(fig.axes[:2])
###############################################################################
# It should be evident that MEG channels are more susceptible to this kind of
# interference than EEG that is recorded in the magnetically shielded room.
# Removing power-line noise can be done with a notch filter,
# applied directly to the :class:`~mne.io.Raw` object, specifying an array of
# frequencies to be attenuated. Since the EEG channels are relatively
# unaffected by the power line noise, we'll also specify a ``picks`` argument
# so that only the magnetometers and gradiometers get filtered:
meg_picks = mne.pick_types(raw.info, meg=True)
freqs = (60, 120, 180, 240)
raw_notch = raw.copy().notch_filter(freqs=freqs, picks=meg_picks)
for title, data in zip(['Un', 'Notch '], [raw, raw_notch]):
fig = data.plot_psd(fmax=250, average=True)
fig.subplots_adjust(top=0.85)
fig.suptitle('{}filtered'.format(title), size='xx-large', weight='bold')
add_arrows(fig.axes[:2])
###############################################################################
# :meth:`~mne.io.Raw.notch_filter` also has parameters to control the notch
# width, transition bandwidth and other aspects of the filter. See the
# docstring for details.
#
# It's also possible to try to use a spectrum fitting routine to notch filter.
# In principle it can automatically detect the frequencies to notch, but our
# implementation generally does not do so reliably, so we specify the
# frequencies to remove instead, and it does a good job of removing the
# line noise at those frequencies:
raw_notch_fit = raw.copy().notch_filter(
freqs=freqs, picks=meg_picks, method='spectrum_fit', filter_length='10s')
for title, data in zip(['Un', 'spectrum_fit '], [raw, raw_notch_fit]):
fig = data.plot_psd(fmax=250, average=True)
fig.subplots_adjust(top=0.85)
fig.suptitle('{}filtered'.format(title), size='xx-large', weight='bold')
add_arrows(fig.axes[:2])
###############################################################################
# Resampling
# ^^^^^^^^^^
#
# EEG and MEG recordings are notable for their high temporal precision, and are
# often recorded with sampling rates around 1000 Hz or higher. This is good
# when precise timing of events is important to the experimental design or
# analysis plan, but also consumes more memory and computational resources when
# processing the data. In cases where high-frequency components of the signal
# are not of interest and precise timing is not needed (e.g., computing EOG or
# ECG projectors on a long recording), downsampling the signal can be a useful
# time-saver.
#
# In MNE-Python, the resampling methods (:meth:`raw.resample()
# <mne.io.Raw.resample>`, :meth:`epochs.resample() <mne.Epochs.resample>` and
# :meth:`evoked.resample() <mne.Evoked.resample>`) apply a low-pass filter to
# the signal to avoid `aliasing`_, so you don't need to explicitly filter it
# yourself first. This built-in filtering that happens when using
# :meth:`raw.resample() <mne.io.Raw.resample>`, :meth:`epochs.resample()
# <mne.Epochs.resample>`, or :meth:`evoked.resample() <mne.Evoked.resample>` is
# a brick-wall filter applied in the frequency domain at the `Nyquist
# frequency`_ of the desired new sampling rate. This can be clearly seen in the
# PSD plot, where a dashed vertical line indicates the filter cutoff; the
# original data had an existing lowpass at around 172 Hz (see
# ``raw.info['lowpass']``), and the data resampled from 600 Hz to 200 Hz gets
# automatically lowpass filtered at 100 Hz (the `Nyquist frequency`_ for a
# target rate of 200 Hz):
raw_downsampled = raw.copy().resample(sfreq=200)
for data, title in zip([raw, raw_downsampled], ['Original', 'Downsampled']):
fig = data.plot_psd(average=True)
fig.subplots_adjust(top=0.9)
fig.suptitle(title)
plt.setp(fig.axes, xlim=(0, 300))
###############################################################################
# Because resampling involves filtering, there are some pitfalls to resampling
# at different points in the analysis stream:
#
# - Performing resampling on :class:`~mne.io.Raw` data (*before* epoching) will
# negatively affect the temporal precision of Event arrays, by causing
# `jitter`_ in the event timing. This reduced temporal precision will
# propagate to subsequent epoching operations.
#
# - Performing resampling *after* epoching can introduce edge artifacts *on
# every epoch*, whereas filtering the :class:`~mne.io.Raw` object will only
# introduce artifacts at the start and end of the recording (which is often
#   far enough from the first and last epochs to have no effect on the
# analysis).
#
# The following section suggests best practices to mitigate both of these
# issues.
#
#
# Best practices
# ~~~~~~~~~~~~~~
#
# To avoid the reduction in temporal precision of events that comes with
# resampling a :class:`~mne.io.Raw` object, and also avoid the edge artifacts
# that come with filtering an :class:`~mne.Epochs` or :class:`~mne.Evoked`
# object, the best practice is to:
#
# 1. low-pass filter the :class:`~mne.io.Raw` data at or below
# :math:`\frac{1}{3}` of the desired sample rate, then
#
# 2. decimate the data after epoching, by either passing the ``decim``
# parameter to the :class:`~mne.Epochs` constructor, or using the
# :meth:`~mne.Epochs.decimate` method after the :class:`~mne.Epochs` have
# been created.
#
# .. warning::
# The recommendation for setting the low-pass corner frequency at
# :math:`\frac{1}{3}` of the desired sample rate is a fairly safe rule of
# thumb based on the default settings in :meth:`raw.filter()
# <mne.io.Raw.filter>` (which are different from the filter settings used
# inside the :meth:`raw.resample() <mne.io.Raw.resample>` method). If you
# use a customized lowpass filter (specifically, if your transition
# bandwidth is wider than 0.5× the lowpass cutoff), downsampling to 3× the
# lowpass cutoff may still not be enough to avoid `aliasing`_, and
# MNE-Python will not warn you about it (because the :class:`raw.info
# <mne.Info>` object only keeps track of the lowpass cutoff, not the
# transition bandwidth). Conversely, if you use a steeper filter, the
# warning may be too sensitive. If you are unsure, plot the PSD of your
# filtered data *before decimating* and ensure that there is no content in
# the frequencies above the `Nyquist frequency`_ of the sample rate you'll
# end up with *after* decimation.
#
# Note that this method of manually filtering and decimating is exact only when
# the original sampling frequency is an integer multiple of the desired new
# sampling frequency. Since the sampling frequency of our example data is
# 600.614990234375 Hz, ending up with a specific sampling frequency like (say)
# 90 Hz will not be possible:
current_sfreq = raw.info['sfreq']
desired_sfreq = 90 # Hz
decim = np.round(current_sfreq / desired_sfreq).astype(int)
obtained_sfreq = current_sfreq / decim
lowpass_freq = obtained_sfreq / 3.
raw_filtered = raw.copy().filter(l_freq=None, h_freq=lowpass_freq)
events = mne.find_events(raw_filtered)
epochs = mne.Epochs(raw_filtered, events, decim=decim)
print('desired sampling frequency was {} Hz; decim factor of {} yielded an '
'actual sampling frequency of {} Hz.'
.format(desired_sfreq, decim, epochs.info['sfreq']))
###############################################################################
# If for some reason you cannot follow the above-recommended best practices,
# you should at the very least either:
#
# 1. resample the data *after* epoching, and make your epochs long enough that
# edge effects from the filtering do not affect the temporal span of the
# epoch that you hope to analyze / interpret; or
#
# 2. perform resampling on the :class:`~mne.io.Raw` object and its
# corresponding Events array *simultaneously* so that they stay more or less
# in synch. This can be done by passing the Events array as the
# ``events`` parameter to :meth:`raw.resample() <mne.io.Raw.resample>`.
#
#
# .. LINKS
#
# .. _`AC power line frequency`:
# https://en.wikipedia.org/wiki/Mains_electricity
# .. _`aliasing`: https://en.wikipedia.org/wiki/Anti-aliasing_filter
# .. _`jitter`: https://en.wikipedia.org/wiki/Jitter
# .. _`Nyquist frequency`: https://en.wikipedia.org/wiki/Nyquist_frequency
|
bsd-3-clause
|
wlamond/scikit-learn
|
examples/linear_model/plot_iris_logistic.py
|
119
|
1679
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on
the `iris <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the Logistic Regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
hugobowne/scikit-learn
|
sklearn/cluster/dbscan_.py
|
21
|
12195
|
# -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <[email protected]>
# Joel Nothman <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
algorithm='auto', leaf_size=30, p=2, sample_weight=None, n_jobs=1):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i; while true, this is useless information).
if metric == 'precomputed' and sparse.issparse(X):
neighborhoods = np.empty(X.shape[0], dtype=object)
X.sum_duplicates() # XXX: modifies X's internals in-place
X_mask = X.data <= eps
masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1]
# insert the diagonal: a point is its own neighbor, but 0 distance
# means absence from sparse matrix data
masked_indices = np.insert(masked_indices, masked_indptr,
np.arange(X.shape[0]))
masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
# split into rows
neighborhoods[:] = np.split(masked_indices, masked_indptr)
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p,
n_jobs=n_jobs)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
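# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of calling the functional interface above on
# synthetic data; ``make_blobs`` and every parameter value below are
# assumptions chosen purely for illustration.
def _demo_dbscan_function():
    """Run the module-level ``dbscan`` on synthetic blobs and return its output."""
    from sklearn.datasets import make_blobs  # assumed available
    X_demo, _ = make_blobs(n_samples=100, centers=3, cluster_std=0.3,
                           random_state=0)
    # core_indices holds the row indices of core samples; labels uses -1 for
    # noise and non-negative integers for cluster membership.
    core_indices, demo_labels = dbscan(X_demo, eps=0.5, min_samples=5)
    return core_indices, demo_labels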
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
.. versionadded:: 0.17
           the *precomputed* metric now accepts a precomputed sparse matrix.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
algorithm='auto', leaf_size=30, p=None, n_jobs=1):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.n_jobs = n_jobs
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight,
**self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
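# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the estimator interface defined above; the synthetic
# data helper ``make_blobs`` and the chosen parameter values are assumptions
# for illustration only, not recommendations.
def _demo_dbscan_estimator():
    """Fit the ``DBSCAN`` estimator on synthetic blobs and return it."""
    from sklearn.datasets import make_blobs  # assumed available
    X_demo, _ = make_blobs(n_samples=100, centers=3, cluster_std=0.3,
                           random_state=0)
    est = DBSCAN(eps=0.5, min_samples=5).fit(X_demo)
    # est.labels_ holds the cluster assignment (-1 marks noise);
    # est.core_sample_indices_ and est.components_ describe the core samples.
    return est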
|
bsd-3-clause
|
yunfeilu/scikit-learn
|
examples/preprocessing/plot_function_transformer.py
|
161
|
1949
|
"""
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
plt.show()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
s=50,
)
plt.show()
|
bsd-3-clause
|
ddervs/GreenGraph
|
greengraph/tests/fixtures/generate_green_between_fixtures.py
|
1
|
3269
|
import numpy as np
import requests
from StringIO import StringIO
from matplotlib import image as img
import geopy
import yaml
# First hard-code what is needed for correct output of green_between()
class Map(object):
def __init__(self, latitude, longitude, satellite=True,
zoom=10, size=(400, 400), sensor=False):
base = "http://maps.googleapis.com/maps/api/staticmap?"
params = dict(
sensor=str(sensor).lower(),
zoom=zoom,
size="x".join(map(str, size)),
center=",".join(map(str, (latitude, longitude))),
style="feature:all|element:labels|visibility:off"
)
if satellite:
params["maptype"] = "satellite"
self.image = requests.get(base, params=params).content
# Fetch our PNG image data
self.pixels = img.imread(StringIO(self.image))
# Parse our PNG image as a numpy array
def green(self, threshold):
# Use NumPy to build an element-by-element logical array
greener_than_red = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 0]
greener_than_blue = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 2]
green = np.logical_and(greener_than_red, greener_than_blue)
return green
def count_green(self, threshold=1.1):
return np.sum(self.green(threshold))
class Greengraph(object):
def __init__(self, start, end):
self.start = start
self.end = end
self.geocoder = geopy.geocoders.GoogleV3(
domain="maps.google.co.uk")
def geolocate(self, place):
return self.geocoder.geocode(place,
exactly_one=False)[0][1]
def location_sequence(self, start, end, steps):
lats = np.linspace(start[0], end[0], steps)
longs = np.linspace(start[1], end[1], steps)
return np.vstack([lats, longs]).transpose()
def green_between(self, steps):
        return [Map(*location).count_green()
for location in self.location_sequence(
self.geolocate(self.start),
self.geolocate(self.end),
steps)]
# Now build fixtures method
def build_fixture(start, end, steps):
my_graph = Greengraph(start, end)
locations = my_graph.location_sequence(
my_graph.geolocate(my_graph.start),
my_graph.geolocate(my_graph.end),
steps)
green_counts = [None]*len(locations)
for i in range(0, len(locations)):
location = locations[i]
green_counts[i] = Map(*location).count_green()
start_location = my_graph.geolocate(my_graph.start)
end_location = my_graph.geolocate(my_graph.end)
return eval(str(dict(start=start, end=end, start_location=start_location, end_location=end_location,
green_counts=green_counts, steps=steps)))
# Write YAML file
with open('green_between_fixtures.yaml', 'w') as file_to_write:
file_to_write.write(yaml.dump([build_fixture('Paris', 'Chicago', 10)]))
file_to_write.write(yaml.dump([build_fixture('Matlab', 'Bangkok', 10)]))
file_to_write.write(yaml.dump([build_fixture('London', 'Bristol', 10)]))
|
mit
|
lehinevych/Dato-Core
|
src/unity/python/graphlab/test/test_io.py
|
13
|
15881
|
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
import commands
import json
import logging
import os
import re
import tempfile
import unittest
import pandas
import graphlab
import graphlab.connect.main as glconnect
import graphlab.sys_util as _sys_util
from graphlab.test.util import create_server, start_test_tcp_server
from pandas.util.testing import assert_frame_equal
def _test_save_load_object_helper(testcase, obj, url):
"""
Helper function to test save and load a server side object to a given url.
"""
def cleanup(url):
"""
Remove the saved file from temp directory.
"""
protocol = None
path = None
splits = url.split("://")
if len(splits) > 1:
protocol = splits[0]
path = splits[1]
else:
path = url
        if not protocol or protocol == "local" or protocol == "remote":
tempdir = tempfile.gettempdir()
pattern = path + ".*"
for f in os.listdir(tempdir):
if re.search(pattern, f):
os.remove(os.path.join(tempdir, f))
if isinstance(obj, graphlab.SGraph):
obj.save(url + ".graph")
newobj = graphlab.load_graph(url + ".graph")
testcase.assertItemsEqual(obj.get_fields(), newobj.get_fields())
testcase.assertDictEqual(obj.summary(), newobj.summary())
elif isinstance(obj, graphlab.Model):
obj.save(url + ".model")
newobj = graphlab.load_model(url + ".model")
testcase.assertItemsEqual(obj.list_fields(), newobj.list_fields())
testcase.assertEqual(type(obj), type(newobj))
elif isinstance(obj, graphlab.SFrame):
obj.save(url + ".frame_idx")
newobj = graphlab.load_sframe(url + ".frame_idx")
testcase.assertEqual(obj.shape, newobj.shape)
testcase.assertEqual(obj.column_names(), newobj.column_names())
testcase.assertEqual(obj.column_types(), newobj.column_types())
assert_frame_equal(obj.head(obj.num_rows()).to_dataframe(),
newobj.head(newobj.num_rows()).to_dataframe())
else:
raise TypeError
cleanup(url)
def create_test_objects():
vertices = pandas.DataFrame({'vid': ['1', '2', '3'],
'color': ['g', 'r', 'b'],
'vec': [[.1, .1, .1], [.1, .1, .1], [.1, .1, .1]]})
edges = pandas.DataFrame({'src_id': ['1', '2', '3'],
'dst_id': ['2', '3', '4'],
'weight': [0., 0.1, 1.]})
graph = graphlab.SGraph().add_vertices(vertices, 'vid').add_edges(edges, 'src_id', 'dst_id')
sframe = graphlab.SFrame(edges)
model = graphlab.pagerank.create(graph)
return (graph, sframe, model)
class LocalFSConnectorTests(unittest.TestCase):
@classmethod
def setUpClass(self):
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe, self.model) = create_test_objects()
def _test_read_write_helper(self, url, content):
url = graphlab.util._make_internal_url(url)
glconnect.get_unity().__write__(url, content)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content)
if os.path.exists(url):
os.remove(url)
def test_object_save_load(self):
for prefix in ['', 'local://', 'remote://']:
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.model, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
def test_basic(self):
self._test_read_write_helper(self.tempfile, 'hello world')
self._test_read_write_helper("local://" + self.tempfile + ".csv", 'hello,world,woof')
self._test_read_write_helper("remote://" + self.tempfile + ".csv", 'hello,world,woof')
def test_gzip(self):
self._test_read_write_helper(self.tempfile + ".gz", 'hello world')
self._test_read_write_helper(self.tempfile + ".csv.gz", 'hello world')
self._test_read_write_helper("local://" + self.tempfile + ".csv.gz", 'hello world')
self._test_read_write_helper("remote://" + self.tempfile + ".csv.gz", 'hello world')
def test_exception(self):
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("/root/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("/root/tmp", '.....'))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("/root/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("/root/tmp", '.....'))
self.assertRaises(IOError, lambda: self.graph.save("/root/tmp.graph"))
self.assertRaises(IOError, lambda: self.sframe.save("/root/tmp.frame_idx"))
self.assertRaises(IOError, lambda: self.model.save("/root/tmp.model"))
self.assertRaises(IOError, lambda: graphlab.load_graph("/root/tmp.graph"))
self.assertRaises(IOError, lambda: graphlab.load_sframe("/root/tmp.frame_idx"))
self.assertRaises(IOError, lambda: graphlab.load_model("/root/tmp.model"))
class RemoteFSConnectorTests(unittest.TestCase):
@classmethod
def setUpClass(self):
glconnect.stop()
auth_token = 'graphlab_awesome'
self.server = start_test_tcp_server(auth_token=auth_token)
glconnect.launch(self.server.get_server_addr(), auth_token=auth_token)
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe, self.model) = create_test_objects()
@classmethod
def tearDownClass(self):
glconnect.stop()
self.server.stop()
def _test_read_write_helper(self, url, content):
url = graphlab.util._make_internal_url(url)
glconnect.get_unity().__write__(url, content)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content)
def test_basic(self):
self._test_read_write_helper("remote://" + self.tempfile, 'hello,world,woof')
def test_gzip(self):
self._test_read_write_helper("remote://" + self.tempfile + ".csv.gz", 'hello,world,woof')
def test_object_save_load(self):
prefix = "remote://"
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.model, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
def test_exception(self):
self.assertRaises(ValueError, lambda: self._test_read_write_helper(self.tempfile, 'hello world'))
self.assertRaises(ValueError, lambda: self._test_read_write_helper("local://" + self.tempfile + ".csv.gz", 'hello,world,woof'))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("remote:///root/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("remote:///root/tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("remote:///root/tmp", '.....'))
self.assertRaises(IOError, lambda: self.graph.save("remote:///root/tmp.graph"))
self.assertRaises(IOError, lambda: self.sframe.save("remote:///root/tmp.frame_idx"))
self.assertRaises(IOError, lambda: self.model.save("remote:///root/tmp.model"))
self.assertRaises(IOError, lambda: graphlab.load_graph("remote:///root/tmp.graph"))
self.assertRaises(IOError, lambda: graphlab.load_sframe("remote:///root/tmp.frame_idx"))
self.assertRaises(IOError, lambda: graphlab.load_model("remote:///root/tmp.model"))
class HttpConnectorTests(unittest.TestCase):
@classmethod
def setUpClass(self):
self.url = "http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz"
def _test_read_helper(self, url, content_expected):
url = graphlab.util._make_internal_url(url)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content_expected)
def test_read(self):
expected = "\n".join([str(unichr(i + ord('a'))) for i in range(26)])
expected = expected + "\n"
self._test_read_helper(self.url, expected)
def test_exception(self):
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__(self.url, '.....'))
@unittest.skip("Disabling HDFS Connector Tests")
class HDFSConnectorTests(unittest.TestCase):
    # This test requires hadoop to be installed and available in $PATH.
# If not, the tests will be skipped.
@classmethod
def setUpClass(self):
self.has_hdfs = len(_sys_util.get_hadoop_class_path()) > 0
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe, self.model) = create_test_objects()
def _test_read_write_helper(self, url, content_expected):
url = graphlab.util._make_internal_url(url)
glconnect.get_unity().__write__(url, content_expected)
content_read = glconnect.get_unity().__read__(url)
self.assertEquals(content_read, content_expected)
# clean up the file we wrote
status, output = commands.getstatusoutput('hadoop fs -test -e ' + url)
        if status == 0:
commands.getstatusoutput('hadoop fs -rm ' + url)
def test_basic(self):
if self.has_hdfs:
self._test_read_write_helper("hdfs://" + self.tempfile, 'hello,world,woof')
else:
logging.getLogger(__name__).info("No hdfs avaiable. Test pass.")
def test_gzip(self):
if self.has_hdfs:
self._test_read_write_helper("hdfs://" + self.tempfile + ".gz", 'hello,world,woof')
self._test_read_write_helper("hdfs://" + self.tempfile + ".csv.gz", 'hello,world,woof')
else:
logging.getLogger(__name__).info("No hdfs avaiable. Test pass.")
def test_object_save_load(self):
if self.has_hdfs:
prefix = "hdfs://"
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.model, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
else:
logging.getLogger(__name__).info("No hdfs avaiable. Test pass.")
def test_exception(self):
bad_url = "hdfs:///root/"
if self.has_hdfs:
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("hdfs:///"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("hdfs:///tmp"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("hdfs://" + self.tempfile))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__(bad_url + "/tmp", "somerandomcontent"))
self.assertRaises(IOError, lambda: self.graph.save(bad_url + "x.graph"))
self.assertRaises(IOError, lambda: self.sframe.save(bad_url + "x.frame_idx"))
self.assertRaises(IOError, lambda: self.model.save(bad_url + "x.model"))
self.assertRaises(IOError, lambda: graphlab.load_graph(bad_url + "mygraph"))
self.assertRaises(IOError, lambda: graphlab.load_sframe(bad_url + "x.frame_idx"))
self.assertRaises(IOError, lambda: graphlab.load_model(bad_url + "x.model"))
else:
logging.getLogger(__name__).info("No hdfs avaiable. Test pass.")
@unittest.skip("Disabling S3 Connector Tests")
class S3ConnectorTests(unittest.TestCase):
# This test requires aws cli to be installed. If not, the tests will be skipped.
@classmethod
def setUpClass(self):
status, output = commands.getstatusoutput('aws s3api list-buckets')
        self.has_s3 = (status == 0)
self.standard_bucket = None
self.regional_bucket = None
        # Use aws cli s3api to find a bucket with "gl-testdata" in the name, and use it as our test bucket.
        # Temp files will be read from / written to the test bucket's /tmp folder and be cleared on exit.
if self.has_s3:
try:
json_output = json.loads(output)
bucket_list = [b['Name'] for b in json_output['Buckets']]
assert 'gl-testdata' in bucket_list
assert 'gl-testdata-oregon' in bucket_list
self.standard_bucket = 'gl-testdata'
self.regional_bucket = 'gl-testdata-oregon'
self.tempfile = tempfile.NamedTemporaryFile().name
(self.graph, self.sframe, self.model) = create_test_objects()
except:
logging.getLogger(__name__).warning("Fail parsing ioutput of s3api into json. Please check your awscli version.")
self.has_s3 = False
def _test_read_write_helper(self, url, content_expected):
s3url = graphlab.util._make_internal_url(url)
glconnect.get_unity().__write__(s3url, content_expected)
content_read = glconnect.get_unity().__read__(s3url)
self.assertEquals(content_read, content_expected)
(status, output) = commands.getstatusoutput('aws s3 rm --region us-west-2 ' + url)
        if status != 0:
logging.getLogger(__name__).warning("Cannot remove file: " + url)
def test_basic(self):
if self.has_s3:
for bucket in [self.standard_bucket, self.regional_bucket]:
self._test_read_write_helper("s3://" + bucket + self.tempfile, 'hello,world,woof')
else:
logging.getLogger(__name__).info("No s3 bucket avaiable. Test pass.")
def test_gzip(self):
if self.has_s3:
self._test_read_write_helper("s3://" + self.standard_bucket + self.tempfile + ".gz", 'hello,world,woof')
else:
logging.getLogger(__name__).info("No s3 bucket avaiable. Test pass.")
def test_object_save_load(self):
if self.has_s3:
prefix = "s3://" + self.standard_bucket
_test_save_load_object_helper(self, self.graph, prefix + self.tempfile)
_test_save_load_object_helper(self, self.model, prefix + self.tempfile)
_test_save_load_object_helper(self, self.sframe, prefix + self.tempfile)
else:
logging.getLogger(__name__).info("No s3 bucket avaiable. Test pass.")
def test_exception(self):
if self.has_s3:
bad_bucket = "i_am_a_bad_bucket"
prefix = "s3://" + bad_bucket
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("s3:///"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("s3://" + self.standard_bucket + "/somerandomfile"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__read__("s3://" + "/somerandomfile"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("s3://" + "/somerandomfile", "somerandomcontent"))
self.assertRaises(IOError, lambda: glconnect.get_unity().__write__("s3://" + self.standard_bucket + "I'amABadUrl/", "somerandomcontent"))
self.assertRaises(IOError, lambda: self.graph.save(prefix + "/x.graph"))
self.assertRaises(IOError, lambda: self.sframe.save(prefix + "/x.frame_idx"))
self.assertRaises(IOError, lambda: self.model.save(prefix + "/x.model"))
self.assertRaises(IOError, lambda: graphlab.load_graph(prefix + "/x.graph"))
self.assertRaises(IOError, lambda: graphlab.load_sframe(prefix + "/x.frame_idx"))
self.assertRaises(IOError, lambda: graphlab.load_model(prefix + "/x.model"))
else:
logging.getLogger(__name__).info("No s3 bucket avaiable. Test pass.")
|
agpl-3.0
|
ky822/scikit-learn
|
sklearn/cluster/tests/test_affinity_propagation.py
|
341
|
2620
|
"""
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
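# --- Illustrative usage sketch (not part of the original test module) ---
# A hedged example, mirroring what the tests above exercise, of fitting
# AffinityPropagation on the module-level blobs ``X``; the preference value
# is an assumption chosen only for illustration.
def _demo_affinity_propagation():
    """Fit AffinityPropagation on ``X`` and return (labels, exemplar indices)."""
    af = AffinityPropagation(preference=-50).fit(X)
    # labels_ assigns each sample to a cluster; cluster_centers_indices_
    # points at the exemplars the algorithm selected.
    return af.labels_, af.cluster_centers_indices_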
|
bsd-3-clause
|
arbuz001/sms-tools
|
lectures/03-Fourier-properties/plots-code/symmetry-real-even.py
|
26
|
1150
|
import matplotlib.pyplot as plt
import numpy as np
import sys
import math
from scipy.signal import triang
from scipy.fftpack import fft, fftshift
M = 127
N = 128
hM1 = int(math.floor((M+1)/2))
hM2 = int(math.floor(M/2))
x = triang(M)
fftbuffer = np.zeros(N)
fftbuffer[:hM1] = x[hM2:]
fftbuffer[N-hM2:] = x[:hM2]
X = fftshift(fft(fftbuffer))
mX = abs(X)
pX = np.unwrap(np.angle(X))
plt.figure(1, figsize=(9.5, 4))
plt.subplot(311)
plt.title('x[n]')
plt.plot(np.arange(-hM2, hM1, 1.0), x, 'b', lw=1.5)
plt.axis([-hM2, hM1, 0, 1])
plt.subplot(323)
plt.title('real(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), np.real(X), 'r', lw=1.5)
plt.axis([-N/2, N/2, min(np.real(X)), max(np.real(X))])
plt.subplot(324)
plt.title('im(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), np.imag(X), 'c', lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.subplot(325)
plt.title('abs(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), mX, 'r', lw=1.5)
plt.axis([-N/2,N/2,min(mX),max(mX)])
plt.subplot(326)
plt.title('angle(X)')
plt.plot(np.arange(-N/2, N/2, 1.0), pX, 'c', lw=1.5)
plt.axis([-N/2, N/2, -1, 1])
plt.tight_layout()
plt.savefig('symmetry-real-even.png')
plt.show()
|
agpl-3.0
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/pandas/tseries/holiday.py
|
9
|
16177
|
import warnings
from pandas import DateOffset, DatetimeIndex, Series, Timestamp
from pandas.compat import add_metaclass
from datetime import datetime, timedelta
from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU # noqa
from pandas.tseries.offsets import Easter, Day
import numpy as np
def next_monday(dt):
"""
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday, use Monday instead
"""
if dt.weekday() == 5:
return dt + timedelta(2)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_monday_or_tuesday(dt):
"""
    For the second of two adjacent holidays:
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday or Monday, use following Tuesday instead
(because Monday is already taken by adjacent holiday on the day before)
"""
dow = dt.weekday()
if dow == 5 or dow == 6:
return dt + timedelta(2)
elif dow == 0:
return dt + timedelta(1)
return dt
def previous_friday(dt):
"""
If holiday falls on Saturday or Sunday, use previous Friday instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt - timedelta(2)
return dt
def sunday_to_monday(dt):
"""
If holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 6:
return dt + timedelta(1)
return dt
def weekend_to_monday(dt):
"""
If holiday falls on Sunday or Saturday,
use day thereafter (Monday) instead.
Needed for holidays such as Christmas observation in Europe
"""
if dt.weekday() == 6:
return dt + timedelta(1)
elif dt.weekday() == 5:
return dt + timedelta(2)
return dt
def nearest_workday(dt):
"""
If holiday falls on Saturday, use day before (Friday) instead;
if holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_workday(dt):
"""
returns next weekday used for observances
"""
dt += timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt += timedelta(days=1)
return dt
def previous_workday(dt):
"""
returns previous weekday used for observances
"""
dt -= timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt -= timedelta(days=1)
return dt
def before_nearest_workday(dt):
"""
returns previous workday after nearest workday
"""
return previous_workday(nearest_workday(dt))
def after_nearest_workday(dt):
"""
returns next workday after nearest workday
needed for Boxing day or multiple holidays in a series
"""
return next_workday(nearest_workday(dt))
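# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the observance helpers above; 2015-07-04 is used only
# because it fell on a Saturday, so the weekend-shifting behaviour is visible.
def _demo_observance_helpers():
    """Show how a Saturday holiday is shifted by a few observance rules."""
    saturday = datetime(2015, 7, 4)
    return {
        'nearest_workday': nearest_workday(saturday),      # preceding Friday
        'next_monday': next_monday(saturday),              # following Monday
        'weekend_to_monday': weekend_to_monday(saturday),  # following Monday
    }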
class Holiday(object):
"""
Class that defines a holiday with start/end dates and rules
for observance.
"""
def __init__(self, name, year=None, month=None, day=None, offset=None,
observance=None, start_date=None, end_date=None,
days_of_week=None):
"""
Parameters
----------
name : str
            Name of the holiday, defaults to class name
offset : array of pandas.tseries.offsets or
class from pandas.tseries.offsets
computes offset from date
        observance : function
            computes the observed date of the holiday, given a pandas Timestamp
        days_of_week : tuple
            provide a tuple of days, e.g. (0, 1, 2, 3) for Monday through Thursday
            Monday=0, ..., Sunday=6
Examples
--------
>>> from pandas.tseries.holiday import Holiday, nearest_workday
>>> from pandas import DateOffset
>>> from dateutil.relativedelta import MO
>>> USMemorialDay = Holiday('MemorialDay', month=5, day=24,
offset=DateOffset(weekday=MO(1)))
>>> USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=DateOffset(weekday=MO(1)))
>>> July3rd = Holiday('July 3rd', month=7, day=3,)
>>> NewYears = Holiday('New Years Day', month=1, day=1,
observance=nearest_workday),
>>> July3rd = Holiday('July 3rd', month=7, day=3,
days_of_week=(0, 1, 2, 3))
"""
if offset is not None and observance is not None:
raise NotImplementedError("Cannot use both offset and observance.")
self.name = name
self.year = year
self.month = month
self.day = day
self.offset = offset
self.start_date = Timestamp(
start_date) if start_date is not None else start_date
self.end_date = Timestamp(
end_date) if end_date is not None else end_date
self.observance = observance
assert (days_of_week is None or type(days_of_week) == tuple)
self.days_of_week = days_of_week
def __repr__(self):
info = ''
if self.year is not None:
info += 'year=%s, ' % self.year
info += 'month=%s, day=%s, ' % (self.month, self.day)
if self.offset is not None:
info += 'offset=%s' % self.offset
if self.observance is not None:
info += 'observance=%s' % self.observance
repr = 'Holiday: %s (%s)' % (self.name, info)
return repr
def dates(self, start_date, end_date, return_name=False):
"""
Calculate holidays observed between start date and end date
Parameters
----------
start_date : starting date, datetime-like, optional
end_date : ending date, datetime-like, optional
return_name : bool, optional, default=False
If True, return a series that has dates and holiday names.
False will only return dates.
"""
start_date = Timestamp(start_date)
end_date = Timestamp(end_date)
filter_start_date = start_date
filter_end_date = end_date
if self.year is not None:
dt = Timestamp(datetime(self.year, self.month, self.day))
if return_name:
return Series(self.name, index=[dt])
else:
return [dt]
dates = self._reference_dates(start_date, end_date)
holiday_dates = self._apply_rule(dates)
if self.days_of_week is not None:
holiday_dates = holiday_dates[np.in1d(holiday_dates.dayofweek,
self.days_of_week)]
if self.start_date is not None:
filter_start_date = max(self.start_date.tz_localize(
filter_start_date.tz), filter_start_date)
if self.end_date is not None:
filter_end_date = min(self.end_date.tz_localize(
filter_end_date.tz), filter_end_date)
holiday_dates = holiday_dates[(holiday_dates >= filter_start_date) &
(holiday_dates <= filter_end_date)]
if return_name:
return Series(self.name, index=holiday_dates)
return holiday_dates
def _reference_dates(self, start_date, end_date):
"""
Get reference dates for the holiday.
Return reference dates for the holiday also returning the year
prior to the start_date and year following the end_date. This ensures
that any offsets to be applied will yield the holidays within
the passed in dates.
"""
if self.start_date is not None:
start_date = self.start_date.tz_localize(start_date.tz)
if self.end_date is not None:
end_date = self.end_date.tz_localize(start_date.tz)
year_offset = DateOffset(years=1)
reference_start_date = Timestamp(
datetime(start_date.year - 1, self.month, self.day))
reference_end_date = Timestamp(
datetime(end_date.year + 1, self.month, self.day))
# Don't process unnecessary holidays
dates = DatetimeIndex(start=reference_start_date,
end=reference_end_date,
freq=year_offset, tz=start_date.tz)
return dates
def _apply_rule(self, dates):
"""
Apply the given offset/observance to a DatetimeIndex of dates.
Parameters
----------
dates : DatetimeIndex
Dates to apply the given offset/observance rule
Returns
-------
Dates with rules applied
"""
if self.observance is not None:
return dates.map(lambda d: self.observance(d))
if self.offset is not None:
if not isinstance(self.offset, list):
offsets = [self.offset]
else:
offsets = self.offset
for offset in offsets:
# if we are adding a non-vectorized value
# ignore the PerformanceWarnings:
with warnings.catch_warnings(record=True):
dates += offset
return dates
holiday_calendars = {}
def register(cls):
try:
name = cls.name
except:
name = cls.__name__
holiday_calendars[name] = cls
def get_calendar(name):
"""
Return an instance of a calendar based on its name.
Parameters
----------
name : str
Calendar name to return an instance of
"""
return holiday_calendars[name]()
class HolidayCalendarMetaClass(type):
def __new__(cls, clsname, bases, attrs):
calendar_class = super(HolidayCalendarMetaClass, cls).__new__(
cls, clsname, bases, attrs)
register(calendar_class)
return calendar_class
@add_metaclass(HolidayCalendarMetaClass)
class AbstractHolidayCalendar(object):
"""
Abstract interface to create holidays following certain rules.
"""
__metaclass__ = HolidayCalendarMetaClass
rules = []
start_date = Timestamp(datetime(1970, 1, 1))
end_date = Timestamp(datetime(2030, 12, 31))
_cache = None
def __init__(self, name=None, rules=None):
"""
        Initializes holiday object with a given set of rules. Normally
classes just have the rules defined within them.
Parameters
----------
name : str
Name of the holiday calendar, defaults to class name
rules : array of Holiday objects
A set of rules used to create the holidays.
"""
super(AbstractHolidayCalendar, self).__init__()
if name is None:
name = self.__class__.__name__
self.name = name
if rules is not None:
self.rules = rules
def rule_from_name(self, name):
for rule in self.rules:
if rule.name == name:
return rule
return None
def holidays(self, start=None, end=None, return_name=False):
"""
        Returns a DatetimeIndex of holidays between start_date and end_date
Parameters
----------
start : starting date, datetime-like, optional
end : ending date, datetime-like, optional
return_name : bool, optional
If True, return a series that has dates and holiday names.
False will only return a DatetimeIndex of dates.
Returns
-------
DatetimeIndex of holidays
"""
if self.rules is None:
raise Exception('Holiday Calendar %s does not have any '
'rules specified' % self.name)
if start is None:
start = AbstractHolidayCalendar.start_date
if end is None:
end = AbstractHolidayCalendar.end_date
start = Timestamp(start)
end = Timestamp(end)
holidays = None
# If we don't have a cache or the dates are outside the prior cache, we
# get them again
if (self._cache is None or start < self._cache[0] or
end > self._cache[1]):
for rule in self.rules:
rule_holidays = rule.dates(start, end, return_name=True)
if holidays is None:
holidays = rule_holidays
else:
holidays = holidays.append(rule_holidays)
self._cache = (start, end, holidays.sort_index())
holidays = self._cache[2]
holidays = holidays[start:end]
if return_name:
return holidays
else:
return holidays.index
@staticmethod
def merge_class(base, other):
"""
Merge holiday calendars together. The base calendar
        will take precedence over other. The merge will be done
based on each holiday's name.
Parameters
----------
base : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
other : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
"""
try:
other = other.rules
except:
pass
if not isinstance(other, list):
other = [other]
other_holidays = dict((holiday.name, holiday) for holiday in other)
try:
base = base.rules
except:
pass
if not isinstance(base, list):
base = [base]
base_holidays = dict([(holiday.name, holiday) for holiday in base])
other_holidays.update(base_holidays)
return list(other_holidays.values())
def merge(self, other, inplace=False):
"""
Merge holiday calendars together. The caller's class
rules take precedence. The merge will be done
based on each holiday's name.
Parameters
----------
other : holiday calendar
inplace : bool (default=False)
            If True, set the calendar's rules to the merged holidays; else return an array of Holidays
"""
holidays = self.merge_class(self, other)
if inplace:
self.rules = holidays
else:
return holidays
USMemorialDay = Holiday('MemorialDay', month=5, day=31,
offset=DateOffset(weekday=MO(-1)))
USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=DateOffset(weekday=MO(1)))
USColumbusDay = Holiday('Columbus Day', month=10, day=1,
offset=DateOffset(weekday=MO(2)))
USThanksgivingDay = Holiday('Thanksgiving', month=11, day=1,
offset=DateOffset(weekday=TH(4)))
USMartinLutherKingJr = Holiday('Dr. Martin Luther King Jr.',
start_date=datetime(1986, 1, 1), month=1, day=1,
offset=DateOffset(weekday=MO(3)))
USPresidentsDay = Holiday("President's Day", month=2, day=1,
offset=DateOffset(weekday=MO(3)))
GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)])
EasterMonday = Holiday("Easter Monday", month=1, day=1,
offset=[Easter(), Day(1)])
class USFederalHolidayCalendar(AbstractHolidayCalendar):
"""
US Federal Government Holiday Calendar based on rules specified by:
https://www.opm.gov/policy-data-oversight/
snow-dismissal-procedures/federal-holidays/
"""
rules = [
Holiday('New Years Day', month=1, day=1, observance=nearest_workday),
USMartinLutherKingJr,
USPresidentsDay,
USMemorialDay,
Holiday('July 4th', month=7, day=4, observance=nearest_workday),
USLaborDay,
USColumbusDay,
Holiday('Veterans Day', month=11, day=11, observance=nearest_workday),
USThanksgivingDay,
Holiday('Christmas', month=12, day=25, observance=nearest_workday)
]
def HolidayCalendarFactory(name, base, other,
base_class=AbstractHolidayCalendar):
rules = AbstractHolidayCalendar.merge_class(base, other)
calendar_class = type(name, (base_class,), {"rules": rules, "name": name})
return calendar_class
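# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the calendar machinery defined above: listing the US
# federal holidays for one year and building a merged calendar with
# ``HolidayCalendarFactory``. All objects used are defined in this module;
# the date range is an arbitrary assumption.
def _demo_holiday_calendars():
    """Return 2015 US federal holidays and the same range merged with GoodFriday."""
    cal = USFederalHolidayCalendar()
    federal_2015 = cal.holidays(start=datetime(2015, 1, 1),
                                end=datetime(2015, 12, 31))
    # Merge the federal rules with the GoodFriday rule into a new calendar class.
    ExampleCalendar = HolidayCalendarFactory('ExampleCalendar',
                                             USFederalHolidayCalendar,
                                             GoodFriday)
    merged_2015 = ExampleCalendar().holidays(start=datetime(2015, 1, 1),
                                             end=datetime(2015, 12, 31))
    return federal_2015, merged_2015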
|
mit
|
hainm/scipy
|
scipy/signal/windows.py
|
32
|
53971
|
"""The suite of window functions."""
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy import special, linalg
from scipy.fftpack import fft
from scipy._lib.six import string_types
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin',
'slepian', 'cosine', 'hann', 'exponential', 'tukey', 'get_window']
def boxcar(M, sym=True):
"""Return a boxcar or rectangular window.
Included for completeness, this is equivalent to no window at all.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
Whether the window is symmetric. (Has no effect for boxcar.)
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.boxcar(51)
>>> plt.plot(window)
>>> plt.title("Boxcar window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the boxcar window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
return np.ones(M, float)
def triang(M, sym=True):
"""Return a triangular window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.triang(51)
>>> plt.plot(window)
>>> plt.title("Triangular window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the triangular window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(1, (M + 1) // 2 + 1)
if M % 2 == 0:
w = (2 * n - 1.0) / M
w = np.r_[w, w[::-1]]
else:
w = 2 * n / (M + 1.0)
w = np.r_[w, w[-2::-1]]
if not sym and not odd:
w = w[:-1]
return w
def parzen(M, sym=True):
"""Return a Parzen window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.parzen(51)
>>> plt.plot(window)
>>> plt.title("Parzen window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Parzen window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
na = np.extract(n < -(M - 1) / 4.0, n)
nb = np.extract(abs(n) <= (M - 1) / 4.0, n)
wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0
wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 +
6 * (np.abs(nb) / (M / 2.0)) ** 3.0)
w = np.r_[wa, wb, wa[::-1]]
if not sym and not odd:
w = w[:-1]
return w
def bohman(M, sym=True):
"""Return a Bohman window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bohman(51)
>>> plt.plot(window)
>>> plt.title("Bohman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bohman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
fac = np.abs(np.linspace(-1, 1, M)[1:-1])
w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac)
w = np.r_[0, w, 0]
if not sym and not odd:
w = w[:-1]
return w
def blackman(M, sym=True):
r"""
Return a Blackman window.
The Blackman window is a taper formed by using the first three terms of
a summation of cosines. It was designed to have close to the minimal
leakage possible. It is close to optimal, only slightly worse than a
Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the Kaiser window.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackman(51)
>>> plt.plot(window)
>>> plt.title("Blackman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's blackman function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = (0.42 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1)) +
0.08 * np.cos(4.0 * np.pi * n / (M - 1)))
if not sym and not odd:
w = w[:-1]
return w
def nuttall(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window according to Nuttall.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.nuttall(51)
>>> plt.plot(window)
>>> plt.title("Nuttall window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Nuttall window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.3635819, 0.4891775, 0.1365995, 0.0106411]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def blackmanharris(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackmanharris(51)
>>> plt.plot(window)
>>> plt.title("Blackman-Harris window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman-Harris window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.35875, 0.48829, 0.14128, 0.01168]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def flattop(M, sym=True):
"""Return a flat top window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.flattop(51)
>>> plt.plot(window)
>>> plt.title("Flat top window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the flat top window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.2156, 0.4160, 0.2781, 0.0836, 0.0069]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac) +
a[4] * np.cos(4 * fac))
if not sym and not odd:
w = w[:-1]
return w
def bartlett(M, sym=True):
r"""
Return a Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The triangular window, with the first and last samples equal to zero
and the maximum value normalized to 1 (though the value 1 does not
appear if `M` is even and `sym` is True).
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \frac{2}{M-1} \left(
\frac{M-1}{2} - \left|n - \frac{M-1}{2}\right|
\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The Fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bartlett(51)
>>> plt.plot(window)
>>> plt.title("Bartlett window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's bartlett function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = np.where(np.less_equal(n, (M - 1) / 2.0),
2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
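# Illustrative sketch, not part of the original module: for the default
# symmetric case this implementation matches numpy.bartlett, which uses the
# same piecewise-linear formula. The helper name below is hypothetical.
def _demo_bartlett_matches_numpy(M=51):
    # Both windows rise linearly to 1 at the centre and fall back to 0.
    return np.allclose(bartlett(M), np.bartlett(M))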
def hann(M, sym=True):
r"""
Return a Hann window.
The Hann window is a taper formed by using a raised cosine or sine-squared
with ends that touch zero.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hann window is defined as
.. math:: w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The window was named for Julius von Hann, an Austrian meteorologist. It is
also known as the Cosine Bell. It is sometimes erroneously referred to as
the "Hanning" window, from the use of "hann" as a verb in the original
paper and confusion with the very similar Hamming window.
Most references to the Hann window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hann(51)
>>> plt.plot(window)
>>> plt.title("Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hanning function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.5 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
hanning = hann
def tukey(M, alpha=0.5, sym=True):
r"""Return a Tukey window, also known as a tapered cosine window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
alpha : float, optional
Shape parameter of the Tukey window, representing the fraction of the
window inside the cosine tapered region.
If zero, the Tukey window is equivalent to a rectangular window.
If one, the Tukey window is equivalent to a Hann window.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
References
----------
.. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic
Analysis with the Discrete Fourier Transform". Proceedings of the
IEEE 66 (1): 51-83. doi:10.1109/PROC.1978.10837
.. [2] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function#Tukey_window
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.tukey(51)
>>> plt.plot(window)
>>> plt.title("Tukey window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.ylim([0, 1.1])
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Tukey window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
if alpha <= 0:
return np.ones(M, 'd')
elif alpha >= 1.0:
return hann(M, sym=sym)
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
width = int(np.floor(alpha*(M-1)/2.0))
n1 = n[0:width+1]
n2 = n[width+1:M-width-1]
n3 = n[M-width-1:]
w1 = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0*n1/alpha/(M-1))))
w2 = np.ones(n2.shape)
w3 = 0.5 * (1 + np.cos(np.pi * (-2.0/alpha + 1 + 2.0*n3/alpha/(M-1))))
w = np.concatenate((w1, w2, w3))
if not sym and not odd:
w = w[:-1]
return w
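# Illustrative sketch, not part of the original module: numerical check of the
# alpha limits described in the tukey docstring. The helper name is
# hypothetical and assumes the module-level numpy import (np).
def _demo_tukey_alpha_limits(M=64):
    rect_like = tukey(M, alpha=0.0)   # alpha=0 degenerates to a boxcar
    hann_like = tukey(M, alpha=1.0)   # alpha=1 degenerates to a Hann window
    return (np.allclose(rect_like, np.ones(M)),
            np.allclose(hann_like, hann(M)))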
def barthann(M, sym=True):
"""Return a modified Bartlett-Hann window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.barthann(51)
>>> plt.plot(window)
>>> plt.title("Bartlett-Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett-Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
fac = np.abs(n / (M - 1.0) - 0.5)
w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac)
if not sym and not odd:
w = w[:-1]
return w
def hamming(M, sym=True):
r"""Return a Hamming window.
The Hamming window is a taper formed by using a raised cosine with
non-zero endpoints, optimized to minimize the nearest side lobe.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and
is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hamming(51)
>>> plt.plot(window)
>>> plt.title("Hamming window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hamming window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hamming function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def kaiser(M, beta, sym=True):
r"""Return a Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
beta : float
Shape parameter, determines trade-off between main-lobe width and
side lobe level. As beta gets large, the window narrows.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
\right)/I_0(\beta)
with
.. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hann
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.kaiser(51, beta=14)
>>> plt.plot(window)
>>> plt.title(r"Kaiser window ($\beta$=14)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's kaiser function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
alpha = (M - 1) / 2.0
w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) /
special.i0(beta))
if not sym and not odd:
w = w[:-1]
return w
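# Illustrative sketch, not part of the original module: a rough numerical look
# at the beta table in the kaiser docstring. The helper name is hypothetical;
# it returns the maximum deviation of a few Kaiser windows from the named
# windows they approximate (beta=0 is exactly rectangular, the others are
# only approximations).
def _demo_kaiser_beta_table(M=51):
    pairs = [
        (0.0, np.ones(M)),    # rectangular
        (5.0, hamming(M)),    # similar to a Hamming window
        (6.0, hann(M)),       # similar to a Hann window
    ]
    return [np.max(np.abs(kaiser(M, beta) - ref)) for beta, ref in pairs]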
def gaussian(M, std, sym=True):
r"""Return a Gaussian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
std : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.gaussian(51, std=7)
>>> plt.plot(window)
>>> plt.title(r"Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = np.exp(-n ** 2 / sig2)
if not sym and not odd:
w = w[:-1]
return w
def general_gaussian(M, p, sig, sym=True):
r"""Return a window with a generalized Gaussian shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
p : float
Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is
the same shape as the Laplace distribution.
sig : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The generalized Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }
the half-power point is at
.. math:: (2 \log(2))^{1/(2 p)} \sigma
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.general_gaussian(51, p=1.5, sig=7)
>>> plt.plot(window)
>>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Freq. resp. of the gen. Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
w = np.exp(-0.5 * np.abs(n / sig) ** (2 * p))
if not sym and not odd:
w = w[:-1]
return w
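# Illustrative sketch, not part of the original module: as stated in the
# docstring, p=1 reduces the generalized Gaussian window to `gaussian`.
# The helper name is hypothetical.
def _demo_general_gaussian_reduces_to_gaussian(M=51, sig=7.0):
    return np.allclose(general_gaussian(M, p=1, sig=sig),
                       gaussian(M, std=sig))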
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
r"""Return a Dolph-Chebyshev window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
at : float
Attenuation (in dB).
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Notes
-----
This window optimizes for the narrowest main lobe width for a given order
`M` and sidelobe equiripple attenuation `at`, using Chebyshev
polynomials. It was originally developed by Dolph to optimize the
directionality of radio antenna arrays.
Unlike most windows, the Dolph-Chebyshev is defined in terms of its
frequency response:
.. math:: W(k) = \frac
{\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
{\cosh[M \cosh^{-1}(\beta)]}
where
.. math:: \beta = \cosh \left [\frac{1}{M}
\cosh^{-1}(10^\frac{A}{20}) \right ]
and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`).
The time domain window is then generated using the IFFT, so
power-of-two values of `M` are the fastest to generate and prime values
of `M` are the slowest.
The equiripple condition in the frequency domain creates impulses in the
time domain, which appear at the ends of the window.
References
----------
.. [1] C. Dolph, "A current distribution for broadside arrays which
optimizes the relationship between beam width and side-lobe level",
Proceedings of the IEEE, Vol. 34, Issue 6
.. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
American Meteorological Society (April 1997)
http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
.. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
No. 1, January 1978
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.chebwin(51, at=100)
>>> plt.plot(window)
>>> plt.title("Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if np.abs(at) < 45:
warnings.warn("This window is not suitable for spectral analysis "
"for attenuation values lower than about 45dB because "
"the equivalent noise bandwidth of a Chebyshev window "
"does not grow monotonically with increasing sidelobe "
"attenuation when the attenuation is smaller than "
"about 45 dB.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
# compute the parameter beta
order = M - 1.0
beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
k = np.r_[0:M] * 1.0
x = beta * np.cos(np.pi * k / M)
# Find the window's DFT coefficients
# Use analytic definition of Chebyshev polynomial instead of expansion
# from scipy.special. Using the expansion in scipy.special leads to errors.
p = np.zeros(x.shape)
p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
p[x < -1] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[x < -1]))
p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
# Appropriate IDFT and filling up
# depending on even/odd M
if M % 2:
w = np.real(fft(p))
n = (M + 1) // 2
w = w[:n]
w = np.concatenate((w[n - 1:0:-1], w))
else:
p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
w = np.real(fft(p))
n = M // 2 + 1
w = np.concatenate((w[n - 1:0:-1], w[1:n]))
w = w / max(w)
if not sym and not odd:
w = w[:-1]
return w
def slepian(M, width, sym=True):
"""Return a digital Slepian (DPSS) window.
Used to maximize the energy concentration in the main lobe. Also called
the digital prolate spheroidal sequence (DPSS).
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
width : float
Bandwidth
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.slepian(51, width=0.3)
>>> plt.plot(window)
>>> plt.title("Slepian (DPSS) window (BW=0.3)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Slepian window (BW=0.3)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
# our width is the full bandwidth
width = width / 2
# to match the old version
width = width / 2
m = np.arange(M, dtype='d')
H = np.zeros((2, M))
H[0, 1:] = m[1:] * (M - m[1:]) / 2
H[1, :] = ((M - 1 - 2 * m) / 2)**2 * np.cos(2 * np.pi * width)
_, win = linalg.eig_banded(H, select='i', select_range=(M-1, M-1))
win = win.ravel() / win.max()
if not sym and not odd:
win = win[:-1]
return win
def cosine(M, sym=True):
"""Return a window with a simple cosine shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.cosine(51)
>>> plt.plot(window)
>>> plt.title("Cosine window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the cosine window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
>>> plt.show()
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
w = np.sin(np.pi / M * (np.arange(0, M) + .5))
if not sym and not odd:
w = w[:-1]
return w
def exponential(M, center=None, tau=1., sym=True):
r"""Return an exponential (or Poisson) window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
center : float, optional
Parameter defining the center location of the window function.
The default value if not given is ``center = (M-1) / 2``. This
parameter must take its default value for symmetric windows.
tau : float, optional
Parameter defining the decay. For ``center = 0`` use
``tau = -(M-1) / ln(x)`` if ``x`` is the fraction of the window
remaining at the end.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Exponential window is defined as
.. math:: w(n) = e^{-|n-center| / \tau}
References
----------
S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
Technical Review 3, Bruel & Kjaer, 1987.
Examples
--------
Plot the symmetric window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> M = 51
>>> tau = 3.0
>>> window = signal.exponential(M, tau=tau)
>>> plt.plot(window)
>>> plt.title("Exponential Window (tau=3.0)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -35, 0])
>>> plt.title("Frequency response of the Exponential window (tau=3.0)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
This function can also generate non-symmetric windows:
>>> tau2 = -(M-1) / np.log(0.01)
>>> window2 = signal.exponential(M, 0, tau2, False)
>>> plt.figure()
>>> plt.plot(window2)
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
"""
if sym and center is not None:
raise ValueError("If sym==True, center must be None.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
if center is None:
center = (M-1) / 2
n = np.arange(0, M)
w = np.exp(-np.abs(n-center) / tau)
if not sym and not odd:
w = w[:-1]
return w
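# Illustrative sketch, not part of the original module: the tau formula from
# the exponential docstring, tau = -(M-1)/ln(x), leaves a fraction x of the
# peak value at the final sample when center=0. Hypothetical helper.
def _demo_exponential_tau(M=51, x=0.01):
    tau = -(M - 1) / np.log(x)
    w = exponential(M, center=0, tau=tau, sym=False)
    return np.isclose(w[-1], x)   # True: the window decays to x at the end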
_win_equiv_raw = {
('barthann', 'brthan', 'bth'): (barthann, False),
('bartlett', 'bart', 'brt'): (bartlett, False),
('blackman', 'black', 'blk'): (blackman, False),
('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False),
('bohman', 'bman', 'bmn'): (bohman, False),
('boxcar', 'box', 'ones',
'rect', 'rectangular'): (boxcar, False),
('chebwin', 'cheb'): (chebwin, True),
('cosine', 'halfcosine'): (cosine, False),
('exponential', 'poisson'): (exponential, True),
('flattop', 'flat', 'flt'): (flattop, False),
('gaussian', 'gauss', 'gss'): (gaussian, True),
('general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True),
('hamming', 'hamm', 'ham'): (hamming, False),
('hanning', 'hann', 'han'): (hann, False),
('kaiser', 'ksr'): (kaiser, True),
('nuttall', 'nutl', 'nut'): (nuttall, False),
('parzen', 'parz', 'par'): (parzen, False),
('slepian', 'slep', 'optimal', 'dpss', 'dss'): (slepian, True),
('triangle', 'triang', 'tri'): (triang, False),
('tukey', 'tuk'): (tukey, True),
}
# Fill dict with all valid window name strings
_win_equiv = {}
for k, v in _win_equiv_raw.items():
for key in k:
_win_equiv[key] = v[0]
# Keep track of which windows need additional parameters
_needs_param = set()
for k, v in _win_equiv_raw.items():
if v[1]:
_needs_param.update(k)
def get_window(window, Nx, fftbins=True):
"""
Return a window.
Parameters
----------
window : string, float, or tuple
The type of window to create. See below for more details.
Nx : int
The number of samples in the window.
fftbins : bool, optional
If True, create a "periodic" window ready to use with ifftshift
and be multiplied by the result of an fft (SEE ALSO fftfreq).
Returns
-------
get_window : ndarray
Returns a window of length `Nx` and type `window`
Notes
-----
Window types:
boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen,
bohman, blackmanharris, nuttall, barthann, kaiser (needs beta),
gaussian (needs std), general_gaussian (needs power, width),
slepian (needs width), chebwin (needs attenuation)
exponential (needs decay scale), tukey (needs taper fraction)
If the window requires no parameters, then `window` can be a string.
If the window requires parameters, then `window` must be a tuple
with the first argument the string name of the window, and the next
arguments the needed parameters.
If `window` is a floating point number, it is interpreted as the beta
parameter of the kaiser window.
Each of the window types listed above is also the name of
a function that can be called directly to create a window of
that type.
Examples
--------
>>> from scipy import signal
>>> signal.get_window('triang', 7)
array([ 0.25, 0.5 , 0.75, 1. , 0.75, 0.5 , 0.25])
>>> signal.get_window(('kaiser', 4.0), 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
>>> signal.get_window(4.0, 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
"""
sym = not fftbins
try:
beta = float(window)
except (TypeError, ValueError):
args = ()
if isinstance(window, tuple):
winstr = window[0]
if len(window) > 1:
args = window[1:]
elif isinstance(window, string_types):
if window in _needs_param:
raise ValueError("The '" + window + "' window needs one or "
"more parameters -- pass a tuple.")
else:
winstr = window
else:
raise ValueError("%s as window type is not supported." %
str(type(window)))
try:
winfunc = _win_equiv[winstr]
except KeyError:
raise ValueError("Unknown window type.")
params = (Nx,) + args + (sym,)
else:
winfunc = kaiser
params = (Nx, beta, sym)
return winfunc(*params)
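# Illustrative sketch, not part of the original module: get_window with the
# default fftbins=True is equivalent to calling the named window function with
# sym=False (a periodic window), while fftbins=False gives the symmetric form.
# Hypothetical helper.
def _demo_get_window_equivalence(Nx=16):
    periodic_ok = np.allclose(get_window('hamming', Nx),
                              hamming(Nx, sym=False))
    symmetric_ok = np.allclose(get_window('hamming', Nx, fftbins=False),
                               hamming(Nx, sym=True))
    return periodic_ok and symmetric_ok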
|
bsd-3-clause
|
yutiansut/QUANTAXIS
|
QUANTAXIS_Test/QAUtil_Test/QATester_nothing.py
|
2
|
7706
|
import datetime
import struct
import time
import unittest
#from urllib import request
import urllib
import urllib.request
import pandas as pd
import sys
import trace
import cProfile
import re
'''
The code in this file is all experimental. Scribble code!
'''
#from QUANTAXIS import QUANTAXIS as QA
'''
Byte string to integer:
Unpack as short integers: struct.unpack('<hh', bytes(b'\x01\x00\x00\x00')) ==> (1, 0)
Unpack as a long integer: struct.unpack('<L', bytes(b'\x01\x00\x00\x00')) ==> (1,)
Integer to byte string:
Pack as two-byte values: struct.pack('<HH', 1,2) ==> b'\x01\x00\x02\x00'
Pack as four-byte values: struct.pack('<LL', 1,2) ==> b'\x01\x00\x00\x00\x02\x00\x00\x00'
String to byte string:
Encode a string to bytes: '12abc'.encode('ascii') ==> b'12abc'
From a list of numbers or characters: bytes([1,2, ord('1'),ord('2')]) ==> b'\x01\x0212'
From a hex string: bytes().fromhex('010210') ==> b'\x01\x02\x10'
From a hex-escaped string: bytes(map(ord, '\x01\x02\x31\x32')) ==> b'\x01\x0212'
From a list of hex values: bytes([0x01,0x02,0x31,0x32]) ==> b'\x01\x0212'
Byte string to string:
Decode bytes to a string: bytes(b'\x31\x32\x61\x62').decode('ascii') ==> 12ab
Byte string to a hex representation, with ASCII mixed in: str(bytes(b'\x01\x0212'))[2:-1] ==> \x01\x0212
Byte string to hex, two fixed characters per byte: str(binascii.b2a_hex(b'\x01\x0212'))[2:-1] ==> 01023132
Byte string to a list of hex values: [hex(x) for x in bytes(b'\x01\x0212')] ==> ['0x1', '0x2', '0x31', '0x32']
'''
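# Illustrative sketch, not part of the original file: a few of the struct
# conversions listed above, wrapped in a runnable helper (hypothetical name).
def _demo_struct_conversions():
    as_shorts = struct.unpack('<hh', b'\x01\x00\x00\x00')   # (1, 0)
    as_long = struct.unpack('<L', b'\x01\x00\x00\x00')      # (1,)
    packed = struct.pack('<HH', 1, 2)                       # b'\x01\x00\x02\x00'
    decoded = b'\x31\x32\x61\x62'.decode('ascii')           # '12ab'
    return as_shorts, as_long, packed, decoded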
class QA_Test(unittest.TestCase):
def setUp(self):
today = datetime.date.today()
print(today.year)
print(today.month)
print(today.day)
str = "%04d-%02d-%02d" % (today.year, today.month, today.day)
print(str)
pass
def testProfile(self):
cProfile.run('import cProfile;import re;re.compile("foo|bar")')
def testLambda(self):
#simple list
lst = [('d',82),('a',21),('a',4),('f',29),('q',12),('j',21),('k',99)]
lst.sort(key=lambda k:k[1])
print(lst)
lst.sort(key=lambda k:k[0])
print(lst)
lst.sort(key=lambda k:(k[1], k[0]))
print(lst)
# A more complex case: dicts, sorted by one attribute of each dict object
lst = [{'level': 19, 'star': 36, 'time': 1},
{'level': 20, 'star': 40, 'time': 2},
{'level': 20, 'star': 40, 'time': 3},
{'level': 20, 'star': 40, 'time': 4},
{'level': 20, 'star': 40, 'time': 5},
{'level': 18, 'star': 40, 'time': 1}]
# Requirements:
# higher level sorts first;
# for equal level, higher star sorts first;
# for equal level and star, smaller time sorts first;
# Sort by time first
lst.sort(key=lambda k: (k.get('time', 0)))
t1 = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix],trace=0,count=1)
t1.run('''lst = [{'level': 19, 'star': 36, 'time': 1},{'level': 20, 'star': 40, 'time': 2},{'level': 20, 'star': 40, 'time': 3},{'level': 20, 'star': 40, 'time': 4},{'level': 20, 'star': 40, 'time': 5},{'level': 18, 'star': 40, 'time': 1}];lst.sort(key=lambda k: (k.get('time', 0)))''');
r = t1.results()
r.write_results(show_missing=True, coverdir=".")
cProfile.run('''lst = [{'level': 19, 'star': 36, 'time': 1},{'level': 20, 'star': 40, 'time': 2},{'level': 20, 'star': 40, 'time': 3},{'level': 20, 'star': 40, 'time': 4},{'level': 20, 'star': 40, 'time': 5},{'level': 18, 'star': 40, 'time': 1}];lst.sort(key=lambda k: (k.get('time', 0)))''');
# Then sort by level and star
# reverse=True sorts in descending order; the default is ascending
lst.sort(key=lambda k: (k.get('level', 0), k.get('star', 0)), reverse=True)
for idx, r in enumerate(lst):
print('idx[%d]\tlevel: %d\t star: %d\t time: %d\t' % (idx, r['level'], r['star'], r['time']))
def setTear(self):
pass
def nottest_QA(self):
fileDad = open("wss0507r.dad", 'rb')
fileDad.seek(0, 0)
index = 0
first4Bytes = fileDad.read(4)
df = pd.DataFrame(columns=["stock_name", "date", "open", "close", "low", "high", "volumn", "turn"],
index=["code"])
if first4Bytes[0] == 0x8c and first4Bytes[1] == 0x19 and first4Bytes[2] == 0xfc and first4Bytes[3] == 0x33:
fileDad.seek(0x08)
byteNumberOfStock = fileDad.read(0x04)
longNumberOfStock = struct.unpack('<L', byteNumberOfStock)
# print(longNumberOfStock);
for iStockIndex in range(0, longNumberOfStock[0]):
fileDad.seek(0x10 + iStockIndex * 4 * 0x10)
aStockData = fileDad.read(0x10 * 4)
if aStockData[0] == 0xFF and aStockData[1] == 0xFF and aStockData[2] == 0xFF and aStockData[3] == 0xFF:
codeNameByte = aStockData[4:0x10]
# print(codeNameByte)
strCodeName = codeNameByte.decode('gbk')
# print(strCodeName);
stockNameByte = aStockData[0x14: 0x20]
# print(stockNameByte);
strStockName = stockNameByte.decode('gbk')
# print(strStockName);
stockTime = aStockData[0x20: 0x24]
stockTimeNumber = struct.unpack('<L', stockTime)
time_local = time.localtime(stockTimeNumber[0])
#dt = time.strftime("%Y-%m-%d %H:%M:%S", time_local)
dt = time.strftime("%Y-%m-%d", time_local)
# print(dt);
i = 1
byte_stock_open = aStockData[0x20 +
(i * 4): 0x20+((i+1) * 4)]
i = 2
byte_stock_close = aStockData[0x20 +
(i * 4): 0x20+((i+1) * 4)]
i = 3
byte_stock_low = aStockData[0x20+(i * 4): 0x20+((i+1) * 4)]
i = 4
byte_stock_high = aStockData[0x20 +
(i * 4): 0x20+((i+1) * 4)]
i = 5
byte_stock_volume = aStockData[0x20 +
(i * 4): 0x20+((i+1) * 4)]
i = 6
byte_stock_turn = aStockData[0x20 +
(i * 4): 0x20+((i+1) * 4)]
i = 7
v1 = struct.unpack('<f', byte_stock_open)
stock_open = v1[0]
v1 = struct.unpack('<f', byte_stock_close)
stock_close = v1[0]
v1 = struct.unpack('<f', byte_stock_low)
stock_low = v1[0]
v1 = struct.unpack('<f', byte_stock_high)
stock_high = v1[0]
v1 = struct.unpack('<f', byte_stock_volume)
stock_volume = v1[0]
v1 = struct.unpack('<f', byte_stock_turn)
stock_turn = v1[0]
#print("%f %f %f %f %f %f "%(stock_open, stock_close, stock_high,stock_low, stock_volume, stock_turn))
# print("------")
df.index.astype(str)
df.loc[strCodeName] = [strStockName, dt, stock_open,
stock_close, stock_low, stock_high, stock_volume, stock_turn]
pass
pass
fileDad.close()
print(df)
return df
pass
|
mit
|
ragulpr/wtte-rnn
|
python/wtte/plots/weibull_heatmap.py
|
1
|
4946
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from six.moves import xrange
from wtte import weibull
def basic_heatmap(ax, pred, max_horizon=None, resolution=None, cmap='jet'):
if max_horizon is None:
max_horizon = pred.shape[0]
if resolution is None:
resolution = max_horizon
ax.imshow(pred.T, origin='lower', interpolation='none',
aspect='auto', cmap=cmap)
ax.set_yticks([x * (resolution + 0.0) /
max_horizon for x in [0, max_horizon / 2, max_horizon - 1]])
ax.set_yticklabels([0, max_horizon / 2, max_horizon - 1])
ax.set_ylim(-0.5, resolution - 0.5)
ax.set_ylabel('steps ahead $s$')
return ax
def weibull_heatmap(
fig, ax,
t,
a,
b,
max_horizon,
time_to_event=None,
true_time_to_event=None,
censoring_indicator=None,
title='predicted Weibull pmf $p(t,s)$',
lw=1.0,
is_discrete=True,
resolution=None,
xax_nbins=10,
yax_nbins=4,
cmap='jet'
):
"""
Adds a continuous or discrete heatmap with TTE to ax.
Caveats:
- axes are pixels, so the axes are always discrete
(so we want label locations to be in the middle of each pixel).
"""
if resolution is None:
# Resolution. Defaults to 1/step. Want more with pdf.
resolution = max_horizon
# Discrete
if is_discrete:
prob_fun = weibull.pmf
drawstyle = 'steps-post'
else:
prob_fun = weibull.pdf
# drawstyle defaults to straight line.
drawstyle = None
# Number of timesteps
n = len(t)
# no time to event
# no true time to event
# no is_censored
# all is_censored
# ok with true_time_to_event missing but not
# ok with true_
if time_to_event is not None:
if censoring_indicator is not None and true_time_to_event is None:
is_censored = np.array(censoring_indicator).astype(bool)
if true_time_to_event is not None:
is_censored = (time_to_event < true_time_to_event)
else:
true_time_to_event = np.ones_like(time_to_event, dtype=float)
true_time_to_event[:] = np.nan
true_time_to_event[~is_censored] = time_to_event[~is_censored]
assert len(t) == n
assert len(a) == n
assert len(b) == n
assert len(time_to_event) == n
assert len(true_time_to_event) == n
assert len(is_censored) == n
pred = prob_fun(
np.tile(np.linspace(0, max_horizon - 1, resolution), (n, 1)),
np.tile(a.reshape(n, 1), (1, resolution)),
np.tile(b.reshape(n, 1), (1, resolution))
)
ax = basic_heatmap(ax, pred, max_horizon, resolution,
cmap=cmap)
ax.set_title(title)
def ax_add_scaled_line(ax, t, y, y_value_max, y_n_pixels, drawstyle,
linestyle='solid',
color='black',
label=None):
# Shifts and scales y to fit on an imshow as we expect it to be, i.e
# passing through middle of a pixel
scaled_y = ((y_n_pixels + 0.0) / y_value_max) * y
ax.plot(t - 0.5, scaled_y, lw=lw, linestyle=linestyle,
drawstyle=drawstyle, color=color, label=label)
# Adds last segment of steps-post that gets missing
ax.plot([t[-1] - 0.5, t[-1] + 0.5], [scaled_y[-1], scaled_y[-1]],
lw=lw,
linestyle=linestyle,
drawstyle=drawstyle,
color=color)
ax.set_xlim(-0.5, n - 0.5)
if time_to_event is not None:
if not all(is_censored):
ax_add_scaled_line(ax,
t,
true_time_to_event,
y_value_max=max_horizon,
y_n_pixels=resolution,
drawstyle=drawstyle,
linestyle='solid',
color='black',
label='time_to_event')
if not all(~is_censored):
ax_add_scaled_line(ax,
t,
time_to_event,
y_value_max=max_horizon,
y_n_pixels=resolution,
drawstyle=drawstyle,
linestyle='dotted',
color='black',
label='(censored)')
ax.locator_params(axis='y', nbins=4)
ax.locator_params(axis='x', nbins=10)
# [ax.axvline(x=k+1,lw=0.1,c='gray') for k in xrange(n-1)]
# for k in [0,1,2]:
# ax[k].set_xticks(ax[5].get_xticks()-0.5)
# ax[k].set_xticklabels(ax[5].get_xticks().astype(int))
ax.set_xlabel('time')
fig.tight_layout()
return fig, ax
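# Illustrative sketch, not part of the original module: minimal usage of
# weibull_heatmap with constant synthetic Weibull parameters per timestep.
# Assumes the module-level imports above (np, plt, wtte.weibull); the helper
# name and parameter values are hypothetical.
def _demo_weibull_heatmap(n=30, max_horizon=20):
    t = np.arange(n)
    a = np.full(n, 10.0)   # Weibull scale (alpha) at each timestep
    b = np.full(n, 2.0)    # Weibull shape (beta) at each timestep
    # A fully observed, linearly decreasing time-to-event signal.
    time_to_event = np.minimum(max_horizon - 1, n - 1 - t).astype(float)
    fig, ax = plt.subplots()
    return weibull_heatmap(fig, ax, t, a, b, max_horizon,
                           time_to_event=time_to_event,
                           true_time_to_event=time_to_event)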
|
mit
|
cbertinato/pandas
|
pandas/plotting/_misc.py
|
1
|
15347
|
from contextlib import contextmanager
import warnings
from pandas.util._decorators import deprecate_kwarg
from pandas.plotting._core import _get_plot_backend
def table(ax, data, rowLabels=None, colLabels=None, **kwargs):
"""
Helper function to convert DataFrame and Series to matplotlib.table
Parameters
----------
ax : Matplotlib axes object
data : DataFrame or Series
data for table contents
kwargs : keywords, optional
keyword arguments passed to matplotlib.table.table.
If `rowLabels` or `colLabels` is not specified, data index or column
name will be used.
Returns
-------
matplotlib table object
"""
plot_backend = _get_plot_backend()
return plot_backend.table(ax=ax, data=data, rowLabels=rowLabels,
                          colLabels=colLabels, **kwargs)
def register(explicit=True):
"""
Register Pandas Formatters and Converters with matplotlib
This function modifies the global ``matplotlib.units.registry``
dictionary. Pandas adds custom converters for
* pd.Timestamp
* pd.Period
* np.datetime64
* datetime.datetime
* datetime.date
* datetime.time
See Also
--------
deregister_matplotlib_converters
"""
plot_backend = _get_plot_backend()
plot_backend.register(explicit=explicit)
def deregister():
"""
Remove pandas' formatters and converters
Removes the custom converters added by :func:`register`. This
attempts to set the state of the registry back to the state before
pandas registered its own units. Converters for pandas' own types like
Timestamp and Period are removed completely. Converters for types
pandas overwrites, like ``datetime.datetime``, are restored to their
original value.
See Also
--------
deregister_matplotlib_converters
"""
plot_backend = _get_plot_backend()
plot_backend.deregister()
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Returns
-------
numpy.ndarray
A matrix of scatter plots.
Examples
--------
>>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
plot_backend = _get_plot_backend()
return plot_backend.scatter_matrix(
frame=frame, alpha=alpha, figsize=figsize, ax=ax, grid=grid,
diagonal=diagonal, marker=marker, density_kwds=density_kwds,
hist_kwds=hist_kwds, range_padding=range_padding, **kwds)
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""
Plot a multidimensional dataset in 2D.
Each Series in the DataFrame is represented as an evenly distributed
slice on a circle. Each data point is rendered in the circle according to
the value on each Series. Highly correlated `Series` in the `DataFrame`
are placed closer on the unit circle.
RadViz allows projecting an N-dimensional data set into a 2D space where the
influence of each dimension can be interpreted as a balance between the
influence of all dimensions.
More info available at the `original article
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.889>`_
describing RadViz.
Parameters
----------
frame : `DataFrame`
Pandas object holding the data.
class_column : str
Column name containing the name of the data point category.
ax : :class:`matplotlib.axes.Axes`, optional
A plot instance to which to add the information.
color : list[str] or tuple[str], optional
Assign a color to each category. Example: ['blue', 'green'].
colormap : str or :class:`matplotlib.colors.Colormap`, default None
Colormap to select colors from. If string, load colormap with that
name from matplotlib.
kwds : optional
Options to pass to matplotlib scatter plotting method.
Returns
-------
class:`matplotlib.axes.Axes`
See Also
--------
plotting.andrews_curves : Plot clustering visualization.
Examples
--------
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'SepalLength': [6.5, 7.7, 5.1, 5.8, 7.6, 5.0, 5.4, 4.6,
... 6.7, 4.6],
... 'SepalWidth': [3.0, 3.8, 3.8, 2.7, 3.0, 2.3, 3.0, 3.2,
... 3.3, 3.6],
... 'PetalLength': [5.5, 6.7, 1.9, 5.1, 6.6, 3.3, 4.5, 1.4,
... 5.7, 1.0],
... 'PetalWidth': [1.8, 2.2, 0.4, 1.9, 2.1, 1.0, 1.5, 0.2,
... 2.1, 0.2],
... 'Category': ['virginica', 'virginica', 'setosa',
... 'virginica', 'virginica', 'versicolor',
... 'versicolor', 'setosa', 'virginica',
... 'setosa']
... })
>>> rad_viz = pd.plotting.radviz(df, 'Category') # doctest: +SKIP
"""
plot_backend = _get_plot_backend()
return plot_backend.radviz(frame=frame, class_column=class_column, ax=ax,
color=color, colormap=colormap, **kwds)
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Generate a matplotlib plot of Andrews curves, for visualising clusters of
multivariate data.
Andrews curves have the functional form:
f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +
x_4 sin(2t) + x_5 cos(2t) + ...
Where x coefficients correspond to the values of each dimension and t is
linearly spaced between -pi and +pi. Each row of frame then corresponds to
a single curve.
Parameters
----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color : list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
class:`matplotlib.axes.Axes`
"""
plot_backend = _get_plot_backend()
return plot_backend.andrews_curves(frame=frame, class_column=class_column,
ax=ax, samples=samples, color=color,
colormap=colormap, **kwds)
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""
Bootstrap plot on mean, median and mid-range statistics.
The bootstrap plot is used to estimate the uncertainty of a statistic
by relying on random sampling with replacement [1]_. This function will
generate bootstrapping plots for mean, median and mid-range statistics
for the given number of samples of the given size.
.. [1] "Bootstrapping (statistics)" in \
https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29
Parameters
----------
series : pandas.Series
Pandas Series from where to get the samplings for the bootstrapping.
fig : matplotlib.figure.Figure, default None
If given, it will use the `fig` reference for plotting instead of
creating a new one with default parameters.
size : int, default 50
Number of data points to consider during each sampling. It must be
less than or equal to the length of the `series`.
samples : int, default 500
Number of times the bootstrap procedure is performed.
**kwds :
Options to pass to matplotlib plotting method.
Returns
-------
matplotlib.figure.Figure
Matplotlib figure.
See Also
--------
DataFrame.plot : Basic plotting for DataFrame objects.
Series.plot : Basic plotting for Series objects.
Examples
--------
.. plot::
:context: close-figs
>>> s = pd.Series(np.random.uniform(size=100))
>>> fig = pd.plotting.bootstrap_plot(s) # doctest: +SKIP
"""
plot_backend = _get_plot_backend()
return plot_backend.bootstrap_plot(series=series, fig=fig, size=size,
samples=samples, **kwds)
@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame', stacklevel=3)
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
axvlines=True, axvlines_kwds=None, sort_labels=False,
**kwds):
"""Parallel coordinates plotting.
Parameters
----------
frame : DataFrame
class_column : str
Column name containing class names
cols : list, optional
A list of column names to use
ax : matplotlib.axis, optional
matplotlib axis object
color : list or tuple, optional
Colors to use for the different classes
use_columns : bool, optional
If true, columns will be used as xticks
xticks : list or tuple, optional
A list of values to use for xticks
colormap : str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines : bool, optional
If true, vertical lines will be added at each xtick
axvlines_kwds : keywords, optional
Options to be passed to axvline method for vertical lines
sort_labels : bool, default False
Sort class_column labels, useful when assigning colors
.. versionadded:: 0.20.0
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
class:`matplotlib.axes.Axes`
Examples
--------
>>> from matplotlib import pyplot as plt
>>> df = pd.read_csv('https://raw.github.com/pandas-dev/pandas/master'
'/pandas/tests/data/iris.csv')
>>> pd.plotting.parallel_coordinates(
df, 'Name',
color=('#556270', '#4ECDC4', '#C7F464'))
>>> plt.show()
"""
plot_backend = _get_plot_backend()
return plot_backend.parallel_coordinates(
frame=frame, class_column=class_column, cols=cols, ax=ax, color=color,
use_columns=use_columns, xticks=xticks, colormap=colormap,
axvlines=axvlines, axvlines_kwds=axvlines_kwds,
sort_labels=sort_labels, **kwds)
def lag_plot(series, lag=1, ax=None, **kwds):
"""Lag plot for time series.
Parameters
----------
series : Time series
lag : lag of the scatter plot, default 1
ax : Matplotlib axis object, optional
kwds : Matplotlib scatter method keyword arguments, optional
Returns
-------
class:`matplotlib.axes.Axes`
"""
plot_backend = _get_plot_backend()
return plot_backend.lag_plot(series=series, lag=lag, ax=ax, **kwds)
def autocorrelation_plot(series, ax=None, **kwds):
"""
Autocorrelation plot for time series.
Parameters
----------
series : Time series
ax : Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
class:`matplotlib.axes.Axes`
"""
plot_backend = _get_plot_backend()
return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwds)
def tsplot(series, plotf, ax=None, **kwargs):
"""
Plots a Series on the given Matplotlib axes or the current axes
Parameters
----------
axes : Axes
series : Series
Notes
-----
Supports same kwargs as Axes.plot
.. deprecated:: 0.23.0
Use Series.plot() instead
"""
warnings.warn("'tsplot' is deprecated and will be removed in a "
"future version. Please use Series.plot() instead.",
FutureWarning, stacklevel=2)
plot_backend = _get_plot_backend()
return plot_backend.tsplot(series=series, plotf=plotf, ax=ax, **kwargs)
class _Options(dict):
"""
Stores pandas plotting options.
Allows for parameter aliasing so you can just use parameter names that are
the same as the plot function parameters, but is stored in a canonical
format that makes it easy to break down into groups later
"""
# alias so the names are same as plotting method parameter names
_ALIASES = {'x_compat': 'xaxis.compat'}
_DEFAULT_KEYS = ['xaxis.compat']
def __init__(self, deprecated=False):
self._deprecated = deprecated
# self['xaxis.compat'] = False
super().__setitem__('xaxis.compat', False)
def __getitem__(self, key):
key = self._get_canonical_key(key)
if key not in self:
raise ValueError(
'{key} is not a valid pandas plotting option'.format(key=key))
return super().__getitem__(key)
def __setitem__(self, key, value):
key = self._get_canonical_key(key)
return super().__setitem__(key, value)
def __delitem__(self, key):
key = self._get_canonical_key(key)
if key in self._DEFAULT_KEYS:
raise ValueError(
'Cannot remove default parameter {key}'.format(key=key))
return super().__delitem__(key)
def __contains__(self, key):
key = self._get_canonical_key(key)
return super().__contains__(key)
def reset(self):
"""
Reset the option store to its initial state
Returns
-------
None
"""
self.__init__()
def _get_canonical_key(self, key):
return self._ALIASES.get(key, key)
@contextmanager
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
plot_params = _Options()
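# Illustrative sketch, not part of the original module: `plot_params` accepts
# aliased keys ('x_compat' maps to the canonical 'xaxis.compat') and supports
# temporary overrides through the `use` context manager defined above.
# Hypothetical helper.
def _demo_plot_params_use():
    before = plot_params['x_compat']
    with plot_params.use('x_compat', True):
        inside = plot_params['xaxis.compat']   # True while inside the block
    after = plot_params['x_compat']            # restored on exit
    return before, inside, after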
|
bsd-3-clause
|
ElDeveloper/scikit-learn
|
sklearn/model_selection/tests/test_search.py
|
20
|
30855
|
"""Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
# TODO Import from sklearn.exceptions once merged.
from sklearn.base import ChangedBehaviorWarning
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
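# Note (added for exposition): MockClassifier above illustrates the minimal
# duck-typed estimator interface the searches rely on -- fit, get_params /
# set_params and either a score method or an explicit ``scoring`` argument --
# so hyperparameter search also works on classes that do not subclass
# BaseEstimator.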
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
        # tuple + chain flattens {"a": 1, "b": 2} into ("a", 1, "b", 2) so each
        # grid point becomes a hashable tuple (see the sketch after this test)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
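# Illustrative sketch (added for exposition, not part of the original test
# suite): the "tuple + chain" idiom referenced in test_parameter_grid flattens
# a parameter dict into one hashable tuple so grid points can be stored in a
# set.
def _example_flatten_param_dict():
    flat = tuple(chain(*sorted({"foo": 1, "bar": "ham"}.items())))
    assert_equal(flat, ("bar", "ham", "foo", 1))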
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_labels():
# Check if ValueError (when labels is None) propagates to GridSearchCV
# And also check if labels is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
labels = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
LabelShuffleSplit()]
for cv in label_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The labels parameter should not be None",
gs.fit, X, y)
gs.fit(X, y, labels)
non_label_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_label_cvs:
print(cv)
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
    # For the first split 1/4 of the dataset is in the test set, for the second
    # 3/4, so with iid=True the mean is the test-size-weighted average of the
    # fold scores (see the illustrative sketch after this test).
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
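# Illustrative sketch (added for exposition, not part of the original test
# suite): with iid=True the mean validation score is the test-size-weighted
# average of the fold scores, while iid=False takes their plain mean, as
# exercised by test_grid_search_iid above.
def _example_iid_score_averaging():
    fold_scores = np.array([1., 1. / 3.])
    test_sizes = np.array([20, 60])  # 1/4 and 3/4 of the 80-sample dataset
    weighted = np.average(fold_scores, weights=test_sizes)  # iid=True
    unweighted = fold_scores.mean()                         # iid=False
    assert_almost_equal(weighted, 1. * 1. / 4. + 1. / 3. * 3. / 4.)
    assert_almost_equal(unweighted, (1. + 1. / 3.) / 2.)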
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
@ignore_warnings
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv.split(X, y):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
|
bsd-3-clause
|
MPBA/pyHRV
|
pyphysio/interactive.py
|
2
|
8508
|
# coding=utf-8
from __future__ import print_function
from __future__ import division
import matplotlib.pyplot as plt
import pyphysio as ph
import numpy as np
class _MouseSelectionFilter(object):
def __init__(self, onselect):
self._select = onselect
self._last_press = None
def on_move(self, event):
self._last_press = None
def on_press(self, event):
x, y = event.xdata, event.ydata
self._last_press = x, y, event.button
def on_release(self, event):
x, y = event.xdata, event.ydata
if self._last_press is not None:
xx, yy, b = self._last_press
if x == xx and y == yy and event.button == b:
self._select(event)
class _ItemManager(object):
def __init__(self, snap_func, select, unselect, add, delete):
self._snap_func = snap_func
self._select = select
self._unselect = unselect
self._delete = delete
self._add = add
self.selection = -1
def unselect(self):
self._unselect(self.selection)
self.selection = None
def on_select(self, ev):
if ev.xdata is not None and ev.ydata is not None:
x, y, item, new = self._snap_func(ev.xdata, ev.ydata)
# print("on_select: %d, %d: %d" % (x, y, item))
if self.selection is not None:
self.unselect()
if ev.button == 1:
if new:
self._add(x, y, item)
else:
self.selection = item
self._select(item)
class Annotate(object):
def __init__(self, ecg, ibi):
self.plots = None
self.peaks_t = None
self.done = False
self.ecg = ecg
self.ibi = ibi
self.fig = plt.figure()
self.p_sig = self.fig.add_subplot(2, 1, 1)
self.p_res = self.fig.add_subplot(2, 1, 2, sharex=self.p_sig)
self.max = ph.Max()(self.ecg)
self.min = ph.Min()(self.ecg)
self.margin = ph.Range()(self.ecg) * .1
self.max += self.margin
self.min -= self.margin
if isinstance(ibi, ph.UnevenlySignal):
self.peaks_t = self.ibi.get_times()
self.peaks_v = self.ibi.get_values()
else:
self.peaks_t = np.empty(0)
self.peaks_v = np.empty(0)
self.p_sig.plot(self.ecg.get_times(), self.ecg.get_values(), 'b')
        self.p_res.plot(self.peaks_t, self.peaks_v, 'b')
self.p_res.plot(self.peaks_t, self.peaks_v, 'go')
self.replot()
class Cursor(object):
left = None
right = None
radius = .3
radiusi = int(radius * self.ecg.get_sampling_freq())
@staticmethod
def on_move(event):
Cursor.draw(event)
@staticmethod
def on_scroll(event):
if event.button == "up":
Cursor.radiusi += 3
elif event.button == "down":
Cursor.radiusi -= 7
Cursor.radius = Cursor.radiusi / self.ecg.get_sampling_freq()
Cursor.draw(event)
@staticmethod
def draw(event):
if Cursor.left is not None:
Cursor.left.remove()
Cursor.right.remove()
Cursor.left = None
Cursor.right = None
if event.xdata is not None: # TODO (Andrea): not do this if speed (dxdata/dt) is high
Cursor.left = self.p_sig.vlines(event.xdata - Cursor.radius, self.min - self.margin * 2,
self.max + self.margin * 2, 'k')
Cursor.right = self.p_sig.vlines(event.xdata + Cursor.radius, self.min - self.margin * 2,
self.max + self.margin * 2, 'k')
self.fig.canvas.draw()
def find_peak(s):
return np.argmax(s)
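        # Note (added for exposition): snap() below maps a click position to
        # the nearest existing peak within the cursor radius (returning its
        # index with new=False) or, failing that, to the local ECG maximum
        # inside the radius (returning the insertion index with new=True).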
def snap(xdata, ydata):
nearest_after = self.peaks_t.searchsorted(xdata)
nearest_prev = nearest_after - 1
dist_after = self.peaks_t[nearest_after] - xdata if 0 <= nearest_after < len(self.peaks_t) else None
dist_prev = xdata - self.peaks_t[nearest_prev] if 0 <= nearest_prev < len(self.peaks_t) else None
if dist_after is None or dist_prev < dist_after:
if dist_prev is not None and dist_prev < Cursor.radius:
return self.peaks_t[nearest_prev], ydata, nearest_prev, False
elif dist_prev is None or dist_after < dist_prev:
if dist_after is not None and dist_after < Cursor.radius:
return self.peaks_t[nearest_after], ydata, nearest_after, False
s = self.ecg.segment_time(xdata - Cursor.radius, xdata + Cursor.radius)
s = np.array(s)
m = find_peak(s)
return xdata - Cursor.radius + m / self.ecg.get_sampling_freq(), ydata, nearest_after, True
class Selector(object):
selector = None
@staticmethod
def select(item):
# print("select: %d" % item)
Selector.selector = self.p_sig.vlines(self.peaks_t[item], self.min - self.margin, self.max + self.margin, 'g')
@staticmethod
def unselect(item):
if Selector.selector is not None:
# print("unselect: %d" % item)
Selector.selector.remove()
# it is correct that the computation of the values is done at the end (line 186)
def add(time, y, pos):
self.peaks_t = np.insert(self.peaks_t, pos, time)
self.replot()
def delete(item):
self.peaks_t = np.delete(self.peaks_t, item)
self.replot()
im = _ItemManager(snap, Selector.select, Selector.unselect, add, delete)
mf = _MouseSelectionFilter(im.on_select)
def press(ev):
# print(ev.key)
if ev.key == "d" and im.selection is not None:
delete(im.selection)
im.unselect()
def handle_close(ev):
self.done = True
return
clim = self.fig.canvas.mpl_connect('motion_notify_event', lambda e: (mf.on_move(e), Cursor.on_move(e)))
clip = self.fig.canvas.mpl_connect('button_press_event', mf.on_press)
clir = self.fig.canvas.mpl_connect('button_release_event', mf.on_release)
clis = self.fig.canvas.mpl_connect('scroll_event', Cursor.on_scroll)
clik = self.fig.canvas.mpl_connect('key_press_event', press)
ccls = self.fig.canvas.mpl_connect('close_event', handle_close)
        while not self.done:
# print('waiting')
plt.pause(1)
plt.close(self.fig)
# it is correct that the computation of the values is done at the end!
# do not change!
self.peaks_v = np.diff(self.peaks_t)
self.peaks_v = np.r_[self.peaks_v[0], self.peaks_v]
if isinstance(ibi, ph.UnevenlySignal):
self.ibi_ok = ph.UnevenlySignal(values=self.peaks_v,
sampling_freq=self.ibi.get_sampling_freq(),
signal_type=self.ibi.get_signal_type(),
start_time=self.ibi.get_start_time(),
x_values=self.peaks_t,
x_type='instants',
duration=self.ibi.get_duration())
else:
self.ibi_ok = ph.UnevenlySignal(values=self.peaks_v,
sampling_freq=self.ecg.get_sampling_freq(),
signal_type=self.ecg.get_signal_type(),
start_time=self.ecg.get_start_time(),
x_values=self.peaks_t,
x_type='instants',
duration=self.ecg.get_duration())
def __call__(self):
return self.ibi_ok
def replot(self):
if self.plots is not None:
self.plots.remove()
if self.peaks_t is not None:
self.plots = self.p_sig.vlines(self.peaks_t, self.min, self.max, 'y')
self.fig.canvas.draw()
|
gpl-3.0
|
xuewei4d/scikit-learn
|
benchmarks/bench_feature_expansions.py
|
34
|
1761
|
import matplotlib.pyplot as plt
import numpy as np
import scipy.sparse as sparse
from sklearn.preprocessing import PolynomialFeatures
from time import time
degree = 2
trials = 3
num_rows = 1000
dimensionalities = np.array([1, 2, 8, 16, 32, 64])
densities = np.array([0.01, 0.1, 1.0])
csr_times = {d: np.zeros(len(dimensionalities)) for d in densities}
dense_times = {d: np.zeros(len(dimensionalities)) for d in densities}
transform = PolynomialFeatures(degree=degree, include_bias=False,
interaction_only=False)
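# The nested loops below time fit_transform for every (trial, density,
# dimensionality) combination, once on CSR input and once on the equivalent
# dense array; per-density averages over the `trials` runs are plotted at the
# end.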
for trial in range(trials):
for density in densities:
for dim_index, dim in enumerate(dimensionalities):
print(trial, density, dim)
X_csr = sparse.random(num_rows, dim, density).tocsr()
X_dense = X_csr.toarray()
# CSR
t0 = time()
transform.fit_transform(X_csr)
csr_times[density][dim_index] += time() - t0
# Dense
t0 = time()
transform.fit_transform(X_dense)
dense_times[density][dim_index] += time() - t0
csr_linestyle = (0, (3, 1, 1, 1, 1, 1)) # densely dashdotdotted
dense_linestyle = (0, ()) # solid
fig, axes = plt.subplots(nrows=len(densities), ncols=1, figsize=(8, 10))
for density, ax in zip(densities, axes):
ax.plot(dimensionalities, csr_times[density] / trials,
label='csr', linestyle=csr_linestyle)
ax.plot(dimensionalities, dense_times[density] / trials,
label='dense', linestyle=dense_linestyle)
ax.set_title("density %0.2f, degree=%d, n_samples=%d" %
(density, degree, num_rows))
ax.legend()
ax.set_xlabel('Dimensionality')
ax.set_ylabel('Time (seconds)')
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
demianw/dipy
|
doc/examples/tracking_tissue_classifier.py
|
9
|
11346
|
"""
=================================================
Using Various Tissue Classifiers for Tractography
=================================================
The tissue classifier determines if the tracking stops or continues at each
tracking position. The tracking stops when it reaches an ending region
(e.g. low FA, gray matter or cerebrospinal fluid regions) or exits the image
boundaries. The tracking also stops if the direction getter has no direction
to follow.
Each tissue classifier determines whether the stopping is 'valid' or
'invalid'. A streamline is 'valid' when the tissue classifier determines that
the streamline stops in a position classified as 'ENDPOINT' or 'OUTSIDEIMAGE'.
A streamline is 'invalid' when it stops in a position classified as
'TRACKPOINT' or 'INVALIDPOINT'. These conditions are described below. The
'LocalTracking' generator can be set to output all generated streamlines
or only the 'valid' ones.
This example is an extension of the
:ref:`example_deterministic_fiber_tracking` example. We begin by loading the
data, fitting a Constrained Spherical Deconvolution (CSD) reconstruction
model and creating the maximum deterministic direction getter.
"""
import numpy as np
from dipy.data import read_stanford_labels, default_sphere
from dipy.direction import DeterministicMaximumDirectionGetter
from dipy.io.trackvis import save_trk
from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,
auto_response)
from dipy.tracking.local import LocalTracking
from dipy.tracking import utils
from dipy.viz import fvtk
from dipy.viz.colormap import line_colors
ren = fvtk.ren()
hardi_img, gtab, labels_img = read_stanford_labels()
data = hardi_img.get_data()
labels = labels_img.get_data()
affine = hardi_img.get_affine()
seed_mask = labels == 2
white_matter = (labels == 1) | (labels == 2)
seeds = utils.seeds_from_mask(seed_mask, density=2, affine=affine)
response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
csd_model = ConstrainedSphericalDeconvModel(gtab, response)
csd_fit = csd_model.fit(data, mask=white_matter)
dg = DeterministicMaximumDirectionGetter.from_shcoeff(csd_fit.shm_coeff,
max_angle=30.,
sphere=default_sphere)
"""
Threshold Tissue Classifier
---------------------------
A scalar map can be used to define where the tracking stops. The threshold
tissue classifier uses a scalar map to stop the tracking whenever the
interpolated scalar value is lower than a fixed threshold. Here, we show
an example using the fractional anisotropy (FA) map of the DTI model.
The threshold tissue classifier uses a trilinear interpolation at the
tracking position.
**Parameters**
- metric_map: numpy array [:, :, :]
- threshold: float
**Stopping criterion**
- 'ENDPOINT': metric_map < threshold,
- 'OUTSIDEIMAGE': tracking point outside of metric_map,
- 'TRACKPOINT': stop because no direction is available,
- 'INVALIDPOINT': N/A.
"""
import matplotlib.pyplot as plt
import dipy.reconst.dti as dti
from dipy.reconst.dti import fractional_anisotropy
from dipy.tracking.local import ThresholdTissueClassifier
tensor_model = dti.TensorModel(gtab)
tenfit = tensor_model.fit(data, mask=labels > 0)
FA = fractional_anisotropy(tenfit.evals)
threshold_classifier = ThresholdTissueClassifier(FA, .2)
fig = plt.figure()
mask_fa = FA.copy()
mask_fa[mask_fa < 0.2] = 0
plt.xticks([])
plt.yticks([])
plt.imshow(mask_fa[:, :, data.shape[2] // 2].T, cmap='gray', origin='lower',
interpolation='nearest')
fig.tight_layout()
fig.savefig('threshold_fa.png')
"""
.. figure:: threshold_fa.png
:align: center
**Thresholded fractional anisotropy map.**
"""
all_streamlines_threshold_classifier = LocalTracking(dg,
threshold_classifier,
seeds,
affine,
step_size=.5,
return_all=True)
save_trk("deterministic_threshold_classifier_all.trk",
all_streamlines_threshold_classifier,
affine,
labels.shape)
streamlines = [sl for sl in all_streamlines_threshold_classifier]
fvtk.clear(ren)
fvtk.add(ren, fvtk.line(streamlines, line_colors(streamlines)))
fvtk.record(ren, out_path='all_streamlines_threshold_classifier.png',
size=(600, 600))
"""
.. figure:: all_streamlines_threshold_classifier.png
:align: center
**Deterministic tractography using a thresholded fractional anisotropy.**
"""
"""
Binary Tissue Classifier
------------------------
A binary mask can be used to define where the tracking stops. The binary
tissue classifier stops the tracking whenever the tracking position is outside
the mask. Here, we show how to obtain the binary tissue classifier from
the white matter mask defined above. The binary tissue classifier uses a
nearest-neighbor interpolation at the tracking position.
**Parameters**
- mask: numpy array [:, :, :]
**Stopping criterion**
- 'ENDPOINT': mask = 0
- 'OUTSIDEIMAGE': tracking point outside of mask
- 'TRACKPOINT': no direction is available
- 'INVALIDPOINT': N/A
"""
from dipy.tracking.local import BinaryTissueClassifier
binary_classifier = BinaryTissueClassifier(white_matter)
fig = plt.figure()
plt.xticks([])
plt.yticks([])
fig.tight_layout()
plt.imshow(white_matter[:, :, data.shape[2] // 2].T, cmap='gray', origin='lower',
interpolation='nearest')
fig.savefig('white_matter_mask.png')
"""
.. figure:: white_matter_mask.png
:align: center
**White matter binary mask.**
"""
all_streamlines_binary_classifier = LocalTracking(dg,
binary_classifier,
seeds,
affine,
step_size=.5,
return_all=True)
save_trk("deterministic_binary_classifier_all.trk",
all_streamlines_binary_classifier,
affine,
labels.shape)
streamlines = [sl for sl in all_streamlines_binary_classifier]
fvtk.clear(ren)
fvtk.add(ren, fvtk.line(streamlines, line_colors(streamlines)))
fvtk.record(ren, out_path='all_streamlines_binary_classifier.png',
size=(600, 600))
"""
.. figure:: all_streamlines_binary_classifier.png
:align: center
**Deterministic tractography using a binary white matter mask.**
"""
"""
ACT Tissue Classifier
---------------------
Anatomically-constrained tractography (ACT) [Smith2012]_ uses information from
anatomical images to determine when the tractography stops. The 'include_map'
defines when the streamline reached a 'valid' stopping region (e.g. gray
matter partial volume estimation (PVE) map) and the 'exclude_map' defines when
the streamline reached an 'invalid' stopping region (e.g. cerebrospinal fluid
PVE map). The background of the anatomical image should be added to the
'include_map' to keep streamlines exiting the brain (e.g. through the
brain stem). The ACT tissue classifier uses a trilinear interpolation
at the tracking position.
**Parameters**
- include_map: numpy array [:, :, :],
- exclude_map: numpy array [:, :, :],
**Stopping criterion**
- 'ENDPOINT': include_map > 0.5,
- 'OUTSIDEIMAGE': tracking point outside of include_map or exclude_map,
- 'TRACKPOINT': no direction is available,
- 'INVALIDPOINT': exclude_map > 0.5.
"""
from dipy.data import read_stanford_pve_maps
from dipy.tracking.local import ActTissueClassifier
img_pve_csf, img_pve_gm, img_pve_wm = read_stanford_pve_maps()
background = np.ones(img_pve_gm.shape)
background[(img_pve_gm.get_data() +
img_pve_wm.get_data() +
img_pve_csf.get_data()) > 0] = 0
include_map = img_pve_gm.get_data()
include_map[background > 0] = 1
exclude_map = img_pve_csf.get_data()
act_classifier = ActTissueClassifier(include_map, exclude_map)
fig = plt.figure()
plt.subplot(121)
plt.xticks([])
plt.yticks([])
plt.imshow(include_map[:, :, data.shape[2] // 2].T, cmap='gray', origin='lower',
interpolation='nearest')
plt.subplot(122)
plt.xticks([])
plt.yticks([])
plt.imshow(exclude_map[:, :, data.shape[2] // 2].T, cmap='gray', origin='lower',
interpolation='nearest')
fig.tight_layout()
fig.savefig('act_maps.png')
"""
.. figure:: act_maps.png
:align: center
**Include (left) and exclude (right) maps for ACT.**
"""
all_streamlines_act_classifier = LocalTracking(dg,
act_classifier,
seeds,
affine,
step_size=.5,
return_all=True)
save_trk("deterministic_act_classifier_all.trk",
all_streamlines_act_classifier,
affine,
labels.shape)
streamlines = [sl for sl in all_streamlines_act_classifier]
fvtk.clear(ren)
fvtk.add(ren, fvtk.line(streamlines, line_colors(streamlines)))
fvtk.record(ren, out_path='all_streamlines_act_classifier.png',
size=(600, 600))
"""
.. figure:: all_streamlines_act_classifier.png
:align: center
**Deterministic tractography using ACT stopping criterion.**
"""
valid_streamlines_act_classifier = LocalTracking(dg,
act_classifier,
seeds,
affine,
step_size=.5,
return_all=False)
save_trk("deterministic_act_classifier_valid.trk",
valid_streamlines_act_classifier,
affine,
labels.shape)
streamlines = [sl for sl in valid_streamlines_act_classifier]
fvtk.clear(ren)
fvtk.add(ren, fvtk.line(streamlines, line_colors(streamlines)))
fvtk.record(ren, out_path='valid_streamlines_act_classifier.png',
size=(600, 600))
"""
.. figure:: valid_streamlines_act_classifier.png
:align: center
**Deterministic tractography using an anatomically-constrained tractography
stopping criterion. Only streamlines ending in the gray matter region are kept.**
"""
"""
The threshold and binary tissue classifiers use, respectively, a scalar map and
a binary mask to stop the tracking. The ACT tissue classifier uses partial
volume estimation (PVE) maps from an anatomical image to stop the tracking.
Additionally, the ACT tissue classifier determines whether the tracking stopped
in expected regions (e.g. gray matter) and allows the user to keep only the
streamlines that stop in those regions.
Notes
------
Currently in ACT the proposed method that cuts streamlines going through
subcortical gray matter regions is not implemented. The backtracking technique
for streamlines reaching INVALIDPOINT is not implemented either.
References
----------
.. [Smith2012] Smith, R. E., Tournier, J.-D., Calamante, F., & Connelly, A.
Anatomically-constrained tractography: Improved diffusion MRI
streamlines tractography through effective use of anatomical
information. NeuroImage, 63(3), 1924-1938, 2012.
"""
|
bsd-3-clause
|
willcode/gnuradio
|
gr-filter/examples/synth_filter.py
|
6
|
1806
|
#!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr
from gnuradio import filter
from gnuradio import blocks
import sys
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
def main():
N = 1000000
fs = 8000
freqs = [100, 200, 300, 400, 500]
nchans = 7
sigs = list()
for fi in freqs:
s = analog.sig_source_c(fs, analog.GR_SIN_WAVE, fi, 1)
sigs.append(s)
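    # Prototype low-pass filter for the polyphase synthesizer: firdes.low_pass_2
    # takes (gain, sample rate, cutoff, transition width, stop-band attenuation
    # in dB), so the cutoff here is half a channel width, fs / nchans / 2.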
taps = filter.firdes.low_pass_2(len(freqs), fs,
fs/float(nchans)/2, 100, 100)
print("Num. Taps = %d (taps per filter = %d)" % (len(taps),
len(taps) / nchans))
filtbank = filter.pfb_synthesizer_ccf(nchans, taps)
head = blocks.head(gr.sizeof_gr_complex, N)
snk = blocks.vector_sink_c()
tb = gr.top_block()
tb.connect(filtbank, head, snk)
    for i, si in enumerate(sigs):
tb.connect(si, (filtbank, i))
tb.run()
if 1:
f1 = pyplot.figure(1)
s1 = f1.add_subplot(1,1,1)
s1.plot(snk.data()[1000:])
fftlen = 2048
f2 = pyplot.figure(2)
s2 = f2.add_subplot(1,1,1)
winfunc = numpy.blackman
        s2.psd(snk.data()[10000:], NFFT=fftlen,
               Fs=nchans * fs,
               noverlap=fftlen // 4,
               window=lambda d: d * winfunc(fftlen))
pyplot.show()
if __name__ == "__main__":
main()
|
gpl-3.0
|
jmmease/pandas
|
pandas/tests/io/test_sql.py
|
4
|
94956
|
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
from __future__ import print_function
from warnings import catch_warnings
import pytest
import sqlite3
import csv
import os
import warnings
import numpy as np
import pandas as pd
from datetime import datetime, date, time
from pandas.core.dtypes.common import (
is_object_dtype, is_datetime64_dtype,
is_datetime64tz_dtype)
from pandas import DataFrame, Series, Index, MultiIndex, isna, concat
from pandas import date_range, to_datetime, to_timedelta, Timestamp
import pandas.compat as compat
from pandas.compat import range, lrange, string_types, PY36
from pandas.core.tools.datetimes import format as date_format
import pandas.io.sql as sql
from pandas.io.sql import read_sql_table, read_sql_query
import pandas.util.testing as tm
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': {
'query': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'fields': (
'TextCol', 'DateCol', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
'mysql': {
'query': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'fields': (
'TextCol', 'DateCol', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
'postgresql': {
'query': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
'fields': (
'TextCol', 'DateCol', 'DateColWithTz',
'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
},
'read_parameters': {
'sqlite': "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
'mysql': 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
'postgresql': 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s'
},
'read_named_parameters': {
'sqlite': """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
'mysql': """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
'postgresql': """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
"""
},
'create_view': {
'sqlite': """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
}
}
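# Illustrative sketch (added for exposition, not part of the original test
# suite): the 'read_parameters' queries above use each driver's own placeholder
# style (qmark '?' for sqlite3, format '%s' / pyformat '%(name)s' for MySQL and
# PostgreSQL); the parameter values are always passed separately, e.g.:
def _example_parametrised_read(conn):
    # assumes a sqlite3 connection whose iris table has already been loaded
    return read_sql_query(SQL_STRINGS['read_parameters']['sqlite'],
                          conn, params=('Iris-setosa', 5.1))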
class MixInBase(object):
def teardown_method(self, method):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute("DROP TABLE IF EXISTS %s" %
sql._get_valid_mysql_name(table_name))
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute('SHOW TABLES')
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute("DROP TABLE IF EXISTS %s" %
sql._get_valid_sqlite_name(table_name))
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute(
"SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest(object):
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
import io
iris_csv_file = os.path.join(tm.get_data_path(), 'iris.csv')
self.drop_table('iris')
self._get_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with io.open(iris_csv_file, mode='r', newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table('iris_view')
self._get_exec().execute(SQL_STRINGS['create_view'][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
data = [(
'2000-01-03 00:00:00', 0.980268513777, 3.68573087906,
-0.364216805298, -1.15973806169),
('2000-01-04 00:00:00', 1.04791624281, -
0.0412318367011, -0.16181208307, 0.212549316967),
('2000-01-05 00:00:00', 0.498580885705,
0.731167677815, -0.537677223318, 1.34627041952),
('2000-01-06 00:00:00', 1.12020151869, 1.56762092543,
0.00364077397681, 0.67525259227)]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(dict(A=[4, 1, 3, 6],
B=['asd', 'gsq', 'ylt', 'jkl'],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=['1990-11-22', '1991-10-26',
'1993-11-26', '1995-12-12']))
df['E'] = to_datetime(df['E'])
self.test_frame2 = df
def _load_test3_data(self):
columns = ['index', 'A', 'B']
data = [(
'2000-01-03 00:00:00', 2 ** 31 - 1, -1.987670),
('2000-01-04 00:00:00', -29, -0.0412318367011),
('2000-01-05 00:00:00', 20000, 0.731167677815),
('2000-01-06 00:00:00', -290867, 1.56762092543)]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table('types_test_data')
self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [
{
'TextCol': 'first',
'DateCol': '2000-01-03 00:00:00',
'DateColWithTz': '2000-01-01 00:00:00-08:00',
'IntDateCol': 535852800,
'FloatCol': 10.10,
'IntCol': 1,
'BoolCol': False,
'IntColWithNull': 1,
'BoolColWithNull': False,
},
{
'TextCol': 'first',
'DateCol': '2000-01-04 00:00:00',
'DateColWithTz': '2000-06-01 00:00:00-07:00',
'IntDateCol': 1356998400,
'FloatCol': 10.10,
'IntCol': 1,
'BoolCol': False,
'IntColWithNull': None,
'BoolColWithNull': None,
},
]
for d in data:
self._get_exec().execute(
ins['query'],
[d[field] for field in ins['fields']]
)
def _count_rows(self, table_name):
result = self._get_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS['read_parameters'][self.flavor]
params = ['Iris-setosa', 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS['read_named_parameters'][self.flavor]
params = {'name': 'Iris-setosa', 'length': 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame1')
assert self.pandasSQL.has_table('test_frame1')
# Nuke table
self.drop_table('test_frame1')
def _to_sql_empty(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], 'test_frame1')
def _to_sql_fail(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
assert self.pandasSQL.has_table('test_frame1')
pytest.raises(ValueError, self.pandasSQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.drop_table('test_frame1')
def _to_sql_replace(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replace')
assert self.pandasSQL.has_table('test_frame1')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
assert num_rows == num_entries
self.drop_table('test_frame1')
def _to_sql_append(self):
# Nuke table just in case
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='append')
assert self.pandasSQL.has_table('test_frame1')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
assert num_rows == num_entries
self.drop_table('test_frame1')
def _roundtrip(self):
self.drop_table('test_frame_roundtrip')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame_roundtrip')
result = self.pandasSQL.read_query(
'SELECT * FROM test_frame_roundtrip')
result.set_index('level_0', inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _to_sql_save_index(self):
df = DataFrame.from_records([(1, 2.1, 'line1'), (2, 1.5, 'line2')],
columns=['A', 'B', 'C'], index=['A'])
self.pandasSQL.to_sql(df, 'test_to_sql_saves_index')
ix_cols = self._get_index_columns('test_to_sql_saves_index')
assert ix_cols == [['A', ], ]
def _transaction_test(self):
self.pandasSQL.execute("CREATE TABLE test_trans (A INT, B TEXT)")
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
# Make sure when transaction is rolled back, no rows get inserted
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise Exception('error')
        except Exception:
# ignore raised exception
pass
res = self.pandasSQL.read_query('SELECT * FROM test_trans')
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query('SELECT * FROM test_trans')
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = 'sqlite'
mode = None
def setup_method(self, method):
self.conn = self.connect()
self._load_iris_data()
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query(
"SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn)
assert sql.has_table('test_frame1', self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, if_exists='fail')
assert sql.has_table('test_frame2', self.conn)
pytest.raises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, if_exists='fail')
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, if_exists='replace')
assert sql.has_table('test_frame3', self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame3')
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, if_exists='append')
assert sql.has_table('test_frame4', self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame4')
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, 'test_frame5', self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype='int64'), name='series')
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_to_sql_panel(self):
with catch_warnings(record=True):
panel = tm.makePanel()
pytest.raises(NotImplementedError, sql.to_sql, panel,
'test_panel', self.conn)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip',
con=self.conn)
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
# HACK!
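        # (the frame's index comes back as a 'level_0' column; rebuild it as
        # the index before comparing)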
result.index = self.test_frame1.index
result.set_index('level_0', inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip', con=self.conn,
index=False, chunksize=2)
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['DateCol'])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['IntDateCol'])
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'IntDateCol': 's'})
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
index_col='DateCol',
parse_dates=['DateCol', 'IntDateCol'])
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
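        # timedelta64 values are written as raw int64 nanoseconds, and a
        # UserWarning is emitted on write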
df = to_timedelta(
Series(['00:00:01', '00:00:03'], name='foo')).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql('test_timedelta', self.conn)
result = sql.read_sql_query('SELECT * FROM test_timedelta', self.conn)
tm.assert_series_equal(result['foo'], df['foo'].astype('int64'))
def test_complex(self):
df = DataFrame({'a': [1 + 1j, 2j]})
# Complex data type should raise error
pytest.raises(ValueError, df.to_sql, 'test_complex', self.conn)
def test_to_sql_index_label(self):
temp_frame = DataFrame({'col1': range(4)})
# no index name, defaults to 'index'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == 'index'
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == "other_label"
# using the index name
temp_frame.index.name = 'index_name'
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == "index_name"
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == "other_label"
# index name is integer
temp_frame.index.name = 0
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == "0"
temp_frame.index.name = None
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=0)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == "0"
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame({'col1': range(4)},
index=MultiIndex.from_product(
[('A0', 'A1'), ('B0', 'B1')]))
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[0] == 'level_0'
assert frame.columns[1] == 'level_1'
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['A', 'B'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[:2].tolist() == ['A', 'B']
# using the index name
temp_frame.index.names = ['A', 'B']
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[:2].tolist() == ['A', 'B']
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['C', 'D'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
assert frame.columns[:2].tolist() == ['C', 'D']
# wrong length of index_label
pytest.raises(ValueError, sql.to_sql, temp_frame,
'test_index_label', self.conn, if_exists='replace',
index_label='C')
def test_multiindex_roundtrip(self):
df = DataFrame.from_records([(1, 2.1, 'line1'), (2, 1.5, 'line2')],
columns=['A', 'B', 'C'], index=['A', 'B'])
df.to_sql('test_multiindex_roundtrip', self.conn)
result = sql.read_sql_query('SELECT * FROM test_multiindex_roundtrip',
self.conn, index_col=['A', 'B'])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn,
if_exists='replace')
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, 'test', con=self.conn)
assert 'CREATE' in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({'a': [1.1, 1.2], 'b': [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == 'sqlalchemy' else 'INTEGER'
create_sql = sql.get_schema(float_frame, 'test',
con=self.conn, dtype={'b': dtype})
assert 'CREATE' in create_sql
assert 'INTEGER' in create_sql
def test_get_schema_keys(self):
frame = DataFrame({'Col1': [1.1, 1.2], 'Col2': [2.1, 2.2]})
create_sql = sql.get_schema(frame, 'test', con=self.conn, keys='Col1')
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(self.test_frame1, 'test',
con=self.conn, keys=['A', 'B'])
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
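        # read the table back in full and in chunks of 5 rows (read_sql_query,
        # and read_sql_table in sqlalchemy mode) and check that the
        # concatenated chunks match the full read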
df = DataFrame(np.random.randn(22, 5), columns=list('abcde'))
df.to_sql('test_chunksize', self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query("select * from test_chunksize",
self.conn, chunksize=5):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
        # reading the table in chunks with read_sql_table
if self.mode == 'sqlalchemy':
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn,
chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{'person_id': [1, 2, 3],
'person_name': ['John P. Doe', 'Jane Dove', 'John P. Doe']})
df2 = df.copy()
df2['person_name'] = df2['person_name'].astype('category')
df2.to_sql('test_categorical', self.conn, index=False)
res = sql.read_sql_query('SELECT * FROM test_categorical', self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=[u'\xe9', u'b'])
df.to_sql('test_unicode', self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({'A': [0, 1, 2], 'B': [0.2, np.nan, 5.6]})
df.to_sql('d1187b08-4943-4c8d-a7f6', self.conn, index=False)
res = sql.read_sql_query('SELECT * FROM `d1187b08-4943-4c8d-a7f6`',
self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
    Tests for `read_sql_table` are included here, as this is specific to the
sqlalchemy mode.
"""
flavor = 'sqlite'
mode = 'sqlalchemy'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
pytest.skip('SQLAlchemy not installed')
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
cols = ['A', 'B']
result = sql.read_sql_table('test_frame', self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
result = sql.read_sql_table('test_frame', self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table(
'test_frame', self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table('test_frame', self.conn,
index_col=["A", "B"],
columns=["C", "D"])
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql(
"SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table('iris', self.conn)
iris_frame2 = sql.read_sql('iris', self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table('other_table', self.conn)
sql.read_sql_query('SELECT * FROM other_table', self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
        # We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql('CaseSensitive', self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
        ixs = insp.get_indexes(tbl_name)
ixs = [i['column_names'] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],
utc=True)})
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
assert isinstance(table.table.c['time'].type, sqltypes.DateTime)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = 'sqlite:///' + name
table = 'iris'
test_frame1.to_sql(table, db_uri, if_exists='replace', index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = 'SELECT * FROM iris'
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with tm.assert_raises_regex(ImportError, "pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table('iris', metadata,
sa.Column('SepalLength', sa.REAL),
sa.Column('SepalWidth', sa.REAL),
sa.Column('PetalLength', sa.REAL),
sa.Column('PetalWidth', sa.REAL),
sa.Column('Name', sa.TEXT)
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text('select * from iris where name=:name')
iris_df = sql.read_sql(name_text, self.conn, params={
'name': 'Iris-versicolor'})
all_names = set(iris_df['Name'])
assert all_names == set(['Iris-versicolor'])
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam('name'))
iris_df = sql.read_sql(name_select, self.conn,
params={'name': 'Iris-setosa'})
all_names = set(iris_df['Name'])
assert all_names == set(['Iris-setosa'])
class _EngineToConnMixin(object):
"""
    A mixin whose setup wraps the engine in a Connection (inside a
    transaction), so the tests run against a Connection rather than an Engine.
"""
def setup_method(self, method):
super(_EngineToConnMixin, self).setup_method(method)
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
def teardown_method(self, method):
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
super(_EngineToConnMixin, self).teardown_method(method)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = 'sqlite'
mode = 'fallback'
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
        # Test that database IO still works if the connection is closed
        # between the writing and the reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy",
conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;",
conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
def test_con_string_import_error(self):
if not SQLALCHEMY_INSTALLED:
conn = 'mysql://root@localhost/pandas_nosetest'
pytest.raises(ImportError, sql.read_sql, "SELECT * FROM iris",
conn)
else:
pytest.skip('SQLAlchemy is installed')
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
pytest.raises(sql.DatabaseError, sql.read_sql, 'iris', self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b ']) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(self.test_frame1, 'test')
assert 'CREATE' in create_sql
def _get_sqlite_column_type(self, schema, column):
for col in schema.split('\n'):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError('Column %s not found' % (column))
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],
utc=True)})
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
assert self._get_sqlite_column_type(schema, 'time') == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
    deviate for each flavor are overridden there.
"""
flavor = None
@classmethod
def setup_class(cls):
cls.setup_import()
cls.setup_driver()
# test connection
try:
conn = cls.connect()
conn.connect()
except sqlalchemy.exc.OperationalError:
msg = "{0} - can't connect to {1} server".format(cls, cls.flavor)
pytest.skip(msg)
def setup_method(self, method):
self.setup_connect()
self._load_iris_data()
self._load_raw_sql()
self._load_test1_data()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
pytest.skip('SQLAlchemy not installed')
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip(
"Can't connect to {0} server".format(self.flavor))
def test_aread_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
assert temp_conn.has_table('temp_frame')
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
assert temp_conn.has_table('temp_frame')
pandasSQL.drop_table('temp_frame')
assert not temp_conn.has_table('temp_frame')
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=['SepalLength', 'SepalLength'])
tm.equalContents(
iris_frame.columns.values, ['SepalLength', 'SepalLength'])
def test_read_table_absent(self):
pytest.raises(
ValueError, sql.read_sql_table, "this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
assert issubclass(df.BoolColWithNull.dtype.type, np.object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={'i64': [2**62]})
df.to_sql('test_bigint', self.conn, index=False)
result = sql.read_sql_table('test_bigint', self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
assert issubclass(df.DateCol.dtype.type, np.datetime64)
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
        # to datetime64[ns, psycopg2.tz.FixedOffsetTimezone...], which is ok
        # but not very natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
assert col[0] == Timestamp('2000-01-01 08:00:00')
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
assert col[1] == Timestamp('2000-06-01 07:00:00')
elif is_datetime64tz_dtype(col.dtype):
assert str(col.dt.tz) == 'UTC'
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
# GH 6415
expected_data = [Timestamp('2000-01-01 08:00:00', tz='UTC'),
Timestamp('2000-06-01 07:00:00', tz='UTC')]
expected = Series(expected_data, name=col.name)
tm.assert_series_equal(col, expected)
else:
raise AssertionError("DateCol loaded with incorrect type "
"-> {0}".format(col.dtype))
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df, 'DateColWithTz'):
pytest.skip("no column with datetime with time zone")
        # this is parsed on Travis (linux), but not on macOS for some reason,
        # even with the same versions of psycopg2 & sqlalchemy; possibly a
        # PostgreSQL server version difference
col = df.DateColWithTz
assert (is_object_dtype(col.dtype) or
is_datetime64_dtype(col.dtype) or
is_datetime64tz_dtype(col.dtype))
df = pd.read_sql_query("select * from types_test_data",
self.conn, parse_dates=['DateColWithTz'])
if not hasattr(df, 'DateColWithTz'):
pytest.skip("no column with datetime with time zone")
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == 'UTC'
check(df.DateColWithTz)
df = pd.concat(list(pd.read_sql_query("select * from types_test_data",
self.conn, chunksize=1)),
ignore_index=True)
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == 'UTC'
expected = sql.read_sql_table("types_test_data", self.conn)
col = expected.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz)
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates=['DateCol'])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table("types_test_data", self.conn, parse_dates={
'DateCol': {'format': '%Y-%m-%d %H:%M:%S'}})
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=['IntDateCol'])
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': 's'})
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates={'IntDateCol': {'unit': 's'}})
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_datetime(self):
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.to_sql('test_datetime', self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table('test_datetime', self.conn)
result = result.drop('index', axis=1)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information from the schema; sqlite has no
        # native datetime type, so the values come back as strings there
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
result = result.drop('index', axis=1)
if self.flavor == 'sqlite':
assert isinstance(result.loc[0, 'A'], string_types)
result['A'] = to_datetime(result['A'])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.loc[1, 'A'] = np.nan
df.to_sql('test_datetime', self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table('test_datetime', self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information from the schema; sqlite has no
        # native datetime type, so the values come back as strings there
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
if self.flavor == 'sqlite':
assert isinstance(result.loc[0, 'A'], string_types)
result['A'] = to_datetime(result['A'], errors='coerce')
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql('test_date', self.conn, index=False)
res = read_sql_table('test_date', self.conn)
result = res['a']
expected = to_datetime(df['a'])
# comes back as datetime64
tm.assert_series_equal(result, expected)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql('test_time', self.conn, index=False)
res = read_sql_table('test_time', self.conn)
tm.assert_frame_equal(res, df)
# GH8341
# first, use the fallback to have the sqlite adapter put in place
sqlite_conn = TestSQLiteFallback.connect()
sql.to_sql(df, "test_time2", sqlite_conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res) # check if adapter is in place
# then test if sqlalchemy is unaffected by the sqlite adapter
sql.to_sql(df, "test_time3", self.conn, index=False)
if self.flavor == 'sqlite':
res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res)
res = sql.read_sql_table("test_time3", self.conn)
tm.assert_frame_equal(df, res)
def test_mixed_dtype_insert(self):
# see GH6509
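        # 2**25 + 1 cannot be represented exactly as a float32, so an
        # unintended downcast of s1 would show up in the exact comparison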
s1 = Series(2**25 + 1, dtype=np.int32)
s2 = Series(0.0, dtype=np.float32)
df = DataFrame({'s1': s1, 's2': s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({'A': [0, 1, 2], 'B': [0.2, np.nan, 5.6]})
df.to_sql('test_nan', self.conn, index=False)
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({'A': [0, 1, 2], 'B': [np.nan, np.nan, np.nan]})
df.to_sql('test_nan', self.conn, index=False)
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type info from table -> stays None
df['B'] = df['B'].astype('object')
df['B'] = None
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({'A': [0, 1, 2], 'B': ['a', 'b', np.nan]})
df.to_sql('test_nan', self.conn, index=False)
# NaNs are coming back as None
df.loc[2, 'B'] = None
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i['column_names'] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = 'test_get_schema_create_table'
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df,
check_index_type=False)
self.drop_table(tbl)
def test_dtype(self):
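        # dtype= accepts a per-column mapping or a single SQLAlchemy type for
        # all columns; non-SQLAlchemy types (e.g. plain ``str``) must raise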
cols = ['A', 'B']
data = [(0.8, True),
(0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql('dtype_test', self.conn)
df.to_sql('dtype_test2', self.conn, dtype={'B': sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables['dtype_test2'].columns['B'].type
assert isinstance(sqltype, sqlalchemy.TEXT)
pytest.raises(ValueError, df.to_sql,
'error', self.conn, dtype={'B': str})
# GH9083
df.to_sql('dtype_test3', self.conn, dtype={'B': sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables['dtype_test3'].columns['B'].type
assert isinstance(sqltype, sqlalchemy.String)
assert sqltype.length == 10
# single dtype
df.to_sql('single_dtype_test', self.conn, dtype=sqlalchemy.TEXT)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltypea = meta.tables['single_dtype_test'].columns['A'].type
sqltypeb = meta.tables['single_dtype_test'].columns['B'].type
assert isinstance(sqltypea, sqlalchemy.TEXT)
assert isinstance(sqltypeb, sqlalchemy.TEXT)
def test_notna_dtype(self):
cols = {'Bool': Series([True, None]),
'Date': Series([datetime(2012, 5, 1), None]),
'Int': Series([1, None], dtype='object'),
'Float': Series([1.1, None])
}
df = DataFrame(cols)
tbl = 'notna_dtype_test'
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn) # noqa
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == 'mysql':
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
assert isinstance(col_dict['Bool'].type, my_type)
assert isinstance(col_dict['Date'].type, sqltypes.DateTime)
assert isinstance(col_dict['Int'].type, sqltypes.Integer)
assert isinstance(col_dict['Float'].type, sqltypes.Float)
def test_double_precision(self):
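        # V carries more precision than a 32-bit float can hold, so a lossy
        # float64 -> float32 round trip would be visible in the checks below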
V = 1.23456789101112131415
df = DataFrame({'f32': Series([V, ], dtype='float32'),
'f64': Series([V, ], dtype='float64'),
'f64_as_f32': Series([V, ], dtype='float64'),
'i32': Series([5, ], dtype='int32'),
'i64': Series([5, ], dtype='int64'),
})
df.to_sql('test_dtypes', self.conn, index=False, if_exists='replace',
dtype={'f64_as_f32': sqlalchemy.Float(precision=23)})
res = sql.read_sql_table('test_dtypes', self.conn)
# check precision of float64
assert (np.round(df['f64'].iloc[0], 14) ==
np.round(res['f64'].iloc[0], 14))
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables['test_dtypes'].columns
assert str(col_dict['f32'].type) == str(col_dict['f64_as_f32'].type)
assert isinstance(col_dict['f32'].type, sqltypes.Float)
assert isinstance(col_dict['f64'].type, sqltypes.Float)
assert isinstance(col_dict['i32'].type, sqltypes.Integer)
assert isinstance(col_dict['i64'].type, sqltypes.BigInteger)
def test_connectable_issue_example(self):
# This tests the example raised in issue
# https://github.com/pandas-dev/pandas/issues/10104
def foo(connection):
query = 'SELECT test_foo_data FROM test_foo_data'
return sql.read_sql_query(query, con=connection)
def bar(connection, data):
data.to_sql(name='test_foo_data',
con=connection, if_exists='append')
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
DataFrame({'test_foo_data': [0, 1, 2]}).to_sql(
'test_foo_data', self.conn)
main(self.conn)
def test_temporary_table(self):
test_data = u'Hello, World!'
expected = DataFrame({'spam': [test_data]})
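        # use an ORM session so that creating the TEMPORARY table and reading
        # it back happen on the same connection (temporary tables are
        # connection-scoped)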
Base = declarative.declarative_base()
class Temporary(Base):
__tablename__ = 'temp_test'
__table_args__ = {'prefixes': ['TEMPORARY']}
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
Session = sa_session.sessionmaker(bind=self.conn)
session = Session()
with session.transaction:
conn = session.connection()
Temporary.__table__.create(conn)
session.add(Temporary(spam=test_data))
session.flush()
df = sql.read_sql_query(
sql=sqlalchemy.select([Temporary.spam]),
con=conn,
)
tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
def test_transactions(self):
pytest.skip(
"Nested transactions rollbacks don't work with Pandas")
class _TestSQLiteAlchemy(object):
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = 'sqlite'
@classmethod
def connect(cls):
return sqlalchemy.create_engine('sqlite:///:memory:')
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# sqlite has no boolean type, so integer type is returned
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Non-native Bool column with NA values stays as float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
        # IMPORTANT - sqlite has no native date type, so it shouldn't be parsed
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
def test_bigint_warning(self):
# test no warning for BIGINT (to support int64) is raised (GH7433)
df = DataFrame({'a': [1, 2]}, dtype='int64')
df.to_sql('test_bigintwarning', self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table('test_bigintwarning', self.conn)
assert len(w) == 0
class _TestMySQLAlchemy(object):
"""
    Test the sqlalchemy backend against a MySQL database.
"""
flavor = 'mysql'
@classmethod
def connect(cls):
url = 'mysql+{driver}://root@localhost/pandas_nosetest'
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
try:
import pymysql # noqa
cls.driver = 'pymysql'
except ImportError:
pytest.skip('pymysql not installed')
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# MySQL has no real BOOL type (it's an alias for TINYINT)
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
        # Bool column is stored as int, so with NA values it becomes float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_read_procedure(self):
# see GH7324. Although it is more an api test, it is added to the
# mysql tests as sqlite does not have stored procedures
df = DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})
df.to_sql('test_procedure', self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc) # noqa
trans.commit()
        except Exception:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy(object):
"""
    Test the sqlalchemy backend against a PostgreSQL database.
"""
flavor = 'postgresql'
@classmethod
def connect(cls):
url = 'postgresql+{driver}://postgres@localhost/pandas_nosetest'
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
try:
import psycopg2 # noqa
cls.driver = 'psycopg2'
except ImportError:
pytest.skip('psycopg2 not installed')
def test_schema_support(self):
        # only test this for postgresql (schemas are not supported in
        # mysql/sqlite)
df = DataFrame({'col1': [1, 2], 'col2': [
0.1, 0.2], 'col3': ['a', 'n']})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
        # write dataframe to different schemas
df.to_sql('test_schema_public', self.conn, index=False)
df.to_sql('test_schema_public_explicit', self.conn, index=False,
schema='public')
df.to_sql('test_schema_other', self.conn, index=False, schema='other')
# read dataframes back in
res1 = sql.read_sql_table('test_schema_public', self.conn)
tm.assert_frame_equal(df, res1)
res2 = sql.read_sql_table('test_schema_public_explicit', self.conn)
tm.assert_frame_equal(df, res2)
res3 = sql.read_sql_table('test_schema_public_explicit', self.conn,
schema='public')
tm.assert_frame_equal(df, res3)
res4 = sql.read_sql_table('test_schema_other', self.conn,
schema='other')
tm.assert_frame_equal(df, res4)
pytest.raises(ValueError, sql.read_sql_table, 'test_schema_other',
self.conn, schema='public')
# different if_exists options
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe with different if_exists options
df.to_sql('test_schema_other', self.conn, schema='other', index=False)
df.to_sql('test_schema_other', self.conn, schema='other', index=False,
if_exists='replace')
df.to_sql('test_schema_other', self.conn, schema='other', index=False,
if_exists='append')
res = sql.read_sql_table(
'test_schema_other', self.conn, schema='other')
tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
# specifying schema in user-provided meta
# The schema won't be applied on another Connection
# because of transactional schemas
if isinstance(self.conn, sqlalchemy.engine.Engine):
engine2 = self.connect()
meta = sqlalchemy.MetaData(engine2, schema='other')
pdsql = sql.SQLDatabase(engine2, meta=meta)
pdsql.to_sql(df, 'test_schema_other2', index=False)
pdsql.to_sql(df, 'test_schema_other2',
index=False, if_exists='replace')
pdsql.to_sql(df, 'test_schema_other2',
index=False, if_exists='append')
res1 = sql.read_sql_table(
'test_schema_other2', self.conn, schema='other')
res2 = pdsql.read_table('test_schema_other2')
tm.assert_frame_equal(res1, res2)
@pytest.mark.single
class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
class TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
class TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
class TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
class TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn):
pass
# -----------------------------------------------------------------------------
# -- Test Sqlite / MySQL fallback
@pytest.mark.single
class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest):
"""
Test the fallback mode against an in-memory sqlite database.
"""
flavor = 'sqlite'
@classmethod
def connect(cls):
return sqlite3.connect(':memory:')
def setup_method(self, method):
self.conn = self.connect()
self.pandasSQL = sql.SQLiteDatabase(self.conn)
self._load_iris_data()
self._load_test1_data()
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_create_and_drop_table(self):
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
self.pandasSQL.to_sql(temp_frame, 'drop_test_frame')
assert self.pandasSQL.has_table('drop_test_frame')
self.pandasSQL.drop_table('drop_test_frame')
assert not self.pandasSQL.has_table('drop_test_frame')
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql('test_date', self.conn, index=False)
res = read_sql_query('SELECT * FROM test_date', self.conn)
if self.flavor == 'sqlite':
# comes back as strings
tm.assert_frame_equal(res, df.astype(str))
elif self.flavor == 'mysql':
tm.assert_frame_equal(res, df)
def test_datetime_time(self):
# test support for datetime.time, GH #8341
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql('test_time', self.conn, index=False)
res = read_sql_query('SELECT * FROM test_time', self.conn)
if self.flavor == 'sqlite':
# comes back as strings
expected = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(res, expected)
def _get_index_columns(self, tbl_name):
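        # look up the table's indexes in sqlite_master, then expand each index
        # into its column names via PRAGMA index_info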
ixs = sql.read_sql_query(
"SELECT * FROM sqlite_master WHERE type = 'index' " +
"AND tbl_name = '%s'" % tbl_name, self.conn)
ix_cols = []
for ix_name in ixs.name:
ix_info = sql.read_sql_query(
"PRAGMA index_info(%s)" % ix_name, self.conn)
ix_cols.append(ix_info.name.tolist())
return ix_cols
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
if PY36:
pytest.skip("not working on python > 3.5")
self._transaction_test()
def _get_sqlite_column_type(self, table, column):
recs = self.conn.execute('PRAGMA table_info(%s)' % table)
for cid, name, ctype, not_null, default, pk in recs:
if name == column:
return ctype
raise ValueError('Table %s, column %s not found' % (table, column))
def test_dtype(self):
if self.flavor == 'mysql':
pytest.skip('Not applicable to MySQL legacy')
cols = ['A', 'B']
data = [(0.8, True),
(0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql('dtype_test', self.conn)
df.to_sql('dtype_test2', self.conn, dtype={'B': 'STRING'})
# sqlite stores Boolean values as INTEGER
assert self._get_sqlite_column_type(
'dtype_test', 'B') == 'INTEGER'
assert self._get_sqlite_column_type(
'dtype_test2', 'B') == 'STRING'
pytest.raises(ValueError, df.to_sql,
'error', self.conn, dtype={'B': bool})
# single dtype
df.to_sql('single_dtype_test', self.conn, dtype='STRING')
assert self._get_sqlite_column_type(
'single_dtype_test', 'A') == 'STRING'
assert self._get_sqlite_column_type(
'single_dtype_test', 'B') == 'STRING'
def test_notna_dtype(self):
if self.flavor == 'mysql':
pytest.skip('Not applicable to MySQL legacy')
cols = {'Bool': Series([True, None]),
'Date': Series([datetime(2012, 5, 1), None]),
'Int': Series([1, None], dtype='object'),
'Float': Series([1.1, None])
}
df = DataFrame(cols)
tbl = 'notna_dtype_test'
df.to_sql(tbl, self.conn)
assert self._get_sqlite_column_type(tbl, 'Bool') == 'INTEGER'
assert self._get_sqlite_column_type(tbl, 'Date') == 'TIMESTAMP'
assert self._get_sqlite_column_type(tbl, 'Int') == 'INTEGER'
assert self._get_sqlite_column_type(tbl, 'Float') == 'REAL'
def test_illegal_names(self):
# For sqlite, these should work fine
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
        # Raise error on blank table name
pytest.raises(ValueError, df.to_sql, "", self.conn)
for ndx, weird_name in enumerate(
['test_weird_name]', 'test_weird_name[',
'test_weird_name`', 'test_weird_name"', 'test_weird_name\'',
'_b.test_weird_name_01-30', '"_b.test_weird_name_01-30"',
'99beginswithnumber', '12345', u'\xe9']):
df.to_sql(weird_name, self.conn)
sql.table_exists(weird_name, self.conn)
df2 = DataFrame([[1, 2], [3, 4]], columns=['a', weird_name])
c_tbl = 'test_weird_col_name%d' % ndx
df2.to_sql(c_tbl, self.conn)
sql.table_exists(c_tbl, self.conn)
# -----------------------------------------------------------------------------
# -- Old tests from 0.13.1 (before refactor using sqlalchemy)
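# _formatters maps Python/NumPy scalar types to functions that render a value
# as a SQL literal; format_query below uses it to interpolate rows into raw
# INSERT statements.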
_formatters = {
datetime: lambda dt: "'%s'" % date_format(dt),
str: lambda x: "'%s'" % x,
np.str_: lambda x: "'%s'" % x,
compat.text_type: lambda x: "'%s'" % x,
compat.binary_type: lambda x: "'%s'" % x,
float: lambda x: "%.8f" % x,
int: lambda x: "%s" % x,
type(None): lambda x: "NULL",
np.float64: lambda x: "%.10f" % x,
bool: lambda x: "'%s'" % x,
}
def format_query(sql, *args):
"""
"""
processed_args = []
for arg in args:
if isinstance(arg, float) and isna(arg):
arg = None
formatter = _formatters[type(arg)]
processed_args.append(formatter(arg))
return sql % tuple(processed_args)
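# Example (illustrative): format_query("INSERT INTO t VALUES (%s, %s)", 1.5, None)
# -> "INSERT INTO t VALUES (1.50000000, NULL)"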
def tquery(query, con=None, cur=None):
"""Replace removed sql.tquery function"""
res = sql.execute(query, con=con, cur=cur).fetchall()
if res is None:
return None
else:
return list(res)
def _skip_if_no_pymysql():
try:
import pymysql # noqa
except ImportError:
pytest.skip('pymysql not installed, skipping')
@pytest.mark.single
class TestXSQLite(SQLiteMixIn):
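    """
    Legacy (pre-SQLAlchemy) tests run directly against a raw sqlite3
    connection.
    """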
def setup_method(self, method):
self.method = method
self.conn = sqlite3.connect(':memory:')
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.iloc[0, 0] = np.nan
create_sql = sql.get_schema(frame, 'test')
cur = self.conn.cursor()
cur.execute(create_sql)
cur = self.conn.cursor()
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_sql("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame, check_less_precise=True)
def test_execute(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test')
cur = self.conn.cursor()
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (?, ?, ?, ?)"
row = frame.iloc[0]
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_sql("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test')
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
assert tokens[1] == 'DATETIME'
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', keys=['A', 'B'])
lines = create_sql.splitlines()
assert 'PRIMARY KEY ("A", "B")' in create_sql
cur = self.conn.cursor()
cur.execute(create_sql)
@tm.capture_stdout
def test_execute_fail(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
with pytest.raises(Exception):
sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
@tm.capture_stdout
def test_execute_closed_connection(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
with pytest.raises(Exception):
tquery("select * from test", con=self.conn)
# Initialize connection again (needed for tearDown)
self.setup_method(self.method)
def test_na_roundtrip(self):
pass
def _check_roundtrip(self, frame):
sql.to_sql(frame, name='test_table', con=self.conn, index=False)
result = sql.read_sql("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
expected = frame
tm.assert_frame_equal(result, expected)
frame['txt'] = ['a'] * len(frame)
frame2 = frame.copy()
frame2['Idx'] = Index(lrange(len(frame2))) + 10
sql.to_sql(frame2, name='test_table2', con=self.conn, index=False)
result = sql.read_sql("select * from test_table2", self.conn,
index_col='Idx')
expected = frame.copy()
expected.index = Index(lrange(len(frame2))) + 10
expected.index.name = 'Idx'
tm.assert_frame_equal(expected, result)
def test_keyword_as_column_names(self):
df = DataFrame({'From': np.ones(5)})
sql.to_sql(df, con=self.conn, name='testkeywords', index=False)
def test_onecolumn_of_integer(self):
# GH 3628
# a column_of_integers dataframe should transfer well to sql
mono_df = DataFrame([1, 2], columns=['c0'])
sql.to_sql(mono_df, con=self.conn, name='mono_df', index=False)
# computing the sum via sql
con_x = self.conn
the_sum = sum([my_c0[0]
for my_c0 in con_x.execute("select * from mono_df")])
        # it should not fail and gives 3 (GH 3628)
assert the_sum == 3
result = sql.read_sql("select * from mono_df", con_x)
tm.assert_frame_equal(result, mono_df)
def test_if_exists(self):
df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
df_if_exists_2 = DataFrame(
{'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
table_name = 'table_if_exists'
sql_select = "SELECT * FROM %s" % table_name
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
self.drop_table(test_table_to_drop)
# test if invalid value for if_exists raises appropriate error
pytest.raises(ValueError,
sql.to_sql,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.to_sql(frame=df_if_exists_1, con=self.conn,
name=table_name, if_exists='fail')
pytest.raises(ValueError,
sql.to_sql,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists='fail')
# test if_exists='replace'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='replace', index=False)
assert tquery(sql_select, con=self.conn) == [(1, 'A'), (2, 'B')]
sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,
if_exists='replace', index=False)
assert (tquery(sql_select, con=self.conn) ==
[(3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
# test if_exists='append'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='fail', index=False)
assert tquery(sql_select, con=self.conn) == [(1, 'A'), (2, 'B')]
sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,
if_exists='append', index=False)
assert (tquery(sql_select, con=self.conn) ==
[(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
@pytest.mark.single
class TestSQLFlavorDeprecation(object):
"""
gh-13611: test that the 'flavor' parameter
is appropriately deprecated by checking the
functions that directly raise the warning
"""
con = 1234 # don't need real connection for this
funcs = ['SQLiteDatabase', 'pandasSQL_builder']
def test_unsupported_flavor(self):
msg = 'is not supported'
for func in self.funcs:
tm.assert_raises_regex(ValueError, msg, getattr(sql, func),
self.con, flavor='mysql')
def test_deprecated_flavor(self):
for func in self.funcs:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
getattr(sql, func)(self.con, flavor='sqlite')
@pytest.mark.single
@pytest.mark.skip(reason="gh-13611: there is no support for MySQL "
"if SQLAlchemy is not installed")
class TestXMySQL(MySQLMixIn):
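    """
    Legacy (pre-SQLAlchemy) tests run directly against a raw pymysql
    connection.
    """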
@classmethod
def setup_class(cls):
_skip_if_no_pymysql()
# test connection
import pymysql
try:
# Try Travis defaults.
# No real user should allow root access with a blank password.
pymysql.connect(host='localhost', user='root', passwd='',
db='pandas_nosetest')
        except Exception:
pass
else:
return
try:
pymysql.connect(read_default_group='pandas')
except pymysql.ProgrammingError:
pytest.skip(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
except pymysql.Error:
pytest.skip(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
def setup_method(self, method):
_skip_if_no_pymysql()
import pymysql
try:
# Try Travis defaults.
# No real user should allow root access with a blank password.
self.conn = pymysql.connect(host='localhost', user='root',
passwd='', db='pandas_nosetest')
        except Exception:
pass
else:
return
try:
self.conn = pymysql.connect(read_default_group='pandas')
except pymysql.ProgrammingError:
pytest.skip(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
except pymysql.Error:
pytest.skip(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
self.method = method
def test_basic(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
frame.iloc[0, 0] = np.nan
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test')
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_sql("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame, check_less_precise=True)
def test_chunksize_read_type(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
frame.index.name = "index"
drop_sql = "DROP TABLE IF EXISTS test"
cur = self.conn.cursor()
cur.execute(drop_sql)
sql.to_sql(frame, name='test', con=self.conn)
query = "select * from test"
chunksize = 5
chunk_gen = pd.read_sql_query(sql=query, con=self.conn,
chunksize=chunksize, index_col="index")
chunk_df = next(chunk_gen)
tm.assert_frame_equal(frame[:chunksize], chunk_df)
def test_execute(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test')
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
row = frame.iloc[0].values.tolist()
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_sql("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test')
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
assert tokens[1] == 'DATETIME'
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', keys=['A', 'B'])
lines = create_sql.splitlines()
assert 'PRIMARY KEY (`A`, `B`)' in create_sql
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
@tm.capture_stdout
def test_execute_fail(self):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
with pytest.raises(Exception):
sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
@tm.capture_stdout
def test_execute_closed_connection(self):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
with pytest.raises(Exception):
tquery("select * from test", con=self.conn)
# Initialize connection again (needed for tearDown)
self.setup_method(self.method)
def test_na_roundtrip(self):
_skip_if_no_pymysql()
pass
def _check_roundtrip(self, frame):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.to_sql(frame, name='test_table', con=self.conn, index=False)
result = sql.read_sql("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
result.index.name = frame.index.name
expected = frame
tm.assert_frame_equal(result, expected)
frame['txt'] = ['a'] * len(frame)
frame2 = frame.copy()
index = Index(lrange(len(frame2))) + 10
frame2['Idx'] = index
drop_sql = "DROP TABLE IF EXISTS test_table2"
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.to_sql(frame2, name='test_table2',
con=self.conn, index=False)
result = sql.read_sql("select * from test_table2", self.conn,
index_col='Idx')
expected = frame.copy()
# HACK! Change this once indexes are handled properly.
expected.index = index
expected.index.names = result.index.names
tm.assert_frame_equal(expected, result)
def test_keyword_as_column_names(self):
_skip_if_no_pymysql()
df = DataFrame({'From': np.ones(5)})
sql.to_sql(df, con=self.conn, name='testkeywords',
if_exists='replace', index=False)
def test_if_exists(self):
_skip_if_no_pymysql()
df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
df_if_exists_2 = DataFrame(
{'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
table_name = 'table_if_exists'
sql_select = "SELECT * FROM %s" % table_name
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
self.drop_table(test_table_to_drop)
# test if invalid value for if_exists raises appropriate error
pytest.raises(ValueError,
sql.to_sql,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='fail', index=False)
pytest.raises(ValueError,
sql.to_sql,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists='fail')
# test if_exists='replace'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='replace', index=False)
assert tquery(sql_select, con=self.conn) == [(1, 'A'), (2, 'B')]
sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,
if_exists='replace', index=False)
assert (tquery(sql_select, con=self.conn) ==
[(3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
# test if_exists='append'
sql.to_sql(frame=df_if_exists_1, con=self.conn, name=table_name,
if_exists='fail', index=False)
assert tquery(sql_select, con=self.conn) == [(1, 'A'), (2, 'B')]
sql.to_sql(frame=df_if_exists_2, con=self.conn, name=table_name,
if_exists='append', index=False)
assert (tquery(sql_select, con=self.conn) ==
[(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
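# A minimal usage sketch (not part of the original test suite) of the
# ``if_exists`` semantics exercised above; ``conn`` and ``df`` are assumed to
# be an open pymysql connection and a DataFrame, and ``sql`` is the same
# pandas.io.sql module used in these tests:
#
#     sql.to_sql(df, name='t', con=conn, if_exists='fail', index=False)     # error if 't' already exists
#     sql.to_sql(df, name='t', con=conn, if_exists='replace', index=False)  # drop and recreate 't'
#     sql.to_sql(df, name='t', con=conn, if_exists='append', index=False)   # insert into existing 't'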
|
bsd-3-clause
|
jstarc/deep_reasoning
|
visualize.py
|
2
|
3189
|
from __future__ import print_function  # keep the print() calls below working on Python 2
import numpy as np
import load_data
from generative_alg import *
from keras.utils.generic_utils import Progbar
from load_data import load_word_indices
from keras.preprocessing.sequence import pad_sequences
import pandas as pa
import augment
def test_points(premises, labels, noises, gtest, cmodel, hypo_len):
p = Progbar(len(premises))
hypos = []
bs = 64
    for i in range(len(labels) // bs):
words, _ = generative_predict_beam(gtest, premises[i * bs: (i+1)*bs],
noises[i * bs: (i+1)*bs,None,:], labels[i * bs: (i+1)*bs], True, hypo_len)
hypos.append(words)
p.add(len(words))
hypos = np.vstack(hypos)
cpreds = cmodel.evaluate([premises[:len(hypos)], hypos], labels[:len(hypos)])
print(cpreds)
def print_hypos(premise, label, gen_test, beam_size, hypo_len, noise_size, wi):
words = single_generate(premise, label, gen_test, beam_size, hypo_len, noise_size)
batch_size = gen_test[0].input_layers[0].input_shape[0]
per_batch = batch_size // beam_size
premises = [premise] * per_batch
noise_input = np.random.normal(scale=0.11, size=(per_batch, 1, noise_size))
class_indices = np.ones(per_batch) * label
class_indices = load_data.convert_to_one_hot(class_indices, 3)
words, loss = generative_predict_beam(gen_test, premises, noise_input,
class_indices, True, hypo_len)
print('Premise:', wi.print_seq(premise))
print('Label:', load_data.LABEL_LIST[label])
print()
print('Hypotheses:')
for h in words:
print(wi.print_seq(h))
def load_sentence(string, wi, len = 25):
tokens = string.split()
tokens = load_word_indices(tokens, wi.index)
return pad_sequences([tokens], maxlen = len, padding = 'pre')[0]
def find_true_examples():
models = ['8-150-2', '8-150-4', '8-150-8', '8-150-16', '8-150-32', '8-150-147', '6-150-8', '7-150-8' ,'9-226-8']
final_premises = set()
subset = {}
for model in models:
data = pa.read_csv('models/real' + model + '/dev1')
data = data[data['ctrue']]
neutr = data[data['label'] == 'neutral']
contr = data[data['label'] == 'contradiction']
entail = data[data['label'] == 'entailment']
subset[model] = [neutr, contr, entail]
premises = set(neutr['premise']) & set(contr['premise']) & set(entail['premise'])
if len(final_premises) == 0:
final_premises = premises
else:
final_premises &= premises
final_premises = list(final_premises)
with open('results/ext_examples.txt', 'w') as fi:
for i in range(len(final_premises)):
premise = final_premises[i]
fi.write(premise + '\n')
for m in models:
fi.write(m + '\n')
for l in range(3):
filtered = subset[m][l][subset[m][l]['premise'] == premise]
for f in range(len(filtered)):
hypo = filtered['hypo'].iloc[f]
label = filtered['label'].iloc[f][:4]
fi.write(label + '\t' + hypo + '\n')
fi.write('\n')
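# Hypothetical usage sketch of the helpers above; the word-index object ``wi``
# (with an ``index`` mapping and a ``print_seq`` method) and a loaded
# ``gen_test`` model are assumptions, not defined in this file:
#
#     premise = load_sentence("a man is walking his dog", wi)
#     print_hypos(premise, 1, gen_test, beam_size=8, hypo_len=25,
#                 noise_size=8, wi=wi)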
|
mit
|
kjung/scikit-learn
|
sklearn/tree/tree.py
|
1
|
122456
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil, floor
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._criterion import Entropy
from ._criterion import Gini
from ._criterion import PowersCriterion
from ._criterion import VarianceCriterion
from ._splitter import Splitter
from ._splitter import PropensitySplitter
from ._splitter import PowersSplitter
from ._splitter import VarianceSplitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import PropensityTreeBuilder
from ._tree import DoubleSampleTreeBuilder
from ._tree import PowersTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor",
"PropensityTree",
"DoubleSampleTree",
"PowersTree"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter,
"propensity":_splitter.PropensitySplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.presort = presort
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary because it preserves data contiguity, which
# indexing with [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
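# A valid split must leave at least min_samples_leaf samples in each of the
# two children, so any splittable node needs at least 2 * min_samples_leaf
# samples; the clamp above encodes that lower bound.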
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
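# Worked example of the mapping above for n_features_ == 100:
# "sqrt" and "auto" (classification) -> 10, "auto" (regression) -> 100,
# "log2" -> 6, 0.25 -> 25, None -> 100.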
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non zero elements
indicates that the samples goes through the nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
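# A small sketch (assumed variable names) of ranking features with the
# importances computed above:
#
#     clf = DecisionTreeClassifier(random_state=0).fit(X, y)
#     ranked = np.argsort(clf.feature_importances_)[::-1]   # best feature first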
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
presort=presort)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
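# Guard against division by zero: rows whose class counts sum to zero are
# divided by 1.0 instead, so they stay all-zero rather than becoming NaN.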
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
presort=presort)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class PropensityTree(BaseDecisionTree, ClassifierMixin):
"""A propensity tree classifier. This is a base model for Wager & Athey's
causal forests algorithm 2.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion, splitter : (not exposed as constructor arguments)
Unlike ``DecisionTreeClassifier``, both are fixed to "propensity": split
quality is measured with Gini impurity on the treatment indicator ``w``
using the propensity splitter (see ``fit``).
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=3)
The minimum number of samples of each treatment class required to
be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
(Illustrative sketch with assumed synthetic data; not a verified doctest.)
>>> import numpy as np
>>> from sklearn.tree import PropensityTree
>>> rng = np.random.RandomState(0)
>>> X = rng.normal(size=(200, 4))
>>> w = rng.randint(0, 2, size=200)          # treatment indicator
>>> y = X[:, 0] + 0.5 * w + rng.normal(scale=0.1, size=200)
>>> tree = PropensityTree(random_state=0).fit(X, y, w)  # doctest: +SKIP
>>> effects = tree.predict_effect(X)                    # doctest: +SKIP
"""
def __init__(self,
max_depth=None,
min_samples_split=2,
min_samples_leaf=3,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None,
presort=False):
super(PropensityTree, self).__init__(
criterion="propensity",
splitter="propensity",
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
presort=presort)
# NB - the fit method here overrides the fit method from the base class, BaseDecisionTree.
def fit(self, X, y, w, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a propensity tree from the training set (X, y, w).
This is algorithm 2 from Wager & Athey. We fit a tree that
uses the treatment variable w to determine splits, but predict
using the outcome y.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples]
Array of outcome indicators.
w : array-like, shape = [n_samples]
Array of treatment indicators.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
is_classification = True
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
w = check_array(w, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
w = np.atleast_1d(w)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary because it preserves data contiguity, which
# indexing with [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
if w.ndim == 1:
w = np.reshape(w, (-1, 1))
# We will hard assign b/c multi-class doesn't make sense in this context.
self.n_outputs_ = 1
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
w_original = np.copy(w)
w_encoded = np.zeros(w.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, w_encoded[:, k] = np.unique(w[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
w = w_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, w_original)
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(w, "dtype", None) != DOUBLE or not w.flags.contiguous:
w = np.ascontiguousarray(w, dtype=DOUBLE)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of outcomees=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if len(w) != n_samples:
raise ValueError("Number of treatments=%d does not match "
"number of samples=%d" % (len(w), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
# criterion = PropensityCriterion(self.n_outputs_,
# self.n_classes_)
criterion = Gini(self.n_outputs_, self.n_classes_)
splitter = PropensitySplitter(criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use version of DepthFirstTreeBuilder...
builder = PropensityTreeBuilder(splitter,
min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth)
builder.build(self.tree_, X, w, sample_weight, X_idx_sorted)
# Now apply to training data and get the leaf node assignments; keep them in an instance var.
self.y = np.copy(y)
self.w = np.copy(w)
self.training_sample_nodes = self.tree_.apply(X)
# How do we do this? We want to average the outcome in each node, conditioned on w.
# Then we just subtract...
num_bins = np.max(self.training_sample_nodes) + 1
self.treated_y = np.zeros(num_bins)
self.control_y = np.zeros(num_bins)
self.treated_n = np.zeros(num_bins)
self.control_n = np.zeros(num_bins)
for i in range(0, len(self.training_sample_nodes)) :
node_idx = self.training_sample_nodes[i]
if (self.w[i] == 0) :
self.control_y[node_idx] += self.y[i]
self.control_n[node_idx] += 1
else :
self.treated_y[node_idx] += self.y[i]
self.treated_n[node_idx] += 1
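# Bins whose treated (or control) count stays at zero, including indices of
# internal nodes that never appear in training_sample_nodes, get a count of -1
# so the divisions below return 0.0 instead of dividing by zero. The per-leaf
# difference treated_mean_y - control_mean_y computed next is the
# treatment-effect estimate that predict_effect() looks up.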
for i in range(num_bins) :
if self.control_n[i] == 0 :
self.control_n[i] = -1
if self.treated_n[i] == 0 :
self.treated_n[i] = -1
self.treated_mean_y = self.treated_y / self.treated_n
self.control_mean_y = self.control_y / self.control_n
self.effect_estimates = self.treated_mean_y - self.control_mean_y
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict_effect(self, X) :
"""Predict the treatment effect for each sample in X.
Returns
-------
y : array of shape = [n_samples]
The estimated treatment effect for each sample in X.
"""
nodes = self.apply(X)
return np.take(self.effect_estimates, nodes)
def predict_outcomes(self, X) :
"""Predict the outcomes for treated and untreated for each sample in X;
return as 2 column array."""
nodes = self.apply(X)
tx_outcomes = np.take(self.treated_mean_y, nodes)
ctl_outcomes = np.take(self.control_mean_y, nodes)
return np.transpose(np.matrix([tx_outcomes, ctl_outcomes]))
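# The returned matrix has one row per sample, with the treated-group mean
# outcome in column 0 and the control-group mean outcome in column 1.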
# NB - the predict method here overrides the predict method from the base class, BaseDecisionTree.
# Basically, we just use apply and get the (precomputed) tx effects for each of the leaf nodes.
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
# NB - the apply method here overrides the apply method from the base class, BaseDecisionTree.
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
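# --------------------------------------------------------------------------
# Illustrative sketch only (hypothetical helper, not part of the original
# API): the per-leaf bookkeeping in PropensityTree.fit above amounts to a
# difference of conditional outcome means within each node. The function
# below reproduces that computation with plain NumPy, assuming `nodes`, `y`
# and `w` are 1-D arrays of equal length.
def _leaf_effect_table(nodes, y, w):
    """Return an array mapping node id -> mean(y | treated) - mean(y | control)."""
    num_bins = np.max(nodes) + 1
    treated_y = np.bincount(nodes, weights=y * (w == 1), minlength=num_bins)
    control_y = np.bincount(nodes, weights=y * (w == 0), minlength=num_bins)
    treated_n = np.bincount(nodes, weights=(w == 1).astype(np.float64),
                            minlength=num_bins)
    control_n = np.bincount(nodes, weights=(w == 0).astype(np.float64),
                            minlength=num_bins)
    # Same divide-by-zero guard as the fit methods: empty cells get a count of -1.
    treated_n[treated_n == 0] = -1
    control_n[control_n == 0] = -1
    return treated_y / treated_n - control_y / control_n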
class DoubleSampleTree(BaseDecisionTree, ClassifierMixin):
"""A double sample tree. This is a base model for Wager & Athey's
causal forests algorithm 1 (double sample forests).
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
    criterion : string
        The function to measure the quality of a split. Fixed internally to
        "variance" (the variance-maximizing criterion of Wager & Athey) and
        not exposed as a constructor parameter.
    splitter : string
        The strategy used to choose the split at each node. Fixed internally
        to "variance" and not exposed as a constructor parameter.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
    min_samples_leaf : int, float, optional (default=3)
The minimum number of samples of each treatment class required to
be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
max_depth=None,
min_samples_split=2,
min_samples_leaf=3,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None,
presort=False):
super(DoubleSampleTree, self).__init__(
criterion="variance",
splitter="variance",
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
presort=presort)
# NB - the fit method here overrides the fit method from the base class, BaseDecisionTree.
def fit(self, X, y, w, sample_weight=None, check_input=True, X_idx_sorted=None):
"""Build a double sample tree from the training set (X, y, w).
This is algorithm 1 from Wager & Athey. We fit a tree using
half the data to find split points (using a variance maximizing
criterion) and use the rest of the data to estimate treatment
effects in the leaves.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples]
Array of outcome indicators.
        w : array-like, shape = [n_samples]
Array of treatment indicators.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
is_classification = True
# Determine output settings
n_samples, self.n_features_ = X.shape
# self.split_indices is used to partition samples into two sets -
# split_indices == 0 - used for estimation of treatment effects
# split_indices == 1 - used to find splits.
        # (See the illustrative helper sketched after this class.)
        # Draw the partition from this estimator's random_state so results are
        # reproducible; the indices are stored as a platform int, which is
        # cdef'ed to SIZE_t in most pxd files.
        random_state = check_random_state(self.random_state)
        n_split = n_samples // 2
        self.split_indices = np.zeros(n_samples, dtype=np.int)
        self.split_indices[random_state.choice(n_samples, size=n_split,
                                                replace=False)] = 1
        # Presorted indices are not supported by this builder; enforce that here.
        X_idx_sorted = None
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
w = check_array(w, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
y = np.atleast_1d(y)
w = np.atleast_1d(w)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity, unlike
            # [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
if w.ndim == 1:
w = np.reshape(w, (-1, 1))
        # Hard-code a single output, since multiple outputs don't make sense in this context.
self.n_outputs_ = 1
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
w_original = np.copy(w)
w_encoded = np.zeros(w.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, w_encoded[:, k] = np.unique(w[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
w = w_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, w_original)
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(w, "dtype", None) != DOUBLE or not w.flags.contiguous:
w = np.ascontiguousarray(w, dtype=DOUBLE)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of outcomees=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if len(w) != n_samples:
raise ValueError("Number of treatments=%d does not match "
"number of samples=%d" % (len(w), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
# if len(sample_weight) != n_samples:
# raise ValueError("Number of weights=%d does not match "
# "number of samples=%d" %
# (len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = VarianceCriterion(self.n_outputs_,
self.n_classes_)
splitter = VarianceSplitter(criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use version of DepthFirstTreeBuilder...
builder = DoubleSampleTreeBuilder(splitter,
min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth)
builder.build(self.tree_, X, y, w, self.split_indices, sample_weight, X_idx_sorted)
        # Now apply the fitted tree to all samples and keep the leaf node assignment of
        # each sample in an instance variable. Open caveat: a leaf may contain no
        # estimation samples (or too few of one treatment arm) to estimate its effect;
        # the divide-by-zero guard below handles empty cells but does not enforce a
        # minimum per-arm count on the estimation half.
        self.training_sample_nodes = self.tree_.apply(X)
        # Effect estimation: average the outcome within each node, conditioned on w.
        # num_bins is the number of nodes in the tree (not just leaf nodes). The arrays
        # treated_y, control_y, treated_n and control_n accumulate per-node totals,
        # although only leaf nodes receive samples in practice.
num_bins = np.max(self.training_sample_nodes) + 1
self.treated_y = np.zeros(num_bins)
self.control_y = np.zeros(num_bins)
self.treated_n = np.zeros(num_bins)
self.control_n = np.zeros(num_bins)
# For each sample, if it wasn't used to find the splits (split_indices[i] == 0)
# then use it for effect estimation.
for i in range(0, len(self.training_sample_nodes)) :
if self.split_indices[i] == 0 :
node_idx = self.training_sample_nodes[i]
if (w[i] == 0) :
self.control_y[node_idx] += y[i]
self.control_n[node_idx] += 1
else :
self.treated_y[node_idx] += y[i]
self.treated_n[node_idx] += 1
        # Guard against divide by zero: any node with no estimation samples for a given
        # arm gets a count of -1 (in particular, all non-leaf nodes end up with -1).
for i in range(num_bins) :
if self.control_n[i] == 0 :
self.control_n[i] = -1
if self.treated_n[i] == 0 :
self.treated_n[i] = -1
        # Calculate the mean outcome in each leaf node, then subtract to get effect
        # estimates. Note: no smoothing guards against estimated probabilities of
        # exactly 0 or 1 when the outcome is binary.
self.treated_mean_y = self.treated_y / self.treated_n
self.control_mean_y = self.control_y / self.control_n
self.effect_estimates = self.treated_mean_y - self.control_mean_y
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict_effect(self, X) :
"""Predict the treatment effect for each sample in X.
Returns
-------
y : array of shape = [n_samples]
The estimated treatment effect for each sample in X.
"""
nodes = self.apply(X)
return np.take(self.effect_estimates, nodes)
def predict_outcomes(self, X) :
"""Predict the outcomes for treated and untreated for each sample in X;
return as 2 column array."""
nodes = self.apply(X)
tx_outcomes = np.take(self.treated_mean_y, nodes)
ctl_outcomes = np.take(self.control_mean_y, nodes)
return np.transpose(np.matrix([tx_outcomes, ctl_outcomes]))
    # NB - the predict method here overrides the predict method from the base class, BaseDecisionTree.
    # It returns the standard class prediction; treatment effects are obtained via
    # predict_effect above, which uses apply and the precomputed per-leaf estimates.
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
# NB - the apply method here overrides the apply method from the base class, BaseDecisionTree.
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
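# Illustrative sketch only (hypothetical helper, not part of the original
# API): the "double sample" idea used in DoubleSampleTree.fit is a random
# 50/50 partition of the training rows -- one half (split_indices == 1)
# drives the split search, the other half (split_indices == 0) is held out
# for honest effect estimation in the leaves. check_random_state is assumed
# to be available, as it already is for the fit methods above.
def _double_sample_indices(n_samples, random_state=None):
    """Return a 0/1 array marking which rows are used for split finding."""
    rng = check_random_state(random_state)
    indices = np.zeros(n_samples, dtype=np.intp)
    indices[rng.choice(n_samples, size=n_samples // 2, replace=False)] = 1
    return indices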
class PowersTree(BaseDecisionTree, ClassifierMixin):
"""A tree for estimating causal effects using Scott Power's split criterion.
Parameters
----------
    criterion : string
        The function to measure the quality of a split. Fixed internally to
        "PowersCriterion" (Scott Powers' criterion) and not exposed as a
        constructor parameter.
    splitter : string
        The strategy used to choose the split at each node. Fixed internally
        to "PowersSplitter" and not exposed as a constructor parameter.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
    min_samples_leaf : int, float, optional (default=3)
The minimum number of samples of each treatment class required to
be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
max_depth=None,
min_samples_split=2,
min_samples_leaf=3,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None,
presort=False):
super(PowersTree, self).__init__(
criterion="PowersCriterion",
splitter="PowersSplitter",
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
presort=presort)
# NB - the fit method here overrides the fit method from the base class, BaseDecisionTree.
def fit(self, X, y, w, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a causal tree from the training set (X, y, w).
using Scott Powers' split criterion.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples]
Array of outcome indicators.
        w : array-like, shape = [n_samples]
Array of treatment indicators.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
is_classification = True
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
w = check_array(w, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
w = np.atleast_1d(w)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity, unlike
            # [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
if w.ndim == 1:
w = np.reshape(w, (-1, 1))
        # Hard-code a single output, since multiple outputs don't make sense in this context.
self.n_outputs_ = 1
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
w_original = np.copy(w)
w_encoded = np.zeros(w.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, w_encoded[:, k] = np.unique(w[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
w = w_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, w_original)
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(w, "dtype", None) != DOUBLE or not w.flags.contiguous:
w = np.ascontiguousarray(w, dtype=DOUBLE)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of outcomees=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if len(w) != n_samples:
raise ValueError("Number of treatments=%d does not match "
"number of samples=%d" % (len(w), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = PowersCriterion()
if np.unique(y).shape[0] == 2 :
criterion.set_binary_outcome(1)
else :
criterion.set_binary_outcome(0)
splitter = PowersSplitter(criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use version of DepthFirstTreeBuilder...
builder = PowersTreeBuilder(splitter,
min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth)
builder.build(self.tree_, X, y, w, sample_weight, X_idx_sorted)
# Now apply to training data and get the leaf node assignments; keep them in an instance var.
self.y = np.copy(y)
self.w = np.copy(w)
self.training_sample_nodes = self.tree_.apply(X)
        # Effect estimation: average the outcome within each node separately for
        # treated (w == 1) and control (w == 0) samples, then take the difference.
num_bins = np.max(self.training_sample_nodes) + 1
self.treated_y = np.zeros(num_bins)
self.control_y = np.zeros(num_bins)
self.treated_n = np.zeros(num_bins)
self.control_n = np.zeros(num_bins)
for i in range(0, len(self.training_sample_nodes)) :
node_idx = self.training_sample_nodes[i]
if (self.w[i] == 0) :
self.control_y[node_idx] += self.y[i]
self.control_n[node_idx] += 1
else :
self.treated_y[node_idx] += self.y[i]
self.treated_n[node_idx] += 1
        # Note: with the per-treatment-class min_samples_leaf constraint these counts
        # should never be zero; setting -1 is purely a divide-by-zero guard.
for i in range(num_bins) :
if self.control_n[i] == 0 :
self.control_n[i] = -1
if self.treated_n[i] == 0 :
self.treated_n[i] = -1
self.treated_mean_y = self.treated_y / self.treated_n
self.control_mean_y = self.control_y / self.control_n
self.effect_estimates = self.treated_mean_y - self.control_mean_y
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict_effect(self, X) :
"""Predict the treatment effect for each sample in X.
Returns
-------
y : array of shape = [n_samples]
The estimated treatment effect for each sample in X.
"""
nodes = self.apply(X)
return np.take(self.effect_estimates, nodes)
def predict_outcomes(self, X) :
"""Predict the outcomes for treated and untreated for each sample in X;
return as 2 column array."""
nodes = self.apply(X)
tx_outcomes = np.take(self.treated_mean_y, nodes)
ctl_outcomes = np.take(self.control_mean_y, nodes)
return np.transpose(np.matrix([tx_outcomes, ctl_outcomes]))
    # NB - the predict method here overrides the predict method from the base class, BaseDecisionTree.
    # It returns the standard class prediction; treatment effects are obtained via
    # predict_effect above, which uses apply and the precomputed per-leaf estimates.
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
# NB - the apply method here overrides the apply method from the base class, BaseDecisionTree.
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
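# Illustrative usage sketch only: the synthetic randomized experiment below is
# hypothetical and simply shows the intended call sequence fit(X, y, w) ->
# predict_effect(X). Parameter choices are arbitrary examples.
def _example_causal_tree_usage(n_samples=200, n_features=5, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(n_samples, n_features)
    w = rng.randint(0, 2, size=n_samples)        # randomized treatment assignment
    tau = 1.0 * (X[:, 0] > 0)                    # heterogeneous true effect
    y = X[:, 1] + tau * w + 0.1 * rng.randn(n_samples)
    tree = PowersTree(max_depth=4, min_samples_leaf=10, random_state=seed)
    tree.fit(X, y, w)
    return tree.predict_effect(X)                # per-sample effect estimates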
|
bsd-3-clause
|
icdishb/scikit-learn
|
sklearn/cluster/tests/test_spectral.py
|
262
|
7954
|
"""Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
|
bsd-3-clause
|
dch312/numpy
|
numpy/linalg/linalg.py
|
4
|
74335
|
"""Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
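# Illustrative note (pattern only, not a verbatim excerpt): the wrappers below
# pass this extobj to the _umath_linalg gufuncs so that, under the
# errstate(invalid='call') mask captured above, a LAPACK failure surfaces as a
# descriptive LinAlgError instead of a silent NaN result, e.g.
#
#     extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
#     x = _umath_linalg.solve(a, b, extobj=extobj)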
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
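# Illustrative example (doctest-style, for exposition only): mixing a float32
# array with a complex64 array promotes the computation type to cdouble while
# the reported result type stays csingle:
#
#     >>> t, result_t = _commonType(zeros(2, single), zeros(2, csingle))
#     >>> t is cdouble and result_t is csingle
#     True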
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
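    An illustrative aside, a minimal sketch of the broadcasting described in
    the Notes (the stacked systems `a2` and `b2` below are invented for this
    sketch):
    >>> a2 = np.array([[[3., 1.], [1., 2.]], [[2., 0.], [0., 2.]]])
    >>> b2 = np.array([[9., 8.], [2., 4.]])
    >>> np.linalg.solve(a2, b2)
    array([[ 2.,  3.],
           [ 1.,  2.]])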
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
        Lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
    >>> np.linalg.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
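    An illustrative aside, a minimal sketch of the two-step solve described in
    the Notes (the right-hand side `b` is invented for this sketch):
    >>> A = np.array([[1,-2j],[2j,5]])
    >>> L = np.linalg.cholesky(A)
    >>> b = np.array([1.0, 2.0])
    >>> y = np.linalg.solve(L, b) # solve L y = b
    >>> x = np.linalg.solve(L.conj().T, y) # solve L.H x = y
    >>> np.allclose(np.dot(A, x), b)
    True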
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
return wrap(gufunc(a, signature=signature, extobj=extobj).astype(result_t))
# QR decompostion
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
    The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
    >>> q, r = np.linalg.qr(A)
    >>> p = np.dot(q.T, b)
    >>> np.dot(np.linalg.inv(r), p)
array([ 1.1e-16, 1.0e+00])
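    An illustrative aside comparing the shapes returned by the default
    'reduced' mode with mode='complete' for the same matrix `A` (the names
    `qc` and `rc` are invented for this sketch):
    >>> qc, rc = np.linalg.qr(A, mode='complete')
    >>> q.shape, qc.shape, rc.shape
    ((4, 2), (4, 4), (4, 2))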
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
msg = "The 'economic' option is deprecated.",
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M,) ndarray
The eigenvalues, not necessarily ordered, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _ssyevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
    array([ 0.17157288,  5.82842712])
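    An illustrative aside, a minimal check (not drawn from the docstring
    above) that 'L' and 'U' agree whenever the input really is Hermitian,
    since only the selected triangle is referenced:
    >>> np.allclose(LA.eigvalsh(a, UPLO='L'), LA.eigvalsh(a, UPLO='U'))
    True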
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t))
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be always be of complex type. When `a` is real
the resulting eigenvalues will be real (0 imaginary part) or
occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
eigvals : eigenvalues of a non-symmetric array.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t)
return w.astype(result_t), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
    a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues, not necessarily ordered.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _ssyevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t))
vt = vt.astype(result_t)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
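    An illustrative aside (the name `s2` is invented for this sketch): with
    ``compute_uv=False`` only the singular values are returned, and they
    match the `s` computed above:
    >>> s2 = np.linalg.svd(a, compute_uv=False)
    >>> np.allclose(s, s2)
    True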
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t)
s = s.astype(_realType(result_t))
vt = vt.astype(result_t)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t))
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[0]/s[-1]
else:
return norm(x, p)*norm(inv(x), p)
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
    .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
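    An illustrative aside showing an explicit `tol` (the matrix below is
    invented so that one singular value falls under the threshold):
    >>> matrix_rank(np.diag([1., 1e-10]), tol=1e-8)
    1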
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
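    An illustrative aside (the right-hand side `b` is invented for this
    sketch): for this full-column-rank `a`, applying the pseudo-inverse
    reproduces the least-squares solution from `lstsq`:
    >>> b = np.random.randn(9)
    >>> np.allclose(np.dot(B, b), np.linalg.lstsq(a, b)[0])
    True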
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
            s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
.. versionadded:: 1.6.0.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
return sign.astype(result_t), logdet.astype(real_t)
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
    slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
    (3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
return _umath_linalg.det(a, signature=signature).astype(result_t)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
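    An illustrative aside: the remaining return values are the residual sum
    of squares, the rank of `A`, and its singular values; only the rank is
    shown here because it is deterministic for this data:
    >>> np.linalg.lstsq(A, y)[2]
    2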
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
.. versionadded:: 1.10.0
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([6, 6])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
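    An illustrative aside showing the `keepdims` option documented above,
    reusing `c` from the vector-norm examples:
    >>> LA.norm(c, axis=1, keepdims=True).shape
    (2, 1)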
"""
x = asarray(x)
# Check the default case first and handle it immediately.
if ord is None and axis is None:
ndim = x.ndim
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis % nd == col_axis % nd:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
    >>> A = np.random.random((10000, 100))
    >>> B = np.random.random((100, 1000))
    >>> C = np.random.random((1000, 5))
    >>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Example: multiplication costs of different parenthesizations
------------------------------------------------------------
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
    :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
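    A minimal hedged check (the arrays below are invented with the shapes
    discussed above) that `multi_dot` matches an explicit `np.dot` chain:
    >>> A = np.random.random((10, 100))
    >>> B = np.random.random((100, 5))
    >>> C = np.random.random((5, 50))
    >>> np.allclose(multi_dot([A, B, C]), A.dot(B).dot(C))
    True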
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
# cost1 = cost((AB)C)
cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
    # cost2 = cost(A(BC))
cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
    Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
|
bsd-3-clause
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/notebook/notebookapp.py
|
3
|
55293
|
# coding: utf-8
"""A tornado based Jupyter notebook server."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import absolute_import, print_function
import notebook
import binascii
import datetime
import errno
import importlib
import io
import json
import logging
import mimetypes
import os
import random
import re
import select
import signal
import socket
import sys
import threading
import warnings
import webbrowser
try: #PY3
from base64 import encodebytes
except ImportError: #PY2
from base64 import encodestring as encodebytes
from jinja2 import Environment, FileSystemLoader
# Install the pyzmq ioloop. This has to be done before anything else from
# tornado is imported.
from zmq.eventloop import ioloop
ioloop.install()
# check for tornado 3.1.0
msg = "The Jupyter Notebook requires tornado >= 4.0"
try:
import tornado
except ImportError:
raise ImportError(msg)
try:
version_info = tornado.version_info
except AttributeError:
raise ImportError(msg + ", but you have < 1.1.0")
if version_info < (4,0):
raise ImportError(msg + ", but you have %s" % tornado.version)
from tornado import httpserver
from tornado import web
from tornado.httputil import url_concat
from tornado.log import LogFormatter, app_log, access_log, gen_log
from notebook import (
DEFAULT_STATIC_FILES_PATH,
DEFAULT_TEMPLATE_PATH_LIST,
__version__,
)
# py23 compatibility
try:
raw_input = raw_input
except NameError:
raw_input = input
from .base.handlers import Template404, RedirectWithParams
from .log import log_request
from .services.kernels.kernelmanager import MappingKernelManager
from .services.config import ConfigManager
from .services.contents.manager import ContentsManager
from .services.contents.filemanager import FileContentsManager
from .services.contents.largefilemanager import LargeFileManager
from .services.sessions.sessionmanager import SessionManager
from .auth.login import LoginHandler
from .auth.logout import LogoutHandler
from .base.handlers import FileFindHandler
from traitlets.config import Config
from traitlets.config.application import catch_config_error, boolean_flag
from jupyter_core.application import (
JupyterApp, base_flags, base_aliases,
)
from jupyter_core.paths import jupyter_config_path
from jupyter_client import KernelManager
from jupyter_client.kernelspec import KernelSpecManager, NoSuchKernel, NATIVE_KERNEL_NAME
from jupyter_client.session import Session
from nbformat.sign import NotebookNotary
from traitlets import (
Dict, Unicode, Integer, List, Bool, Bytes, Instance,
TraitError, Type, Float, observe, default, validate
)
from ipython_genutils import py3compat
from jupyter_core.paths import jupyter_runtime_dir, jupyter_path
from notebook._sysinfo import get_sys_info
from ._tz import utcnow
from .utils import url_path_join, check_pid, url_escape
#-----------------------------------------------------------------------------
# Module globals
#-----------------------------------------------------------------------------
_examples = """
jupyter notebook # start the notebook
jupyter notebook --certfile=mycert.pem # use SSL/TLS certificate
jupyter notebook password # enter a password to protect the server
"""
DEV_NOTE_NPM = """It looks like you're running the notebook from source.
If you're working on the Javascript of the notebook, try running
npm run build:watch
in another terminal window to have the system incrementally
watch and build the notebook's JavaScript for you, as you make changes.
"""
#-----------------------------------------------------------------------------
# Helper functions
#-----------------------------------------------------------------------------
def random_ports(port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n-5):
yield max(1, port + random.randint(-2*n, 2*n))
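# Hedged usage sketch (editor-added): `random_ports` is consumed lazily by the
# port-binding loop in `NotebookApp.init_webapp` below, roughly::
#
#     for port in random_ports(self.port, self.port_retries + 1):
#         try:
#             self.http_server.listen(port, self.ip)
#         except socket.error:
#             continue
#
# so the first five candidates are sequential (port, port+1, ...) and the
# remaining ones are pseudo-random ports near `port`.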
def load_handlers(name):
"""Load the (URL pattern, handler) tuples for each component."""
name = 'notebook.' + name
mod = __import__(name, fromlist=['default_handlers'])
return mod.default_handlers
#-----------------------------------------------------------------------------
# The Tornado web application
#-----------------------------------------------------------------------------
class NotebookWebApplication(web.Application):
def __init__(self, jupyter_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager,
config_manager, log,
base_url, default_url, settings_overrides, jinja_env_options):
# If the user is running the notebook in a git directory, make the assumption
# that this is a dev install and suggest to the developer `npm run build:watch`.
base_dir = os.path.realpath(os.path.join(__file__, '..', '..'))
dev_mode = os.path.exists(os.path.join(base_dir, '.git'))
if dev_mode:
log.info(DEV_NOTE_NPM)
settings = self.init_settings(
jupyter_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager, config_manager, log, base_url,
default_url, settings_overrides, jinja_env_options)
handlers = self.init_handlers(settings)
super(NotebookWebApplication, self).__init__(handlers, **settings)
def init_settings(self, jupyter_app, kernel_manager, contents_manager,
session_manager, kernel_spec_manager,
config_manager,
log, base_url, default_url, settings_overrides,
jinja_env_options=None):
_template_path = settings_overrides.get(
"template_path",
jupyter_app.template_file_path,
)
if isinstance(_template_path, py3compat.string_types):
_template_path = (_template_path,)
template_path = [os.path.expanduser(path) for path in _template_path]
jenv_opt = {"autoescape": True}
jenv_opt.update(jinja_env_options if jinja_env_options else {})
env = Environment(loader=FileSystemLoader(template_path), **jenv_opt)
sys_info = get_sys_info()
if sys_info['commit_source'] == 'repository':
# don't cache (rely on 304) when working from master
version_hash = ''
else:
# reset the cache on server restart
version_hash = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
if jupyter_app.ignore_minified_js:
log.warning("""The `ignore_minified_js` flag is deprecated and no
longer works. Alternatively use `npm run build:watch` when
working on the notebook's Javascript and LESS""")
warnings.warn("The `ignore_minified_js` flag is deprecated and will be removed in Notebook 6.0", DeprecationWarning)
now = utcnow()
root_dir = contents_manager.root_dir
home = os.path.expanduser('~')
if root_dir.startswith(home + os.path.sep):
# collapse $HOME to ~
root_dir = '~' + root_dir[len(home):]
settings = dict(
# basics
log_function=log_request,
base_url=base_url,
default_url=default_url,
template_path=template_path,
static_path=jupyter_app.static_file_path,
static_custom_path=jupyter_app.static_custom_path,
static_handler_class = FileFindHandler,
static_url_prefix = url_path_join(base_url,'/static/'),
static_handler_args = {
# don't cache custom.js
'no_cache_paths': [url_path_join(base_url, 'static', 'custom')],
},
version_hash=version_hash,
ignore_minified_js=jupyter_app.ignore_minified_js,
# rate limits
iopub_msg_rate_limit=jupyter_app.iopub_msg_rate_limit,
iopub_data_rate_limit=jupyter_app.iopub_data_rate_limit,
rate_limit_window=jupyter_app.rate_limit_window,
# maximum request sizes - support saving larger notebooks
# tornado defaults are 100 MiB, we increase it to 0.5 GiB
max_body_size = 512 * 1024 * 1024,
max_buffer_size = 512 * 1024 * 1024,
# authentication
cookie_secret=jupyter_app.cookie_secret,
login_url=url_path_join(base_url,'/login'),
login_handler_class=jupyter_app.login_handler_class,
logout_handler_class=jupyter_app.logout_handler_class,
password=jupyter_app.password,
xsrf_cookies=True,
disable_check_xsrf=jupyter_app.disable_check_xsrf,
# managers
kernel_manager=kernel_manager,
contents_manager=contents_manager,
session_manager=session_manager,
kernel_spec_manager=kernel_spec_manager,
config_manager=config_manager,
# Jupyter stuff
started=now,
jinja_template_vars=jupyter_app.jinja_template_vars,
nbextensions_path=jupyter_app.nbextensions_path,
websocket_url=jupyter_app.websocket_url,
mathjax_url=jupyter_app.mathjax_url,
mathjax_config=jupyter_app.mathjax_config,
config=jupyter_app.config,
config_dir=jupyter_app.config_dir,
server_root_dir=root_dir,
jinja2_env=env,
terminals_available=False, # Set later if terminals are available
)
# allow custom overrides for the tornado web app.
settings.update(settings_overrides)
return settings
def init_handlers(self, settings):
"""Load the (URL pattern, handler) tuples for each component."""
# Order matters. The first handler to match the URL will handle the request.
handlers = []
handlers.extend(load_handlers('tree.handlers'))
handlers.extend([(r"/login", settings['login_handler_class'])])
handlers.extend([(r"/logout", settings['logout_handler_class'])])
handlers.extend(load_handlers('files.handlers'))
handlers.extend(load_handlers('view.handlers'))
handlers.extend(load_handlers('notebook.handlers'))
handlers.extend(load_handlers('nbconvert.handlers'))
handlers.extend(load_handlers('bundler.handlers'))
handlers.extend(load_handlers('kernelspecs.handlers'))
handlers.extend(load_handlers('edit.handlers'))
handlers.extend(load_handlers('services.api.handlers'))
handlers.extend(load_handlers('services.config.handlers'))
handlers.extend(load_handlers('services.kernels.handlers'))
handlers.extend(load_handlers('services.contents.handlers'))
handlers.extend(load_handlers('services.sessions.handlers'))
handlers.extend(load_handlers('services.nbconvert.handlers'))
handlers.extend(load_handlers('services.kernelspecs.handlers'))
handlers.extend(load_handlers('services.security.handlers'))
handlers.append(
(r"/nbextensions/(.*)", FileFindHandler, {
'path': settings['nbextensions_path'],
'no_cache_paths': ['/'], # don't cache anything in nbextensions
}),
)
handlers.append(
(r"/custom/(.*)", FileFindHandler, {
'path': settings['static_custom_path'],
'no_cache_paths': ['/'], # don't cache anything in custom
})
)
# register base handlers last
handlers.extend(load_handlers('base.handlers'))
# set the URL that will be redirected from `/`
handlers.append(
(r'/?', RedirectWithParams, {
'url' : settings['default_url'],
'permanent': False, # want 302, not 301
})
)
# prepend base_url onto the patterns that we match
new_handlers = []
for handler in handlers:
pattern = url_path_join(settings['base_url'], handler[0])
new_handler = tuple([pattern] + list(handler[1:]))
new_handlers.append(new_handler)
# add 404 on the end, which will catch everything that falls through
new_handlers.append((r'(.*)', Template404))
return new_handlers
class NotebookPasswordApp(JupyterApp):
"""Set a password for the notebook server.
Setting a password secures the notebook server
and removes the need for token-based authentication.
"""
description = __doc__
def _config_file_default(self):
return os.path.join(self.config_dir, 'jupyter_notebook_config.json')
def start(self):
from .auth.security import set_password
set_password(config_file=self.config_file)
self.log.info("Wrote hashed password to %s" % self.config_file)
class NbserverListApp(JupyterApp):
version = __version__
description="List currently running notebook servers."
flags = dict(
json=({'NbserverListApp': {'json': True}},
"Produce machine-readable JSON output."),
)
json = Bool(False, config=True,
help="If True, each line of output will be a JSON object with the "
"details from the server info file.")
def start(self):
if not self.json:
print("Currently running servers:")
for serverinfo in list_running_servers(self.runtime_dir):
if self.json:
print(json.dumps(serverinfo))
else:
url = serverinfo['url']
if serverinfo.get('token'):
url = url + '?token=%s' % serverinfo['token']
print(url, "::", serverinfo['notebook_dir'])
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(base_flags)
flags['no-browser']=(
{'NotebookApp' : {'open_browser' : False}},
"Don't open the notebook in a browser after startup."
)
flags['pylab']=(
{'NotebookApp' : {'pylab' : 'warn'}},
"DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib."
)
flags['no-mathjax']=(
{'NotebookApp' : {'enable_mathjax' : False}},
"""Disable MathJax
MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
flags['allow-root']=(
{'NotebookApp' : {'allow_root' : True}},
"Allow the notebook to be run from root user."
)
# Add notebook manager flags
flags.update(boolean_flag('script', 'FileContentsManager.save_script',
'DEPRECATED, IGNORED',
'DEPRECATED, IGNORED'))
aliases = dict(base_aliases)
aliases.update({
'ip': 'NotebookApp.ip',
'port': 'NotebookApp.port',
'port-retries': 'NotebookApp.port_retries',
'transport': 'KernelManager.transport',
'keyfile': 'NotebookApp.keyfile',
'certfile': 'NotebookApp.certfile',
'client-ca': 'NotebookApp.client_ca',
'notebook-dir': 'NotebookApp.notebook_dir',
'browser': 'NotebookApp.browser',
'pylab': 'NotebookApp.pylab',
})
#-----------------------------------------------------------------------------
# NotebookApp
#-----------------------------------------------------------------------------
class NotebookApp(JupyterApp):
name = 'jupyter-notebook'
version = __version__
description = """
The Jupyter HTML Notebook.
This launches a Tornado based HTML Notebook Server that serves up an
HTML5/Javascript Notebook client.
"""
examples = _examples
aliases = aliases
flags = flags
classes = [
KernelManager, Session, MappingKernelManager,
ContentsManager, FileContentsManager, NotebookNotary,
KernelSpecManager,
]
flags = Dict(flags)
aliases = Dict(aliases)
subcommands = dict(
list=(NbserverListApp, NbserverListApp.description.splitlines()[0]),
password=(NotebookPasswordApp, NotebookPasswordApp.description.splitlines()[0]),
)
_log_formatter_cls = LogFormatter
@default('log_level')
def _default_log_level(self):
return logging.INFO
@default('log_datefmt')
def _default_log_datefmt(self):
"""Exclude date from default date format"""
return "%H:%M:%S"
@default('log_format')
def _default_log_format(self):
"""override default log format to include time"""
return u"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s"
ignore_minified_js = Bool(False,
config=True,
help='Deprecated: Use minified JS file or not, mainly used during development to avoid JS recompilation',
)
# file to be opened in the notebook server
file_to_run = Unicode('', config=True)
# Network related information
allow_origin = Unicode('', config=True,
help="""Set the Access-Control-Allow-Origin header
Use '*' to allow any origin to access your server.
Takes precedence over allow_origin_pat.
"""
)
allow_origin_pat = Unicode('', config=True,
help="""Use a regular expression for the Access-Control-Allow-Origin header
Requests from an origin matching the expression will get replies with:
Access-Control-Allow-Origin: origin
where `origin` is the origin of the request.
Ignored if allow_origin is set.
"""
)
allow_credentials = Bool(False, config=True,
help="Set the Access-Control-Allow-Credentials: true header"
)
allow_root = Bool(False, config=True,
help="Whether to allow the user to run the notebook as root."
)
default_url = Unicode('/tree', config=True,
help="The default URL to redirect to from `/`"
)
ip = Unicode('localhost', config=True,
help="The IP address the notebook server will listen on."
)
@default('ip')
def _default_ip(self):
"""Return localhost if available, 127.0.0.1 otherwise.
On some (horribly broken) systems, localhost cannot be bound.
"""
s = socket.socket()
try:
s.bind(('localhost', 0))
except socket.error as e:
self.log.warning("Cannot bind to localhost, using 127.0.0.1 as default ip\n%s", e)
return '127.0.0.1'
else:
s.close()
return 'localhost'
@validate('ip')
def _validate_ip(self, proposal):
value = proposal['value']
if value == u'*':
value = u''
return value
port = Integer(8888, config=True,
help="The port the notebook server will listen on."
)
port_retries = Integer(50, config=True,
help="The number of additional ports to try if the specified port is not available."
)
certfile = Unicode(u'', config=True,
help="""The full path to an SSL/TLS certificate file."""
)
keyfile = Unicode(u'', config=True,
help="""The full path to a private key file for usage with SSL/TLS."""
)
client_ca = Unicode(u'', config=True,
help="""The full path to a certificate authority certificate for SSL/TLS client authentication."""
)
cookie_secret_file = Unicode(config=True,
help="""The file where the cookie secret is stored."""
)
@default('cookie_secret_file')
def _default_cookie_secret_file(self):
return os.path.join(self.runtime_dir, 'notebook_cookie_secret')
cookie_secret = Bytes(b'', config=True,
help="""The random bytes used to secure cookies.
By default this is a new random number every time you start the Notebook.
Set it to a value in a config file to enable logins to persist across server sessions.
Note: Cookie secrets should be kept private, do not share config files with
cookie_secret stored in plaintext (you can read the value from a file).
"""
)
@default('cookie_secret')
def _default_cookie_secret(self):
if os.path.exists(self.cookie_secret_file):
with io.open(self.cookie_secret_file, 'rb') as f:
return f.read()
else:
secret = encodebytes(os.urandom(1024))
self._write_cookie_secret_file(secret)
return secret
def _write_cookie_secret_file(self, secret):
"""write my secret to my secret_file"""
self.log.info("Writing notebook server cookie secret to %s", self.cookie_secret_file)
with io.open(self.cookie_secret_file, 'wb') as f:
f.write(secret)
try:
os.chmod(self.cookie_secret_file, 0o600)
except OSError:
self.log.warning(
"Could not set permissions on %s",
self.cookie_secret_file
)
token = Unicode('<generated>',
help="""Token used for authenticating first-time connections to the server.
When no password is enabled,
the default is to generate a new, random token.
Setting to an empty string disables authentication altogether, which is NOT RECOMMENDED.
"""
).tag(config=True)
one_time_token = Unicode(
help="""One-time token used for opening a browser.
Once used, this token cannot be used again.
"""
)
_token_generated = True
@default('token')
def _token_default(self):
if self.password:
# no token if password is enabled
self._token_generated = False
return u''
else:
self._token_generated = True
return binascii.hexlify(os.urandom(24)).decode('ascii')
@observe('token')
def _token_changed(self, change):
self._token_generated = False
password = Unicode(u'', config=True,
help="""Hashed password to use for web authentication.
To generate, type in a python/IPython shell:
from notebook.auth import passwd; passwd()
The string should be of the form type:salt:hashed-password.
"""
)
password_required = Bool(False, config=True,
help="""Forces users to use a password for the Notebook server.
This is useful in a multi-user environment, for instance when
everybody in the LAN can access each other's machine through ssh.
In such a case, serving the notebook server on localhost is not secure
since any user can connect to the notebook server via ssh.
"""
)
disable_check_xsrf = Bool(False, config=True,
help="""Disable cross-site-request-forgery protection
Jupyter notebook 4.3.1 introduces protection from cross-site request forgeries,
requiring API requests to either:
- originate from pages served by this server (validated with XSRF cookie and token), or
- authenticate with a token
Some anonymous compute resources still desire the ability to run code,
completely without authentication.
These services can disable all authentication and security checks,
with the full knowledge of what that implies.
"""
)
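# Hedged illustration (editor-added): with token authentication left enabled,
# an API client typically authenticates by sending the token in a header,
# e.g. (the endpoint shown is illustrative)::
#
#     curl -H "Authorization: token <token>" http://localhost:8888/api/status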
open_browser = Bool(True, config=True,
help="""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
""")
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webapp_settings = Dict(config=True,
help="DEPRECATED, use tornado_settings"
)
@observe('webapp_settings')
def _update_webapp_settings(self, change):
self.log.warning("\n webapp_settings is deprecated, use tornado_settings.\n")
self.tornado_settings = change['new']
tornado_settings = Dict(config=True,
help="Supply overrides for the tornado.web.Application that the "
"Jupyter notebook uses.")
terminado_settings = Dict(config=True,
help='Supply overrides for terminado. Currently only supports "shell_command".')
cookie_options = Dict(config=True,
help="Extra keyword arguments to pass to `set_secure_cookie`."
" See tornado's set_secure_cookie docs for details."
)
ssl_options = Dict(config=True,
help="""Supply SSL options for the tornado HTTPServer.
See the tornado docs for details.""")
jinja_environment_options = Dict(config=True,
help="Supply extra arguments that will be passed to Jinja environment.")
jinja_template_vars = Dict(
config=True,
help="Extra variables to supply to jinja templates when rendering.",
)
enable_mathjax = Bool(True, config=True,
help="""Whether to enable MathJax for typesetting math/TeX
MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
@observe('enable_mathjax')
def _update_enable_mathjax(self, change):
"""set mathjax url to empty if mathjax is disabled"""
if not change['new']:
self.mathjax_url = u''
base_url = Unicode('/', config=True,
help='''The base URL for the notebook server.
Leading and trailing slashes can be omitted,
and will automatically be added.
''')
@validate('base_url')
def _update_base_url(self, proposal):
value = proposal['value']
if not value.startswith('/'):
value = '/' + value
if not value.endswith('/'):
value = value + '/'
return value
base_project_url = Unicode('/', config=True, help="""DEPRECATED use base_url""")
@observe('base_project_url')
def _update_base_project_url(self, change):
self.log.warning("base_project_url is deprecated, use base_url")
self.base_url = change['new']
extra_static_paths = List(Unicode(), config=True,
help="""Extra paths to search for serving static files.
This allows adding javascript/css files that will be served from the notebook server machine,
or overriding individual files shipped with the IPython notebook."""
)
@property
def static_file_path(self):
"""return extra paths + the default location"""
return self.extra_static_paths + [DEFAULT_STATIC_FILES_PATH]
static_custom_path = List(Unicode(),
help="""Path to search for custom.js, css"""
)
@default('static_custom_path')
def _default_static_custom_path(self):
return [
os.path.join(d, 'custom') for d in (
self.config_dir,
DEFAULT_STATIC_FILES_PATH)
]
extra_template_paths = List(Unicode(), config=True,
help="""Extra paths to search for serving jinja templates.
Can be used to override templates from notebook.templates."""
)
@property
def template_file_path(self):
"""return extra paths + the default locations"""
return self.extra_template_paths + DEFAULT_TEMPLATE_PATH_LIST
extra_nbextensions_path = List(Unicode(), config=True,
help="""extra paths to look for Javascript notebook extensions"""
)
@property
def nbextensions_path(self):
"""The path to look for Javascript notebook extensions"""
path = self.extra_nbextensions_path + jupyter_path('nbextensions')
# FIXME: remove IPython nbextensions path after a migration period
try:
from IPython.paths import get_ipython_dir
except ImportError:
pass
else:
path.append(os.path.join(get_ipython_dir(), 'nbextensions'))
return path
websocket_url = Unicode("", config=True,
help="""The base URL for websockets,
if it differs from the HTTP server (hint: it almost certainly doesn't).
Should be in the form of an HTTP origin: ws[s]://hostname[:port]
"""
)
mathjax_url = Unicode("", config=True,
help="""A custom url for MathJax.js.
Should be in the form of a case-sensitive url to MathJax,
for example: /static/components/MathJax/MathJax.js
"""
)
@default('mathjax_url')
def _default_mathjax_url(self):
if not self.enable_mathjax:
return u''
static_url_prefix = self.tornado_settings.get("static_url_prefix", "static")
return url_path_join(static_url_prefix, 'components', 'MathJax', 'MathJax.js')
@observe('mathjax_url')
def _update_mathjax_url(self, change):
new = change['new']
if new and not self.enable_mathjax:
# enable_mathjax=False overrides mathjax_url
self.mathjax_url = u''
else:
self.log.info("Using MathJax: %s", new)
mathjax_config = Unicode("TeX-AMS-MML_HTMLorMML-full,Safe", config=True,
help="""The MathJax.js configuration file that is to be used."""
)
@observe('mathjax_config')
def _update_mathjax_config(self, change):
self.log.info("Using MathJax configuration file: %s", change['new'])
contents_manager_class = Type(
default_value=LargeFileManager,
klass=ContentsManager,
config=True,
help='The notebook manager class to use.'
)
kernel_manager_class = Type(
default_value=MappingKernelManager,
config=True,
help='The kernel manager class to use.'
)
session_manager_class = Type(
default_value=SessionManager,
config=True,
help='The session manager class to use.'
)
config_manager_class = Type(
default_value=ConfigManager,
config = True,
help='The config manager class to use'
)
kernel_spec_manager = Instance(KernelSpecManager, allow_none=True)
kernel_spec_manager_class = Type(
default_value=KernelSpecManager,
config=True,
help="""
The kernel spec manager class to use. Should be a subclass
of `jupyter_client.kernelspec.KernelSpecManager`.
The API of KernelSpecManager is provisional and might change
without warning between this version of Jupyter and the next stable one.
"""
)
login_handler_class = Type(
default_value=LoginHandler,
klass=web.RequestHandler,
config=True,
help='The login handler class to use.',
)
logout_handler_class = Type(
default_value=LogoutHandler,
klass=web.RequestHandler,
config=True,
help='The logout handler class to use.',
)
trust_xheaders = Bool(False, config=True,
help=("Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers"
"sent by the upstream reverse proxy. Necessary if the proxy handles SSL")
)
info_file = Unicode()
@default('info_file')
def _default_info_file(self):
info_file = "nbserver-%s.json" % os.getpid()
return os.path.join(self.runtime_dir, info_file)
pylab = Unicode('disabled', config=True,
help="""
DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
"""
)
@observe('pylab')
def _update_pylab(self, change):
"""when --pylab is specified, display a warning and exit"""
if change['new'] != 'warn':
backend = ' %s' % change['new']
else:
backend = ''
self.log.error("Support for specifying --pylab on the command line has been removed.")
self.log.error(
"Please use `%pylab{0}` or `%matplotlib{0}` in the notebook itself.".format(backend)
)
self.exit(1)
notebook_dir = Unicode(config=True,
help="The directory to use for notebooks and kernels."
)
@default('notebook_dir')
def _default_notebook_dir(self):
if self.file_to_run:
return os.path.dirname(os.path.abspath(self.file_to_run))
else:
return py3compat.getcwd()
@validate('notebook_dir')
def _notebook_dir_validate(self, proposal):
value = proposal['value']
# Strip any trailing slashes
# *except* if it's root
_, path = os.path.splitdrive(value)
if path == os.sep:
return value
value = value.rstrip(os.sep)
if not os.path.isabs(value):
# If we receive a non-absolute path, make it absolute.
value = os.path.abspath(value)
if not os.path.isdir(value):
raise TraitError("No such notebook dir: %r" % value)
return value
@observe('notebook_dir')
def _update_notebook_dir(self, change):
"""Do a bit of validation of the notebook dir."""
# setting App.notebook_dir implies setting notebook and kernel dirs as well
new = change['new']
self.config.FileContentsManager.root_dir = new
self.config.MappingKernelManager.root_dir = new
# TODO: Remove me in notebook 5.0
server_extensions = List(Unicode(), config=True,
help=("DEPRECATED use the nbserver_extensions dict instead")
)
@observe('server_extensions')
def _update_server_extensions(self, change):
self.log.warning("server_extensions is deprecated, use nbserver_extensions")
self.server_extensions = change['new']
nbserver_extensions = Dict({}, config=True,
help=("Dict of Python modules to load as notebook server extensions."
"Entry values can be used to enable and disable the loading of"
"the extensions. The extensions will be loaded in alphabetical "
"order.")
)
reraise_server_extension_failures = Bool(
False,
config=True,
help="Reraise exceptions encountered loading server extensions?",
)
iopub_msg_rate_limit = Float(1000, config=True, help="""(msgs/sec)
Maximum rate at which messages can be sent on iopub before they are
limited.""")
iopub_data_rate_limit = Float(1000000, config=True, help="""(bytes/sec)
Maximum rate at which messages can be sent on iopub before they are
limited.""")
rate_limit_window = Float(3, config=True, help="""(sec) Time window used to
check the message and data rate limits.""")
def parse_command_line(self, argv=None):
super(NotebookApp, self).parse_command_line(argv)
if self.extra_args:
arg0 = self.extra_args[0]
f = os.path.abspath(arg0)
self.argv.remove(arg0)
if not os.path.exists(f):
self.log.critical("No such file or directory: %s", f)
self.exit(1)
# Use config here, to ensure that it takes higher priority than
# anything that comes from the config dirs.
c = Config()
if os.path.isdir(f):
c.NotebookApp.notebook_dir = f
elif os.path.isfile(f):
c.NotebookApp.file_to_run = f
self.update_config(c)
def init_configurables(self):
self.kernel_spec_manager = self.kernel_spec_manager_class(
parent=self,
)
self.kernel_manager = self.kernel_manager_class(
parent=self,
log=self.log,
connection_dir=self.runtime_dir,
kernel_spec_manager=self.kernel_spec_manager,
)
self.contents_manager = self.contents_manager_class(
parent=self,
log=self.log,
)
self.session_manager = self.session_manager_class(
parent=self,
log=self.log,
kernel_manager=self.kernel_manager,
contents_manager=self.contents_manager,
)
self.config_manager = self.config_manager_class(
parent=self,
log=self.log,
)
def init_logging(self):
# This prevents double log messages because tornado uses a root logger that
# self.log is a child of. The logging module dispatches log messages to a log
# and all of its ancestors until propagate is set to False.
self.log.propagate = False
for log in app_log, access_log, gen_log:
# consistent log output name (NotebookApp instead of tornado.access, etc.)
log.name = self.log.name
# hook up tornado's loggers to our app handlers
logger = logging.getLogger('tornado')
logger.propagate = True
logger.parent = self.log
logger.setLevel(self.log.level)
def init_webapp(self):
"""initialize tornado webapp and httpserver"""
self.tornado_settings['allow_origin'] = self.allow_origin
if self.allow_origin_pat:
self.tornado_settings['allow_origin_pat'] = re.compile(self.allow_origin_pat)
self.tornado_settings['allow_credentials'] = self.allow_credentials
self.tornado_settings['cookie_options'] = self.cookie_options
self.tornado_settings['token'] = self.token
if (self.open_browser or self.file_to_run) and not self.password:
self.one_time_token = binascii.hexlify(os.urandom(24)).decode('ascii')
self.tornado_settings['one_time_token'] = self.one_time_token
# ensure default_url starts with base_url
if not self.default_url.startswith(self.base_url):
self.default_url = url_path_join(self.base_url, self.default_url)
if self.password_required and (not self.password):
self.log.critical("Notebook servers are configured to only be run with a password.")
self.log.critical("Hint: run the following command to set a password")
self.log.critical("\t$ python -m notebook.auth password")
sys.exit(1)
self.web_app = NotebookWebApplication(
self, self.kernel_manager, self.contents_manager,
self.session_manager, self.kernel_spec_manager,
self.config_manager,
self.log, self.base_url, self.default_url, self.tornado_settings,
self.jinja_environment_options
)
ssl_options = self.ssl_options
if self.certfile:
ssl_options['certfile'] = self.certfile
if self.keyfile:
ssl_options['keyfile'] = self.keyfile
if self.client_ca:
ssl_options['ca_certs'] = self.client_ca
if not ssl_options:
# None indicates no SSL config
ssl_options = None
else:
# SSL may be missing, so only import it if it's to be used
import ssl
# Disable SSLv3 by default, since its use is discouraged.
ssl_options.setdefault('ssl_version', ssl.PROTOCOL_TLSv1)
if ssl_options.get('ca_certs', False):
ssl_options.setdefault('cert_reqs', ssl.CERT_REQUIRED)
self.login_handler_class.validate_security(self, ssl_options=ssl_options)
self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options,
xheaders=self.trust_xheaders)
success = None
for port in random_ports(self.port, self.port_retries+1):
try:
self.http_server.listen(port, self.ip)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
self.log.info('The port %i is already in use, trying another port.' % port)
continue
elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
self.log.warning("Permission to listen on port %i denied" % port)
continue
else:
raise
else:
self.port = port
success = True
break
if not success:
self.log.critical('ERROR: the notebook server could not be started because '
'no available port could be found.')
self.exit(1)
@property
def display_url(self):
ip = self.ip if self.ip else '[all ip addresses on your system]'
url = self._url(ip)
if self.token:
# Don't log full token if it came from config
token = self.token if self._token_generated else '...'
url = url_concat(url, {'token': token})
return url
@property
def connection_url(self):
ip = self.ip if self.ip else 'localhost'
return self._url(ip)
def _url(self, ip):
proto = 'https' if self.certfile else 'http'
return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url)
def init_terminals(self):
try:
from .terminal import initialize
initialize(self.web_app, self.notebook_dir, self.connection_url, self.terminado_settings)
self.web_app.settings['terminals_available'] = True
except ImportError as e:
log = self.log.debug if sys.platform == 'win32' else self.log.warning
log("Terminals not available (error was %s)", e)
def init_signal(self):
if not sys.platform.startswith('win') and sys.stdin.isatty():
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._signal_stop)
if hasattr(signal, 'SIGUSR1'):
# Windows doesn't support SIGUSR1
signal.signal(signal.SIGUSR1, self._signal_info)
if hasattr(signal, 'SIGINFO'):
# only on BSD-based systems
signal.signal(signal.SIGINFO, self._signal_info)
def _handle_sigint(self, sig, frame):
"""SIGINT handler spawns confirmation dialog"""
# register more forceful signal handler for ^C^C case
signal.signal(signal.SIGINT, self._signal_stop)
# request confirmation dialog in bg thread, to avoid
# blocking the App
thread = threading.Thread(target=self._confirm_exit)
thread.daemon = True
thread.start()
def _restore_sigint_handler(self):
"""callback for restoring original SIGINT handler"""
signal.signal(signal.SIGINT, self._handle_sigint)
def _confirm_exit(self):
"""confirm shutdown on ^C
A second ^C, or answering 'y' within 5s will cause shutdown,
otherwise original SIGINT handler will be restored.
This doesn't work on Windows.
"""
info = self.log.info
info('interrupted')
print(self.notebook_info())
sys.stdout.write("Shutdown this notebook server (y/[n])? ")
sys.stdout.flush()
r,w,x = select.select([sys.stdin], [], [], 5)
if r:
line = sys.stdin.readline()
if line.lower().startswith('y') and 'n' not in line.lower():
self.log.critical("Shutdown confirmed")
ioloop.IOLoop.current().stop()
return
else:
print("No answer for 5s:", end=' ')
print("resuming operation...")
# no answer, or answer is no:
# set it back to original SIGINT handler
# use IOLoop.add_callback because signal.signal must be called
# from main thread
ioloop.IOLoop.current().add_callback(self._restore_sigint_handler)
def _signal_stop(self, sig, frame):
self.log.critical("received signal %s, stopping", sig)
ioloop.IOLoop.current().stop()
def _signal_info(self, sig, frame):
print(self.notebook_info())
def init_components(self):
"""Check the components submodule, and warn if it's unclean"""
# TODO: this should still check, but now we use bower, not git submodule
pass
def init_server_extensions(self):
"""Load any extensions specified by config.
Import the module, then call the load_jupyter_server_extension function,
if one exists.
The extension API is experimental, and may change in future releases.
"""
# TODO: Remove me in notebook 5.0
for modulename in self.server_extensions:
# Don't override the disable state of the extension if it already exists
# in the new traitlet
if modulename not in self.nbserver_extensions:
self.nbserver_extensions[modulename] = True
# Load server extensions with ConfigManager.
# This enables merging on keys, which we want for extension enabling.
# Regular config loading only merges at the class level,
# so each level (user > env > system) clobbers the previous.
config_path = jupyter_config_path()
if self.config_dir not in config_path:
# add self.config_dir to the front, if set manually
config_path.insert(0, self.config_dir)
manager = ConfigManager(read_config_path=config_path)
section = manager.get(self.config_file_name)
extensions = section.get('NotebookApp', {}).get('nbserver_extensions', {})
for modulename, enabled in self.nbserver_extensions.items():
if modulename not in extensions:
# not present in `extensions` means it comes from Python config,
# so we need to add it.
# Otherwise, trust ConfigManager to have loaded it.
extensions[modulename] = enabled
for modulename, enabled in sorted(extensions.items()):
if enabled:
try:
mod = importlib.import_module(modulename)
func = getattr(mod, 'load_jupyter_server_extension', None)
if func is not None:
func(self)
except Exception:
if self.reraise_server_extension_failures:
raise
self.log.warning("Error loading server extension %s", modulename,
exc_info=True)
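# Hedged sketch (editor-added, not part of the notebook package): a server
# extension is simply an importable module that defines
# `load_jupyter_server_extension`; a module like the following (the name
# `myextension` is hypothetical), enabled via
# `c.NotebookApp.nbserver_extensions = {'myextension': True}`, would be
# picked up by `init_server_extensions` above::
#
#     def load_jupyter_server_extension(nbapp):
#         nbapp.log.info("myextension loaded")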
def init_mime_overrides(self):
# On some Windows machines, an application has registered an incorrect
# mimetype for CSS in the registry. Tornado uses this when serving
# .css files, causing browsers to reject the stylesheet. We know the
# mimetype always needs to be text/css, so we override it here.
mimetypes.add_type('text/css', '.css')
@catch_config_error
def initialize(self, argv=None):
super(NotebookApp, self).initialize(argv)
self.init_logging()
if self._dispatching:
return
self.init_configurables()
self.init_components()
self.init_webapp()
self.init_terminals()
self.init_signal()
self.init_server_extensions()
self.init_mime_overrides()
def cleanup_kernels(self):
"""Shutdown all kernels.
The kernels will shut down on their own when this process no longer exists,
but explicit shutdown allows the KernelManagers to cleanup the connection files.
"""
self.log.info('Shutting down kernels')
self.kernel_manager.shutdown_all()
def notebook_info(self):
"Return the current working directory and the server url information"
info = self.contents_manager.info_string() + "\n"
info += "%d active kernels \n" % len(self.kernel_manager._kernels)
return info + "The Jupyter Notebook is running at: %s" % self.display_url
def server_info(self):
"""Return a JSONable dict of information about this server."""
return {'url': self.connection_url,
'hostname': self.ip if self.ip else 'localhost',
'port': self.port,
'secure': bool(self.certfile),
'base_url': self.base_url,
'token': self.token,
'notebook_dir': os.path.abspath(self.notebook_dir),
'password': bool(self.password),
'pid': os.getpid(),
}
def write_server_info_file(self):
"""Write the result of server_info() to the JSON file info_file."""
with open(self.info_file, 'w') as f:
json.dump(self.server_info(), f, indent=2, sort_keys=True)
def remove_server_info_file(self):
"""Remove the nbserver-<pid>.json file created for this server.
Ignores the error raised when the file has already been removed.
"""
try:
os.unlink(self.info_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def start(self):
""" Start the Notebook server app, after initialization
This method takes no arguments so all configuration and initialization
must be done prior to calling this method."""
if not self.allow_root:
# check if we are running as root, and abort if it's not allowed
try:
uid = os.geteuid()
except AttributeError:
uid = -1 # anything nonzero here, since we can't check UID assume non-root
if uid == 0:
self.log.critical("Running as root is not recommended. Use --allow-root to bypass.")
self.exit(1)
super(NotebookApp, self).start()
info = self.log.info
for line in self.notebook_info().split("\n"):
info(line)
info("Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).")
if 'dev' in notebook.__version__:
info("Welcome to Project Jupyter! Explore the various tools available"
" and their corresponding documentation. If you are interested"
" in contributing to the platform, please visit the community"
"resources section at http://jupyter.org/community.html.")
self.write_server_info_file()
if self.open_browser or self.file_to_run:
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warning('No web browser found: %s.' % e)
browser = None
if self.file_to_run:
if not os.path.exists(self.file_to_run):
self.log.critical("%s does not exist" % self.file_to_run)
self.exit(1)
relpath = os.path.relpath(self.file_to_run, self.notebook_dir)
uri = url_escape(url_path_join('notebooks', *relpath.split(os.sep)))
else:
# default_url contains base_url, but so does connection_url
uri = self.default_url[len(self.base_url):]
if self.one_time_token:
uri = url_concat(uri, {'token': self.one_time_token})
if browser:
b = lambda : browser.open(url_path_join(self.connection_url, uri),
new=2)
threading.Thread(target=b).start()
if self.token and self._token_generated:
# log full URL with generated token, so there's a copy/pasteable link
# with auth info.
self.log.critical('\n'.join([
'\n',
'Copy/paste this URL into your browser when you connect for the first time,',
'to login with a token:',
' %s' % url_concat(self.connection_url, {'token': self.token}),
]))
self.io_loop = ioloop.IOLoop.current()
if sys.platform.startswith('win'):
# add no-op to wake every 5s
# to handle signals that may be ignored by the inner loop
pc = ioloop.PeriodicCallback(lambda : None, 5000)
pc.start()
try:
self.io_loop.start()
except KeyboardInterrupt:
info("Interrupted...")
finally:
self.remove_server_info_file()
self.cleanup_kernels()
def stop(self):
def _stop():
self.http_server.stop()
self.io_loop.stop()
self.io_loop.add_callback(_stop)
def list_running_servers(runtime_dir=None):
"""Iterate over the server info files of running notebook servers.
Given a runtime directory, find nbserver-* files in that directory,
and yield dicts of their information, each one pertaining to
a currently running notebook server instance.
"""
if runtime_dir is None:
runtime_dir = jupyter_runtime_dir()
# The runtime dir might not exist
if not os.path.isdir(runtime_dir):
return
for file in os.listdir(runtime_dir):
if file.startswith('nbserver-'):
with io.open(os.path.join(runtime_dir, file), encoding='utf-8') as f:
info = json.load(f)
# Simple check whether that process is really still running
# Also remove leftover files from IPython 2.x without a pid field
if ('pid' in info) and check_pid(info['pid']):
yield info
else:
# If the process has died, try to delete its info file
try:
os.unlink(os.path.join(runtime_dir, file))
except OSError:
pass # TODO: This should warn or log or something
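# Hedged usage sketch (editor-added): typical consumption of
# `list_running_servers`, mirroring what `NbserverListApp.start` above does::
#
#     for info in list_running_servers():
#         print(info['url'], '::', info['notebook_dir'])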
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
main = launch_new_instance = NotebookApp.launch_instance
|
bsd-2-clause
|
HyperloopTeam/FullOpenMDAO
|
lib/python2.7/site-packages/matplotlib/tests/test_mlab.py
|
9
|
117623
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import tempfile
from numpy.testing import assert_allclose, assert_array_equal
import numpy.ma.testutils as matest
import numpy as np
from nose.tools import (assert_equal, assert_almost_equal, assert_not_equal,
assert_true, assert_raises)
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
from matplotlib.testing.decorators import knownfailureif, CleanupTestCase
try:
from mpl_toolkits.natgrid import _natgrid
HAS_NATGRID = True
except ImportError:
HAS_NATGRID = False
class general_testcase(CleanupTestCase):
def test_colinear_pca(self):
a = mlab.PCA._get_colinear()
pca = mlab.PCA(a)
assert_allclose(pca.fracs[2:], 0., atol=1e-8)
assert_allclose(pca.Y[:, 2:], 0., atol=1e-8)
def test_prctile(self):
# test odd lengths
x = [1, 2, 3]
assert_equal(mlab.prctile(x, 50), np.median(x))
# test even lengths
x = [1, 2, 3, 4]
assert_equal(mlab.prctile(x, 50), np.median(x))
# derived from email sent by jason-sage to MPL-user on 20090914
ob1 = [1, 1, 2, 2, 1, 2, 4, 3, 2, 2, 2, 3,
4, 5, 6, 7, 8, 9, 7, 6, 4, 5, 5]
p = [0, 75, 100]
expected = [1, 5.5, 9]
# test vectorized
actual = mlab.prctile(ob1, p)
assert_allclose(expected, actual)
# test scalar
for pi, expectedi in zip(p, expected):
actuali = mlab.prctile(ob1, pi)
assert_allclose(expectedi, actuali)
def test_norm(self):
np.random.seed(0)
N = 1000
x = np.random.standard_normal(N)
targ = np.linalg.norm(x)
res = mlab._norm(x)
assert_almost_equal(targ, res)
class spacing_testcase(CleanupTestCase):
def test_logspace_tens(self):
xmin = .01
xmax = 1000.
N = 6
res = mlab.logspace(xmin, xmax, N)
targ = np.logspace(np.log10(xmin), np.log10(xmax), N)
assert_allclose(targ, res)
def test_logspace_primes(self):
xmin = .03
xmax = 1313.
N = 7
res = mlab.logspace(xmin, xmax, N)
targ = np.logspace(np.log10(xmin), np.log10(xmax), N)
assert_allclose(targ, res)
def test_logspace_none(self):
xmin = .03
xmax = 1313.
N = 0
res = mlab.logspace(xmin, xmax, N)
targ = np.logspace(np.log10(xmin), np.log10(xmax), N)
assert_array_equal(targ, res)
assert_equal(res.size, 0)
def test_logspace_single(self):
xmin = .03
xmax = 1313.
N = 1
res = mlab.logspace(xmin, xmax, N)
targ = np.logspace(np.log10(xmin), np.log10(xmax), N)
assert_array_equal(targ, res)
assert_equal(res.size, 1)
class stride_testcase(CleanupTestCase):
def get_base(self, x):
y = x
while y.base is not None:
y = y.base
return y
def calc_window_target(self, x, NFFT, noverlap=0):
'''This is an adaptation of the original window extraction
algorithm. It is kept here so the tests can verify that the new
implementation produces the same result.'''
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
result = np.zeros((NFFT, n))
# extract each slice of length NFFT
for i in range(n):
result[:, i] = x[ind[i]:ind[i]+NFFT]
return result
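# Hedged spot-check (editor-added): the adapted reference implementation above
# should agree with mlab.stride_windows for one simple case; the tests below
# exercise the comparison across many window sizes and overlaps.
def test_calc_window_target_matches_stride_windows(self):
    x = np.arange(20)
    assert_array_equal(self.calc_window_target(x, 5, 2),
                       mlab.stride_windows(x, 5, 2))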
def test_stride_windows_2D_ValueError(self):
x = np.arange(10)[np.newaxis]
assert_raises(ValueError, mlab.stride_windows, x, 5)
def test_stride_windows_0D_ValueError(self):
x = np.array(0)
assert_raises(ValueError, mlab.stride_windows, x, 5)
def test_stride_windows_noverlap_gt_n_ValueError(self):
x = np.arange(10)
assert_raises(ValueError, mlab.stride_windows, x, 2, 3)
def test_stride_windows_noverlap_eq_n_ValueError(self):
x = np.arange(10)
assert_raises(ValueError, mlab.stride_windows, x, 2, 2)
def test_stride_windows_n_gt_lenx_ValueError(self):
x = np.arange(10)
assert_raises(ValueError, mlab.stride_windows, x, 11)
def test_stride_windows_n_lt_1_ValueError(self):
x = np.arange(10)
assert_raises(ValueError, mlab.stride_windows, x, 0)
def test_stride_repeat_2D_ValueError(self):
x = np.arange(10)[np.newaxis]
assert_raises(ValueError, mlab.stride_repeat, x, 5)
def test_stride_repeat_axis_lt_0_ValueError(self):
x = np.array(0)
assert_raises(ValueError, mlab.stride_repeat, x, 5, axis=-1)
def test_stride_repeat_axis_gt_1_ValueError(self):
x = np.array(0)
assert_raises(ValueError, mlab.stride_repeat, x, 5, axis=2)
def test_stride_repeat_n_lt_1_ValueError(self):
x = np.arange(10)
assert_raises(ValueError, mlab.stride_repeat, x, 0)
def test_stride_repeat_n1_axis0(self):
x = np.arange(10)
y = mlab.stride_repeat(x, 1)
assert_equal((1, ) + x.shape, y.shape)
assert_array_equal(x, y.flat)
assert_true(self.get_base(y) is x)
def test_stride_repeat_n1_axis1(self):
x = np.arange(10)
y = mlab.stride_repeat(x, 1, axis=1)
assert_equal(x.shape + (1, ), y.shape)
assert_array_equal(x, y.flat)
assert_true(self.get_base(y) is x)
def test_stride_repeat_n5_axis0(self):
x = np.arange(10)
y = mlab.stride_repeat(x, 5)
yr = np.repeat(x[np.newaxis], 5, axis=0)
assert_equal(yr.shape, y.shape)
assert_array_equal(yr, y)
assert_equal((5, ) + x.shape, y.shape)
assert_true(self.get_base(y) is x)
def test_stride_repeat_n5_axis1(self):
x = np.arange(10)
y = mlab.stride_repeat(x, 5, axis=1)
yr = np.repeat(x[np.newaxis], 5, axis=0).T
assert_equal(yr.shape, y.shape)
assert_array_equal(yr, y)
assert_equal(x.shape + (5, ), y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n1_noverlap0_axis0(self):
x = np.arange(10)
y = mlab.stride_windows(x, 1)
yt = self.calc_window_target(x, 1)
assert_equal(yt.shape, y.shape)
assert_array_equal(yt, y)
assert_equal((1, ) + x.shape, y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n1_noverlap0_axis1(self):
x = np.arange(10)
y = mlab.stride_windows(x, 1, axis=1)
yt = self.calc_window_target(x, 1).T
assert_equal(yt.shape, y.shape)
assert_array_equal(yt, y)
assert_equal(x.shape + (1, ), y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n5_noverlap0_axis0(self):
x = np.arange(100)
y = mlab.stride_windows(x, 5)
yt = self.calc_window_target(x, 5)
assert_equal(yt.shape, y.shape)
assert_array_equal(yt, y)
assert_equal((5, 20), y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n5_noverlap0_axis1(self):
x = np.arange(100)
y = mlab.stride_windows(x, 5, axis=1)
yt = self.calc_window_target(x, 5).T
assert_equal(yt.shape, y.shape)
assert_array_equal(yt, y)
assert_equal((20, 5), y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n15_noverlap2_axis0(self):
x = np.arange(100)
y = mlab.stride_windows(x, 15, 2)
yt = self.calc_window_target(x, 15, 2)
assert_equal(yt.shape, y.shape)
assert_array_equal(yt, y)
assert_equal((15, 7), y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n15_noverlap2_axis1(self):
x = np.arange(100)
y = mlab.stride_windows(x, 15, 2, axis=1)
yt = self.calc_window_target(x, 15, 2).T
assert_equal(yt.shape, y.shape)
assert_array_equal(yt, y)
assert_equal((7, 15), y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n13_noverlapn3_axis0(self):
x = np.arange(100)
y = mlab.stride_windows(x, 13, -3)
yt = self.calc_window_target(x, 13, -3)
assert_equal(yt.shape, y.shape)
assert_array_equal(yt, y)
assert_equal((13, 6), y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n13_noverlapn3_axis1(self):
x = np.arange(100)
y = mlab.stride_windows(x, 13, -3, axis=1)
yt = self.calc_window_target(x, 13, -3).T
assert_equal(yt.shape, y.shape)
assert_array_equal(yt, y)
assert_equal((6, 13), y.shape)
assert_true(self.get_base(y) is x)
def test_stride_windows_n32_noverlap0_axis0_unflatten(self):
n = 32
x = np.arange(n)[np.newaxis]
x1 = np.tile(x, (21, 1))
x2 = x1.flatten()
y = mlab.stride_windows(x2, n)
assert_equal(y.shape, x1.T.shape)
assert_array_equal(y, x1.T)
def test_stride_windows_n32_noverlap0_axis1_unflatten(self):
n = 32
x = np.arange(n)[np.newaxis]
x1 = np.tile(x, (21, 1))
x2 = x1.flatten()
y = mlab.stride_windows(x2, n, axis=1)
assert_equal(y.shape, x1.shape)
assert_array_equal(y, x1)
def test_stride_ensure_integer_type(self):
N = 100
x = np.empty(N + 20, dtype='>f4')
x.fill(np.NaN)
y = x[10:-10]
y.fill(0.3)
# prior to #3845 this led to corrupt memory access
y_strided = mlab.stride_windows(y, n=33, noverlap=0.6)
assert_array_equal(y_strided, 0.3)
# prior to #3845 this led to corrupt memory access
y_strided = mlab.stride_windows(y, n=33.3, noverlap=0)
assert_array_equal(y_strided, 0.3)
# even prior to #3845 no problematic configuration could be found
# here; however, let's be sure one is not accidentally
# introduced
y_strided = mlab.stride_repeat(y, n=33.815)
assert_array_equal(y_strided, 0.3)
class csv_testcase(CleanupTestCase):
def setUp(self):
if six.PY3:
self.fd = tempfile.TemporaryFile(suffix='csv', mode="w+",
newline='')
else:
self.fd = tempfile.TemporaryFile(suffix='csv', mode="wb+")
def tearDown(self):
self.fd.close()
def test_recarray_csv_roundtrip(self):
expected = np.recarray((99,),
[(str('x'), np.float),
(str('y'), np.float),
(str('t'), np.float)])
# initialising all values: uninitialised memory sometimes produces
# floats that do not round-trip to string and back.
expected['x'][:] = np.linspace(-1e9, -1, 99)
expected['y'][:] = np.linspace(1, 1e9, 99)
expected['t'][:] = np.linspace(0, 0.01, 99)
mlab.rec2csv(expected, self.fd)
self.fd.seek(0)
actual = mlab.csv2rec(self.fd)
assert_allclose(expected['x'], actual['x'])
assert_allclose(expected['y'], actual['y'])
assert_allclose(expected['t'], actual['t'])
def test_rec2csv_bad_shape_ValueError(self):
bad = np.recarray((99, 4), [(str('x'), np.float),
(str('y'), np.float)])
# the bad recarray should trigger a ValueError for having ndim > 1.
assert_raises(ValueError, mlab.rec2csv, bad, self.fd)
class window_testcase(CleanupTestCase):
def setUp(self):
np.random.seed(0)
self.n = 1000
self.x = np.arange(0., self.n)
self.sig_rand = np.random.standard_normal(self.n) + 100.
self.sig_ones = np.ones_like(self.x)
self.sig_slope = np.linspace(-10., 90., self.n)
def check_window_apply_repeat(self, x, window, NFFT, noverlap):
'''This is an adaptation of the original window application
algorithm. It is kept here so the tests can verify that the new
implementation produces the same result.'''
step = NFFT - noverlap
ind = np.arange(0, len(x) - NFFT + 1, step)
n = len(ind)
result = np.zeros((NFFT, n))
if cbook.iterable(window):
windowVals = window
else:
windowVals = window(np.ones((NFFT,), x.dtype))
# apply the window to each slice
for i in range(n):
result[:, i] = windowVals * x[ind[i]:ind[i]+NFFT]
return result
def test_window_none_rand(self):
res = mlab.window_none(self.sig_rand)
assert_array_equal(res, self.sig_rand)
def test_window_none_ones(self):
res = mlab.window_none(self.sig_ones)
assert_array_equal(res, self.sig_ones)
def test_window_hanning_rand(self):
targ = np.hanning(len(self.sig_rand)) * self.sig_rand
res = mlab.window_hanning(self.sig_rand)
assert_allclose(targ, res, atol=1e-06)
def test_window_hanning_ones(self):
targ = np.hanning(len(self.sig_ones))
res = mlab.window_hanning(self.sig_ones)
assert_allclose(targ, res, atol=1e-06)
def test_apply_window_1D_axis1_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning
assert_raises(ValueError, mlab.apply_window, x, window, axis=1,
return_window=False)
def test_apply_window_1D_els_wrongsize_ValueError(self):
x = self.sig_rand
window = mlab.window_hanning(np.ones(x.shape[0]-1))
assert_raises(ValueError, mlab.apply_window, x, window)
def test_apply_window_0D_ValueError(self):
x = np.array(0)
window = mlab.window_hanning
assert_raises(ValueError, mlab.apply_window, x, window, axis=1,
return_window=False)
def test_apply_window_3D_ValueError(self):
x = self.sig_rand[np.newaxis][np.newaxis]
window = mlab.window_hanning
assert_raises(ValueError, mlab.apply_window, x, window, axis=1,
return_window=False)
def test_apply_window_hanning_1D(self):
x = self.sig_rand
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = mlab.apply_window(x, window, return_window=True)
yt = window(x)
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning
y = mlab.apply_window(x, window, axis=0, return_window=False)
yt = window(x)
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els_1D_axis0(self):
x = self.sig_rand
window = mlab.window_hanning(np.ones(x.shape[0]))
window1 = mlab.window_hanning
y = mlab.apply_window(x, window, axis=0, return_window=False)
yt = window1(x)
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
y = mlab.apply_window(x, window, axis=0, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window(x[:, i])
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els1_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning(np.ones(x.shape[0]))
window1 = mlab.window_hanning
y = mlab.apply_window(x, window, axis=0, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1(x[:, i])
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_els2_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = mlab.apply_window(x, window, axis=0, return_window=True)
yt = np.zeros_like(x)
for i in range(x.shape[1]):
yt[:, i] = window1*x[:, i]
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_els3_2D_axis0(self):
x = np.random.standard_normal([1000, 10]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[0]))
y, window2 = mlab.apply_window(x, window, axis=0, return_window=True)
yt = mlab.apply_window(x, window1, axis=0, return_window=False)
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_2D_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
y = mlab.apply_window(x, window, axis=1, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window(x[i, :])
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_els1_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning(np.ones(x.shape[1]))
window1 = mlab.window_hanning
y = mlab.apply_window(x, window, axis=1, return_window=False)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window1(x[i, :])
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_els2_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[1]))
y, window2 = mlab.apply_window(x, window, axis=1, return_window=True)
yt = np.zeros_like(x)
for i in range(x.shape[0]):
yt[i, :] = window1 * x[i, :]
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
assert_array_equal(window1, window2)
def test_apply_window_hanning_2D_els3_axis1(self):
x = np.random.standard_normal([10, 1000]) + 100.
window = mlab.window_hanning
window1 = mlab.window_hanning(np.ones(x.shape[1]))
y = mlab.apply_window(x, window, axis=1, return_window=False)
yt = mlab.apply_window(x, window1, axis=1, return_window=False)
assert_equal(yt.shape, y.shape)
assert_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_stride_windows_hanning_2D_n13_noverlapn2_axis0(self):
x = self.sig_rand
window = mlab.window_hanning
yi = mlab.stride_windows(x, n=13, noverlap=2, axis=0)
y = mlab.apply_window(yi, window, axis=0, return_window=False)
yt = self.check_window_apply_repeat(x, window, 13, 2)
assert_equal(yt.shape, y.shape)
assert_not_equal(x.shape, y.shape)
assert_allclose(yt, y, atol=1e-06)
def test_apply_window_hanning_2D_stack_axis1(self):
ydata = np.arange(32)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = mlab.apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
result = mlab.apply_window(ydata, mlab.window_hanning, axis=1,
return_window=False)
assert_allclose(ycontrol, result, atol=1e-08)
def test_apply_window_hanning_2D_stack_windows_axis1(self):
ydata = np.arange(32)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = mlab.apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
result = mlab.apply_window(ydata, mlab.window_hanning, axis=1,
return_window=False)
assert_allclose(ycontrol, result, atol=1e-08)
def test_apply_window_hanning_2D_stack_windows_axis1_unflatten(self):
n = 32
ydata = np.arange(n)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = mlab.apply_window(ydata1, mlab.window_hanning)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydata = ydata.flatten()
ydata1 = mlab.stride_windows(ydata, 32, noverlap=0, axis=0)
result = mlab.apply_window(ydata1, mlab.window_hanning, axis=0,
return_window=False)
assert_allclose(ycontrol.T, result, atol=1e-08)
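# Minimal reference sketch (not a test) for the detrend_testcase class
# below: mlab.detrend_mean subtracts the mean, while mlab.detrend_linear
# removes the least-squares line, so a pure ramp-plus-offset detrends to
# (numerically) zero.  Uses only the module-level np/mlab imports.
def _example_detrend_reference():
    n = 100
    line = np.linspace(-10., 90., n) + 5.   # slope plus constant offset
    ok_mean = np.allclose(mlab.detrend_mean(line), line - line.mean())
    ok_linear = np.allclose(mlab.detrend_linear(line), np.zeros(n),
                            atol=1e-08)
    return ok_mean and ok_linear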
class detrend_testcase(CleanupTestCase):
def setUp(self):
np.random.seed(0)
n = 1000
x = np.linspace(0., 100, n)
self.sig_zeros = np.zeros(n)
self.sig_off = self.sig_zeros + 100.
self.sig_slope = np.linspace(-10., 90., n)
self.sig_slope_mean = x - x.mean()
sig_rand = np.random.standard_normal(n)
sig_sin = np.sin(x*2*np.pi/(n/100))
sig_rand -= sig_rand.mean()
sig_sin -= sig_sin.mean()
self.sig_base = sig_rand + sig_sin
self.atol = 1e-08
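# sig_slope_mean is sig_slope with its own mean removed; it is what the
# ramp contributes to a signal after mean-detrending, which is how the
# expected values in the tests below are built.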
def test_detrend_none_0D_zeros(self):
input = 0.
targ = input
res = mlab.detrend_none(input)
assert_equal(res, targ)
def test_detrend_none_0D_zeros_axis1(self):
input = 0.
targ = input
res = mlab.detrend_none(input, axis=1)
assert_equal(res, targ)
def test_detrend_str_none_0D_zeros(self):
input = 0.
targ = input
res = mlab.detrend(input, key='none')
assert_equal(res, targ)
def test_detrend_detrend_none_0D_zeros(self):
input = 0.
targ = input
res = mlab.detrend(input, key=mlab.detrend_none)
assert_equal(res, targ)
def test_detrend_none_0D_off(self):
input = 5.5
targ = input
res = mlab.detrend_none(input)
assert_equal(res, targ)
def test_detrend_none_1D_off(self):
input = self.sig_off
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_slope(self):
input = self.sig_slope
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_base(self):
input = self.sig_base
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_1D_base_slope_off_list(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = input.tolist()
res = mlab.detrend_none(input.tolist())
assert_equal(res, targ)
def test_detrend_none_2D(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
input = np.vstack(arri)
targ = input
res = mlab.detrend_none(input)
assert_array_equal(res, targ)
def test_detrend_none_2D_T(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
input = np.vstack(arri)
targ = input
res = mlab.detrend_none(input.T)
assert_array_equal(res.T, targ)
def test_detrend_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend_mean(input)
assert_almost_equal(res, targ)
def test_detrend_str_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend(input, key='mean')
assert_almost_equal(res, targ)
def test_detrend_detrend_mean_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_mean)
assert_almost_equal(res, targ)
def test_detrend_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend_mean(input)
assert_almost_equal(res, targ)
def test_detrend_str_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key='mean')
assert_almost_equal(res, targ)
def test_detrend_detrend_mean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_mean)
assert_almost_equal(res, targ)
def test_detrend_mean_1D_zeros(self):
input = self.sig_zeros
targ = self.sig_zeros
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base(self):
input = self.sig_base
targ = self.sig_base
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_off(self):
input = self.sig_base + self.sig_off
targ = self.sig_base
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_slope(self):
input = self.sig_base + self.sig_slope
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_mean_1D_base_slope_off(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_axis0(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input, axis=0)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_list(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input.tolist())
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_list_axis0(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.detrend_mean(input.tolist(), axis=0)
assert_allclose(res, targ, atol=1e-08)
def test_demean_0D_off(self):
input = 5.5
targ = 0.
res = mlab.demean(input, axis=None)
assert_almost_equal(res, targ)
def test_demean_1D_base_slope_off(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.demean(input)
assert_allclose(res, targ, atol=1e-08)
def test_demean_1D_base_slope_off_axis0(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.demean(input, axis=0)
assert_allclose(res, targ, atol=1e-08)
def test_demean_1D_base_slope_off_list(self):
input = self.sig_base + self.sig_slope + self.sig_off
targ = self.sig_base + self.sig_slope_mean
res = mlab.demean(input.tolist())
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_2D_default(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_mean_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=None)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_none_T(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri).T
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=None)
assert_allclose(res.T, targ,
atol=1e-08)
def test_detrend_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend_mean(input, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_axism1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=-1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_2D_default(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, axis=None)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_str_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key='mean', axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_str_constant_2D_none_T(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri).T
targ = np.vstack(arrt)
res = mlab.detrend(input, key='constant', axis=None)
assert_allclose(res.T, targ,
atol=1e-08)
def test_detrend_str_default_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key='default', axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_detrend_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key=mlab.detrend_mean, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_default(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.demean(input)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.demean(input, axis=None)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.demean(input, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.demean(input, axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_axism1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.demean(input, axis=-1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_bad_key_str_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.detrend, input, key='spam')
def test_detrend_bad_key_var_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.detrend, input, key=5)
def test_detrend_mean_0D_d0_ValueError(self):
input = 5.5
assert_raises(ValueError, mlab.detrend_mean, input, axis=0)
def test_detrend_0D_d0_ValueError(self):
input = 5.5
assert_raises(ValueError, mlab.detrend, input, axis=0)
def test_detrend_mean_1D_d1_ValueError(self):
input = self.sig_slope
assert_raises(ValueError, mlab.detrend_mean, input, axis=1)
def test_detrend_1D_d1_ValueError(self):
input = self.sig_slope
assert_raises(ValueError, mlab.detrend, input, axis=1)
def test_demean_1D_d1_ValueError(self):
input = self.sig_slope
assert_raises(ValueError, mlab.demean, input, axis=1)
def test_detrend_mean_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.detrend_mean, input, axis=2)
def test_detrend_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.detrend, input, axis=2)
def test_demean_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.demean, input, axis=2)
def test_detrend_linear_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend_linear(input)
assert_almost_equal(res, targ)
def test_detrend_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend_linear(input)
assert_almost_equal(res, targ)
def test_detrend_str_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key='linear')
assert_almost_equal(res, targ)
def test_detrend_detrend_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_linear)
assert_almost_equal(res, targ)
def test_detrend_linear_1d_off(self):
input = self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope(self):
input = self.sig_slope
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_str_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend(input, key='linear')
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend(input, key=mlab.detrend_linear)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope_off_list(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input.tolist())
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_2D_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.detrend_linear, input)
def test_detrend_str_linear_2d_slope_off_axis0(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key='linear', axis=0)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_2d_slope_off_axis0(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key=mlab.detrend_linear, axis=0)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_str_linear_2d_slope_off_axis1(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key='linear', axis=1)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_2d_slope_off_axis1(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key=mlab.detrend_linear, axis=1)
assert_allclose(res, targ, atol=self.atol)
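# Minimal reference sketch (not a test) for the spectral tests below: the
# cross-spectral density of a signal with itself reduces to its power
# spectral density, which is exactly what test_psd_csd_equal asserts.
# Uses only the module-level np/mlab imports.
def _example_psd_csd_reference():
    Fs = 100.
    t = np.arange(0, 10, 1 / Fs)
    y = np.sin(2 * np.pi * 4 * t)
    Pxx, fxx = mlab.psd(x=y, NFFT=100, Fs=Fs, noverlap=50)
    Pxy, fxy = mlab.csd(x=y, y=y, NFFT=100, Fs=Fs, noverlap=50)
    return np.allclose(Pxx, Pxy) and np.array_equal(fxx, fxy)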
class spectral_testcase_nosig_real_onesided(CleanupTestCase):
def setUp(self):
self.createStim(fstims=[],
iscomplex=False, sides='onesided', nsides=1)
def createStim(self, fstims, iscomplex, sides, nsides, len_x=None,
NFFT_density=-1, nover_density=-1, pad_to_density=-1,
pad_to_spectrum=-1):
Fs = 100.
x = np.arange(0, 10, 1/Fs)
if len_x is not None:
x = x[:len_x]
# get the stimulus frequencies (an empty list means no stimulus)
fstims = [Fs/fstim for fstim in fstims]
# get the constants, default to calculated values
if NFFT_density is None:
NFFT_density_real = 256
elif NFFT_density < 0:
NFFT_density_real = NFFT_density = 100
else:
NFFT_density_real = NFFT_density
if nover_density is None:
nover_density_real = 0
elif nover_density < 0:
nover_density_real = nover_density = NFFT_density_real//2
else:
nover_density_real = nover_density
if pad_to_density is None:
pad_to_density_real = NFFT_density_real
elif pad_to_density < 0:
pad_to_density = int(2**np.ceil(np.log2(NFFT_density_real)))
pad_to_density_real = pad_to_density
else:
pad_to_density_real = pad_to_density
if pad_to_spectrum is None:
pad_to_spectrum_real = len(x)
elif pad_to_spectrum < 0:
pad_to_spectrum_real = pad_to_spectrum = len(x)
else:
pad_to_spectrum_real = pad_to_spectrum
if pad_to_spectrum is None:
NFFT_spectrum_real = NFFT_spectrum = pad_to_spectrum_real
else:
NFFT_spectrum_real = NFFT_spectrum = len(x)
nover_spectrum_real = nover_spectrum = 0
NFFT_specgram = NFFT_density
nover_specgram = nover_density
pad_to_specgram = pad_to_density
NFFT_specgram_real = NFFT_density_real
nover_specgram_real = nover_density_real
if nsides == 1:
# frequencies for specgram, psd, and csd
# need to handle even and odd differently
if pad_to_density_real % 2:
freqs_density = np.linspace(0, Fs/2,
num=pad_to_density_real,
endpoint=False)[::2]
else:
freqs_density = np.linspace(0, Fs/2,
num=pad_to_density_real//2+1)
# frequencies for complex, magnitude, angle, and phase spectrums
# need to handle even and odd differently
if pad_to_spectrum_real % 2:
freqs_spectrum = np.linspace(0, Fs/2,
num=pad_to_spectrum_real,
endpoint=False)[::2]
else:
freqs_spectrum = np.linspace(0, Fs/2,
num=pad_to_spectrum_real//2+1)
else:
# frequencies for specgram, psd, and csd
# need to handle even and odd differently
if pad_to_density_real % 2:
freqs_density = np.linspace(-Fs/2, Fs/2,
num=2*pad_to_density_real,
endpoint=False)[1::2]
else:
freqs_density = np.linspace(-Fs/2, Fs/2,
num=pad_to_density_real,
endpoint=False)
# frequencies for complex, magnitude, angle, and phase spectrums
# need to handle even and odd differently
if pad_to_spectrum_real % 2:
freqs_spectrum = np.linspace(-Fs/2, Fs/2,
num=2*pad_to_spectrum_real,
endpoint=False)[1::2]
else:
freqs_spectrum = np.linspace(-Fs/2, Fs/2,
num=pad_to_spectrum_real,
endpoint=False)
freqs_specgram = freqs_density
# time points for specgram
t_start = NFFT_specgram_real//2
t_stop = len(x) - NFFT_specgram_real//2+1
t_step = NFFT_specgram_real - nover_specgram_real
t_specgram = x[t_start:t_stop:t_step]
if NFFT_specgram_real % 2:
t_specgram += 1/Fs/2
if len(t_specgram) == 0:
t_specgram = np.array([NFFT_specgram_real/(2*Fs)])
t_spectrum = np.array([NFFT_spectrum_real/(2*Fs)])
t_density = t_specgram
y = np.zeros_like(x)
for i, fstim in enumerate(fstims):
y += np.sin(fstim * x * np.pi * 2) * 10**i
if iscomplex:
y = y.astype('complex')
self.Fs = Fs
self.sides = sides
self.fstims = fstims
self.NFFT_density = NFFT_density
self.nover_density = nover_density
self.pad_to_density = pad_to_density
self.NFFT_spectrum = NFFT_spectrum
self.nover_spectrum = nover_spectrum
self.pad_to_spectrum = pad_to_spectrum
self.NFFT_specgram = NFFT_specgram
self.nover_specgram = nover_specgram
self.pad_to_specgram = pad_to_specgram
self.t_specgram = t_specgram
self.t_density = t_density
self.t_spectrum = t_spectrum
self.y = y
self.freqs_density = freqs_density
self.freqs_spectrum = freqs_spectrum
self.freqs_specgram = freqs_specgram
self.NFFT_density_real = NFFT_density_real
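# createStim leaves the stimulus signal in self.y and stores the expected
# frequency and time axes (freqs_density, freqs_spectrum, freqs_specgram,
# t_density, t_spectrum, t_specgram) plus the FFT/overlap/padding settings
# on the instance; the tests below compare mlab's output against these
# precomputed values.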
def check_freqs(self, vals, targfreqs, resfreqs, fstims):
assert_true(resfreqs.argmin() == 0)
assert_true(resfreqs.argmax() == len(resfreqs)-1)
assert_allclose(resfreqs, targfreqs, atol=1e-06)
for fstim in fstims:
i = np.abs(resfreqs - fstim).argmin()
assert_true(vals[i] > vals[i+2])
assert_true(vals[i] > vals[i-2])
def check_maxfreq(self, spec, fsp, fstims):
# skip the test if there are no frequencies
if len(fstims) == 0:
return
# if twosided, do the test for each side
if fsp.min() < 0:
fspa = np.abs(fsp)
zeroind = fspa.argmin()
self.check_maxfreq(spec[:zeroind], fspa[:zeroind], fstims)
self.check_maxfreq(spec[zeroind:], fspa[zeroind:], fstims)
return
fstimst = fstims[:]
spect = spec.copy()
# check that the strongest remaining peak sits at the largest remaining
# stimulus frequency
while fstimst:
maxind = spect.argmax()
maxfreq = fsp[maxind]
assert_almost_equal(maxfreq, fstimst[-1])
del fstimst[-1]
spect[maxind-5:maxind+5] = 0
def test_spectral_helper_raises_complex_same_data(self):
# test that mode 'complex' cannot be used if x is not y
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y+1, mode='complex')
def test_spectral_helper_raises_magnitude_same_data(self):
# test that mode 'magnitude' cannot be used if x is not y
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y+1, mode='magnitude')
def test_spectral_helper_raises_angle_same_data(self):
# test that mode 'angle' cannot be used if x is not y
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y+1, mode='angle')
def test_spectral_helper_raises_phase_same_data(self):
# test that mode 'phase' cannot be used if x is not y
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y+1, mode='phase')
def test_spectral_helper_raises_unknown_mode(self):
# test that unknown value for mode cannot be used
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, mode='spam')
def test_spectral_helper_raises_unknown_sides(self):
# test that unknown value for sides cannot be used
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y, sides='eggs')
def test_spectral_helper_raises_noverlap_gt_NFFT(self):
# test that noverlap cannot be larger than NFFT
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y, NFFT=10, noverlap=20)
def test_spectral_helper_raises_noverlap_eq_NFFT(self):
# test that noverlap cannot be equal to NFFT
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, NFFT=10, noverlap=10)
def test_spectral_helper_raises_winlen_ne_NFFT(self):
# test that the window length cannot be different from NFFT
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y, NFFT=10, window=np.ones(9))
def test_single_spectrum_helper_raises_mode_default(self):
# test that mode 'default' cannot be used with _single_spectrum_helper
assert_raises(ValueError, mlab._single_spectrum_helper,
x=self.y, mode='default')
def test_single_spectrum_helper_raises_mode_psd(self):
# test that mode 'psd' cannot be used with _single_spectrum_helper
assert_raises(ValueError, mlab._single_spectrum_helper,
x=self.y, mode='psd')
def test_spectral_helper_psd(self):
freqs = self.freqs_density
spec, fsp, t = mlab._spectral_helper(x=self.y, y=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
mode='psd')
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_density, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
def test_spectral_helper_magnitude_specgram(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab._spectral_helper(x=self.y, y=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='magnitude')
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
def test_spectral_helper_magnitude_magnitude_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp, t = mlab._spectral_helper(x=self.y, y=self.y,
NFFT=self.NFFT_spectrum,
Fs=self.Fs,
noverlap=self.nover_spectrum,
pad_to=self.pad_to_spectrum,
sides=self.sides,
mode='magnitude')
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_spectrum, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], 1)
def test_csd(self):
freqs = self.freqs_density
spec, fsp = mlab.csd(x=self.y, y=self.y+1,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_allclose(fsp, freqs, atol=1e-06)
assert_equal(spec.shape, freqs.shape)
def test_psd(self):
freqs = self.freqs_density
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_equal(spec.shape, freqs.shape)
self.check_freqs(spec, freqs, fsp, self.fstims)
def test_psd_detrend_mean_func_offset(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.zeros(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_mean)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_mean)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_detrend_mean_str_offset(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.zeros(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend='mean')
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend='mean')
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_detrend_linear_func_trend(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.arange(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_detrend_linear_str_trend(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.arange(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend='linear')
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend='linear')
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_window_hanning(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.arange(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1, windowVals = mlab.apply_window(ydata1,
mlab.window_hanning,
return_window=True)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydatab = ydata.T.flatten()
ydataf = ydata.flatten()
ycontrol = ycontrol.flatten()
spec_g, fsp_g = mlab.psd(x=ydataf,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_hanning)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_hanning)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_none)
spec_c *= len(ycontrol1)/(np.abs(windowVals)**2).sum()
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_window_hanning_detrend_linear(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.arange(self.NFFT_density)
ycontrol = np.zeros(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = ycontrol
ycontrol2 = ycontrol
ycontrol1, windowVals = mlab.apply_window(ycontrol1,
mlab.window_hanning,
return_window=True)
ycontrol2 = mlab.window_hanning(ycontrol2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydatab = ydata.T.flatten()
ydataf = ydata.flatten()
ycontrol = ycontrol.flatten()
spec_g, fsp_g = mlab.psd(x=ydataf,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear,
window=mlab.window_hanning)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear,
window=mlab.window_hanning)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_none)
spec_c *= len(ycontrol1)/(np.abs(windowVals)**2).sum()
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_windowarray(self):
freqs = self.freqs_density
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=np.ones(self.NFFT_density_real))
assert_allclose(fsp, freqs, atol=1e-06)
assert_equal(spec.shape, freqs.shape)
def test_psd_windowarray_scale_by_freq(self):
freqs = self.freqs_density
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
spec_s, fsp_s = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
scale_by_freq=True)
spec_n, fsp_n = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
scale_by_freq=False)
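# scale_by_freq=True (also the default here) scales the result by 1/Fs
# relative to scale_by_freq=False, which is what the asserts below check.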
assert_array_equal(fsp, fsp_s)
assert_array_equal(fsp, fsp_n)
assert_array_equal(spec, spec_s)
assert_allclose(spec_s, spec_n/self.Fs, atol=1e-08)
def test_complex_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp = mlab.complex_spectrum(x=self.y,
Fs=self.Fs,
sides=self.sides,
pad_to=self.pad_to_spectrum)
assert_allclose(fsp, freqs, atol=1e-06)
assert_equal(spec.shape, freqs.shape)
def test_magnitude_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp = mlab.magnitude_spectrum(x=self.y,
Fs=self.Fs,
sides=self.sides,
pad_to=self.pad_to_spectrum)
assert_equal(spec.shape, freqs.shape)
self.check_maxfreq(spec, fsp, self.fstims)
self.check_freqs(spec, freqs, fsp, self.fstims)
def test_angle_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp = mlab.angle_spectrum(x=self.y,
Fs=self.Fs,
sides=self.sides,
pad_to=self.pad_to_spectrum)
assert_allclose(fsp, freqs, atol=1e-06)
assert_equal(spec.shape, freqs.shape)
def test_phase_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp = mlab.phase_spectrum(x=self.y,
Fs=self.Fs,
sides=self.sides,
pad_to=self.pad_to_spectrum)
assert_allclose(fsp, freqs, atol=1e-06)
assert_equal(spec.shape, freqs.shape)
def test_specgram_auto(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides)
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
# the test signal is stationary, so all time slices should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(np.diff(spec, axis=1).max()/np.abs(spec.max()), 0,
atol=1e-02)
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_default(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='default')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
# the test signal is stationary, so all time slices should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(np.diff(spec, axis=1).max()/np.abs(spec.max()), 0,
atol=1e-02)
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_psd(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='psd')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
# the test signal is stationary, so all time slices should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(np.diff(spec, axis=1).max()/np.abs(spec.max()), 0,
atol=1e-02)
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_complex(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
specm = np.mean(np.abs(spec), axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_magnitude(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='magnitude')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
# the test signal is stationary, so all time slices should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(np.diff(spec, axis=1).max()/np.abs(spec.max()), 0,
atol=1e-02)
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_angle(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='angle')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
def test_specgram_phase(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='phase')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
def test_psd_csd_equal(self):
freqs = self.freqs_density
Pxx, freqsxx = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
Pxy, freqsxy = mlab.csd(x=self.y, y=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_array_equal(Pxx, Pxy)
assert_array_equal(freqsxx, freqsxy)
def test_specgram_auto_default_equal(self):
'''test that mlab.specgram without mode and with mode 'default'
give the same result'''
freqs = self.freqs_specgram
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides)
specb, freqspecb, tb = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='default')
assert_array_equal(speca, specb)
assert_array_equal(freqspeca, freqspecb)
assert_array_equal(ta, tb)
def test_specgram_auto_psd_equal(self):
'''test that mlab.specgram without mode and with mode 'psd'
give the same result'''
freqs = self.freqs_specgram
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides)
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='psd')
assert_array_equal(speca, specc)
assert_array_equal(freqspeca, freqspecc)
assert_array_equal(ta, tc)
def test_specgram_complex_mag_equivalent(self):
freqs = self.freqs_specgram
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
specm, freqspecm, tm = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='magnitude')
assert_array_equal(freqspecc, freqspecm)
assert_array_equal(tc, tm)
assert_allclose(np.abs(specc), specm, atol=1e-06)
def test_specgram_complex_angle_equivalent(self):
freqs = self.freqs_specgram
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='angle')
assert_array_equal(freqspecc, freqspeca)
assert_array_equal(tc, ta)
assert_allclose(np.angle(specc), speca, atol=1e-06)
def test_specgram_complex_phase_equivalent(self):
freqs = self.freqs_specgram
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
specp, freqspecp, tp = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='phase')
assert_array_equal(freqspecc, freqspecp)
assert_array_equal(tc, tp)
assert_allclose(np.unwrap(np.angle(specc), axis=0), specp,
atol=1e-06)
def test_specgram_angle_phase_equivalent(self):
freqs = self.freqs_specgram
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='angle')
specp, freqspecp, tp = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='phase')
assert_array_equal(freqspeca, freqspecp)
assert_array_equal(ta, tp)
assert_allclose(np.unwrap(speca, axis=0), specp,
atol=1e-06)
def test_psd_windowarray_equal(self):
freqs = self.freqs_density
win = mlab.window_hanning(np.ones(self.NFFT_density_real))
speca, fspa = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=win)
specb, fspb = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_array_equal(fspa, fspb)
assert_allclose(speca, specb, atol=1e-08)
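# Every subclass below re-runs the complete set of tests defined in
# spectral_testcase_nosig_real_onesided; only setUp changes, feeding a
# different stimulus / sides / complex / NFFT / padding combination to
# createStim.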
class spectral_testcase_nosig_real_twosided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_Fs4_real_onesided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4],
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_Fs4_real_twosided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4],
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_Fs4_real_defaultsided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4],
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_Fs4_complex_onesided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4],
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_Fs4_complex_twosided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4],
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_Fs4_complex_defaultsided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4],
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_FsAll_real_onesided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4, 5, 10],
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_FsAll_real_twosided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4, 5, 10],
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_FsAll_real_defaultsided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4, 5, 10],
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_FsAll_complex_onesided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4, 5, 10],
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_FsAll_complex_twosided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4, 5, 10],
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_FsAll_complex_defaultsided(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[4, 5, 10],
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_nosig_real_onesided_noNFFT(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None, pad_to_spectrum=None,
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_nosig_real_twosided_noNFFT(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None, pad_to_spectrum=None,
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided_noNFFT(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None, pad_to_spectrum=None,
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided_noNFFT(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None, pad_to_spectrum=None,
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided_noNFFT(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None, pad_to_spectrum=None,
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided_noNFFT(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None, pad_to_spectrum=None,
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_nosig_real_onesided_nopad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
pad_to_density=None, pad_to_spectrum=None,
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_nosig_real_twosided_nopad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
pad_to_density=None, pad_to_spectrum=None,
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided_nopad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
pad_to_density=None, pad_to_spectrum=None,
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided_nopad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
pad_to_density=None, pad_to_spectrum=None,
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided_nopad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None,
pad_to_density=None, pad_to_spectrum=None,
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided_nopad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None,
pad_to_density=None, pad_to_spectrum=None,
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_nosig_real_onesided_noNFFT_no_pad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None,
pad_to_density=None, pad_to_spectrum=None,
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_nosig_real_twosided_noNFFT_no_pad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None,
pad_to_density=None, pad_to_spectrum=None,
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided_noNFFT_no_pad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None,
pad_to_density=None, pad_to_spectrum=None,
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided_noNFFT_no_pad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None,
pad_to_density=None, pad_to_spectrum=None,
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided_noNFFT_no_pad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None,
pad_to_density=None, pad_to_spectrum=None,
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided_noNFFT_no_pad_to(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
NFFT_density=None,
pad_to_density=None, pad_to_spectrum=None,
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_nosig_real_onesided_trim(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
NFFT_density=512, pad_to_spectrum=128,
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_nosig_real_twosided_trim(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
NFFT_density=512, pad_to_spectrum=128,
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided_trim(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
NFFT_density=512, pad_to_spectrum=128,
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided_trim(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
NFFT_density=512, pad_to_spectrum=128,
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided_trim(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
NFFT_density=512, pad_to_spectrum=128,
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided_trim(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
NFFT_density=128, pad_to_spectrum=128,
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_nosig_real_onesided_odd(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
pad_to_density=33, pad_to_spectrum=257,
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_nosig_real_twosided_odd(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
pad_to_density=33, pad_to_spectrum=257,
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided_odd(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
pad_to_density=33, pad_to_spectrum=257,
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided_odd(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
pad_to_density=33, pad_to_spectrum=257,
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided_odd(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
pad_to_density=33, pad_to_spectrum=257,
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided_odd(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=256,
pad_to_density=33, pad_to_spectrum=257,
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_nosig_real_onesided_oddlen(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=255,
NFFT_density=33, pad_to_spectrum=None,
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_nosig_real_twosided_oddlen(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=255,
NFFT_density=33, pad_to_spectrum=None,
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided_oddlen(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=255,
NFFT_density=33, pad_to_spectrum=None,
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided_oddlen(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=255,
NFFT_density=33, pad_to_spectrum=None,
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided_oddlen(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=255,
NFFT_density=33, pad_to_spectrum=None,
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided_oddlen(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=255,
NFFT_density=128, pad_to_spectrum=None,
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_nosig_real_onesided_stretch(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=128,
NFFT_density=128,
pad_to_density=256, pad_to_spectrum=256,
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_nosig_real_twosided_stretch(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=128,
NFFT_density=128,
pad_to_density=256, pad_to_spectrum=256,
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided_stretch(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=128,
NFFT_density=128,
pad_to_density=256, pad_to_spectrum=256,
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided_stretch(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=128,
NFFT_density=128,
pad_to_density=256, pad_to_spectrum=256,
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided_stretch(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=128,
NFFT_density=128,
pad_to_density=256, pad_to_spectrum=256,
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided_stretch(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
len_x=128,
NFFT_density=128,
pad_to_density=256, pad_to_spectrum=256,
iscomplex=True, sides='default', nsides=2)
class spectral_testcase_nosig_real_onesided_overlap(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
nover_density=32,
iscomplex=False, sides='onesided', nsides=1)
class spectral_testcase_nosig_real_twosided_overlap(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
nover_density=32,
iscomplex=False, sides='twosided', nsides=2)
class spectral_testcase_nosig_real_defaultsided_overlap(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
nover_density=32,
iscomplex=False, sides='default', nsides=1)
class spectral_testcase_nosig_complex_onesided_overlap(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
nover_density=32,
iscomplex=True, sides='onesided', nsides=1)
class spectral_testcase_nosig_complex_twosided_overlap(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
nover_density=32,
iscomplex=True, sides='twosided', nsides=2)
class spectral_testcase_nosig_complex_defaultsided_overlap(
spectral_testcase_nosig_real_onesided):
def setUp(self):
self.createStim(fstims=[],
nover_density=32,
iscomplex=True, sides='default', nsides=2)
def test_griddata_linear():
# z is a linear function of x and y.
def get_z(x, y):
return 3.0*x - y
# Passing 1D xi and yi arrays to griddata.
x = np.asarray([0.0, 1.0, 0.0, 1.0, 0.5])
y = np.asarray([0.0, 0.0, 1.0, 1.0, 0.5])
z = get_z(x, y)
xi = [0.2, 0.4, 0.6, 0.8]
yi = [0.1, 0.3, 0.7, 0.9]
zi = mlab.griddata(x, y, z, xi, yi, interp='linear')
xi, yi = np.meshgrid(xi, yi)
np.testing.assert_array_almost_equal(zi, get_z(xi, yi))
# Passing 2D xi and yi arrays to griddata.
zi = mlab.griddata(x, y, z, xi, yi, interp='linear')
np.testing.assert_array_almost_equal(zi, get_z(xi, yi))
# Masking z array.
z_masked = np.ma.array(z, mask=[False, False, False, True, False])
correct_zi_masked = np.ma.masked_where(xi + yi > 1.0, get_z(xi, yi))
zi = mlab.griddata(x, y, z_masked, xi, yi, interp='linear')
matest.assert_array_almost_equal(zi, correct_zi_masked)
np.testing.assert_array_equal(np.ma.getmask(zi),
np.ma.getmask(correct_zi_masked))
@knownfailureif(not HAS_NATGRID)
def test_griddata_nn():
# z is a linear function of x and y.
def get_z(x, y):
return 3.0*x - y
# Passing 1D xi and yi arrays to griddata.
x = np.asarray([0.0, 1.0, 0.0, 1.0, 0.5])
y = np.asarray([0.0, 0.0, 1.0, 1.0, 0.5])
z = get_z(x, y)
xi = [0.2, 0.4, 0.6, 0.8]
yi = [0.1, 0.3, 0.7, 0.9]
correct_zi = [[0.49999252, 1.0999978, 1.7000030, 2.3000080],
[0.29999208, 0.8999978, 1.5000029, 2.1000059],
[-0.1000099, 0.4999943, 1.0999964, 1.6999979],
[-0.3000128, 0.2999894, 0.8999913, 1.4999933]]
zi = mlab.griddata(x, y, z, xi, yi, interp='nn')
np.testing.assert_array_almost_equal(zi, correct_zi)
# Decreasing xi or yi should raise ValueError.
assert_raises(ValueError, mlab.griddata, x, y, z, xi[::-1], yi,
interp='nn')
assert_raises(ValueError, mlab.griddata, x, y, z, xi, yi[::-1],
interp='nn')
# Passing 2D xi and yi arrays to griddata.
xi, yi = np.meshgrid(xi, yi)
zi = mlab.griddata(x, y, z, xi, yi, interp='nn')
np.testing.assert_array_almost_equal(zi, correct_zi)
# Masking z array.
z_masked = np.ma.array(z, mask=[False, False, False, True, False])
correct_zi_masked = np.ma.masked_where(xi + yi > 1.0, correct_zi)
zi = mlab.griddata(x, y, z_masked, xi, yi, interp='nn')
np.testing.assert_array_almost_equal(zi, correct_zi_masked, 5)
np.testing.assert_array_equal(np.ma.getmask(zi),
np.ma.getmask(correct_zi_masked))
#*****************************************************************
# These tests were taken from SciPy with some minor modifications;
# they can be retrieved from:
# https://github.com/scipy/scipy/blob/master/scipy/stats/tests/test_kdeoth.py
#*****************************************************************
class gaussian_kde_tests():
def test_kde_integer_input(self):
"""Regression test for #1181."""
x1 = np.arange(5)
kde = mlab.GaussianKDE(x1)
y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869,
0.13480721]
np.testing.assert_array_almost_equal(kde(x1), y_expected, decimal=6)
def test_gaussian_kde_covariance_caching(self):
x1 = np.array([-7, -5, 1, 4, 5], dtype=np.float)
xs = np.linspace(-10, 10, num=5)
# These expected values are from scipy 0.10, before some changes to
# gaussian_kde. They were not compared with any external reference.
y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754,
0.01664475]
# set it to the default bandwidth.
kde2 = mlab.GaussianKDE(x1, 'scott')
y2 = kde2(xs)
np.testing.assert_array_almost_equal(y_expected, y2, decimal=7)
def test_kde_bandwidth_method(self):
np.random.seed(8765678)
n_basesample = 50
xn = np.random.randn(n_basesample)
# Default
gkde = mlab.GaussianKDE(xn)
# Supply a callable
gkde2 = mlab.GaussianKDE(xn, 'scott')
# Supply a scalar
gkde3 = mlab.GaussianKDE(xn, bw_method=gkde.factor)
xs = np.linspace(-7, 7, 51)
kdepdf = gkde.evaluate(xs)
kdepdf2 = gkde2.evaluate(xs)
assert_almost_equal(kdepdf.all(), kdepdf2.all())
kdepdf3 = gkde3.evaluate(xs)
assert_almost_equal(kdepdf.all(), kdepdf3.all())
class gaussian_kde_custom_tests(object):
def test_no_data(self):
"""Pass no data into the GaussianKDE class."""
assert_raises(ValueError, mlab.GaussianKDE, [])
def test_single_dataset_element(self):
"""Pass a single dataset element into the GaussianKDE class."""
assert_raises(ValueError, mlab.GaussianKDE, [42])
def test_silverman_multidim_dataset(self):
"""Use a multi-dimensional array as the dataset and test silverman's
output"""
x1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(np.linalg.LinAlgError, mlab.GaussianKDE, x1, "silverman")
def test_silverman_singledim_dataset(self):
"""Use a single dimension list as the dataset and test silverman's
output."""
x1 = np.array([-7, -5, 1, 4, 5])
mygauss = mlab.GaussianKDE(x1, "silverman")
y_expected = 0.76770389927475502
assert_almost_equal(mygauss.covariance_factor(), y_expected, 7)
def test_scott_multidim_dataset(self):
"""Use a multi-dimensional array as the dataset and test scott's output
"""
x1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(np.linalg.LinAlgError, mlab.GaussianKDE, x1, "scott")
def test_scott_singledim_dataset(self):
"""Use a single-dimensional array as the dataset and test scott's
output"""
x1 = np.array([-7, -5, 1, 4, 5])
mygauss = mlab.GaussianKDE(x1, "scott")
y_expected = 0.72477966367769553
assert_almost_equal(mygauss.covariance_factor(), y_expected, 7)
def test_scalar_empty_dataset(self):
"""Use an empty array as the dataset and test the scalar's cov factor
"""
assert_raises(ValueError, mlab.GaussianKDE, [], bw_method=5)
def test_scalar_covariance_dataset(self):
"""Use a dataset and test a scalar's cov factor
"""
np.random.seed(8765678)
n_basesample = 50
multidim_data = [np.random.randn(n_basesample) for i in range(5)]
kde = mlab.GaussianKDE(multidim_data, bw_method=0.5)
assert_equal(kde.covariance_factor(), 0.5)
def test_callable_covariance_dataset(self):
"""Use a multi-dimensional array as the dataset and test the callable's
cov factor"""
np.random.seed(8765678)
n_basesample = 50
multidim_data = [np.random.randn(n_basesample) for i in range(5)]
callable_fun = lambda x: 0.55
kde = mlab.GaussianKDE(multidim_data, bw_method=callable_fun)
assert_equal(kde.covariance_factor(), 0.55)
def test_callable_singledim_dataset(self):
"""Use a single-dimensional array as the dataset and test the
callable's cov factor"""
np.random.seed(8765678)
n_basesample = 50
multidim_data = np.random.randn(n_basesample)
kde = mlab.GaussianKDE(multidim_data, bw_method='silverman')
y_expected = 0.48438841363348911
assert_almost_equal(kde.covariance_factor(), y_expected, 7)
def test_wrong_bw_method(self):
"""Test the error message that should be called when bw is invalid."""
np.random.seed(8765678)
n_basesample = 50
data = np.random.randn(n_basesample)
assert_raises(ValueError, mlab.GaussianKDE, data, bw_method="invalid")
class gaussian_kde_evaluate_tests(object):
    def test_evaluate_diff_dim(self):
        """Test the evaluate method when the dimensions of the dataset and
        the evaluation points differ."""
x1 = np.arange(3, 10, 2)
kde = mlab.GaussianKDE(x1)
x2 = np.arange(3, 12, 2)
y_expected = [
0.08797252, 0.11774109, 0.11774109, 0.08797252, 0.0370153
]
y = kde.evaluate(x2)
np.testing.assert_array_almost_equal(y, y_expected, 7)
def test_evaluate_inv_dim(self):
""" Invert the dimensions. i.e., Give the dataset a dimension of
1 [3,2,4], and the points will have a dimension of 3 [[3],[2],[4]].
ValueError should be raised"""
np.random.seed(8765678)
n_basesample = 50
multidim_data = np.random.randn(n_basesample)
kde = mlab.GaussianKDE(multidim_data)
x2 = [[1], [2], [3]]
assert_raises(ValueError, kde.evaluate, x2)
def test_evaluate_dim_and_num(self):
""" Tests if evaluated against a one by one array"""
x1 = np.arange(3, 10, 2)
x2 = np.array([3])
kde = mlab.GaussianKDE(x1)
y_expected = [0.08797252]
y = kde.evaluate(x2)
np.testing.assert_array_almost_equal(y, y_expected, 7)
    def test_evaluate_point_dim_not_one(self):
        """Test that evaluate raises ValueError when the points are
        two-dimensional while the dataset is one-dimensional."""
x1 = np.arange(3, 10, 2)
x2 = [np.arange(3, 10, 2), np.arange(3, 10, 2)]
kde = mlab.GaussianKDE(x1)
assert_raises(ValueError, kde.evaluate, x2)
    def test_evaluate_equal_dim_and_num_lt(self):
        """Test evaluation when the points array is shorter than the dataset
        but has the same dimension."""
x1 = np.arange(3, 10, 2)
x2 = np.arange(3, 8, 2)
kde = mlab.GaussianKDE(x1)
y_expected = [0.08797252, 0.11774109, 0.11774109]
y = kde.evaluate(x2)
np.testing.assert_array_almost_equal(y, y_expected, 7)
#*****************************************************************
#*****************************************************************
if __name__ == '__main__':
import nose
import sys
args = ['-s', '--with-doctest']
argv = sys.argv
argv = argv[:1] + args + argv[1:]
nose.runmodule(argv=argv, exit=False)
|
gpl-2.0
|
noapex/bcoadmin
|
movimientos/xls_parser.py
|
1
|
4528
|
import pandas as pd
import numpy as np
import datetime
import locale
import math
locale.setlocale(locale.LC_TIME, "es_AR.UTF-8")
locale.setlocale(locale.LC_NUMERIC, 'es_AR.UTF-8')
def get_movimientos(myfile):
try:
xls_file = pd.ExcelFile(myfile)
except Exception as e:
print(e)
return False
df_list = list()
columns = None
row_is_header = False
print(xls_file.io)
for xls in xls_file.sheet_names:
print('procesando', xls)
df = xls_file.parse(xls)
to_delete = list()
mod_cols = None
for idx, row in df.iterrows():
            # this is the row with the column descriptions
# if sum(row.isin(['Fecha', 'Sucursal origen', 'Referencia', 'Descripción']).values) == 4:
if sum(row.isin(['Fecha', 'Sucursal Origen', 'Referencia', 'Descripcion']).values) == 4 or sum(row.isin(['Fecha', 'Sucursal origen', 'Referencia', 'Descripción']).values) == 4:
cols_size = row.count()
                # consolidated statement
if cols_size == 9 or cols_size == 7:
columns = ['fecha', 'cuenta', 'descripcion', 'codigo', 'monto', 'cc_pesos', 'saldo_pesos']
if cols_size == 5:
columns = ['fecha', 'cuenta', 'descripcion', 'codigo', 'monto']
# if row.tolist().index('Descripción') == 4:
if any(row.isin(['Descripcion'])) and row.tolist().index('Descripcion') == 4 or any(row.isin(['Descripción'])) and row.tolist().index('Descripción') == 4:
xls_cols = df.columns.tolist()
mod_cols = xls_cols[:3]
                    # list.extend mutates in place and returns None:
                    # swap the 4th and 5th columns, then append the rest
                    mod_cols.extend([xls_cols[4], xls_cols[3]])
                    mod_cols.extend(xls_cols[5:])
print(row)
            # any row without a date in the second column is not a valid movement; drop it
try:
row[1] = datetime.datetime.strptime(row[1], '%d/%m/%Y')
print(1)
# import pdb; pdb.set_trace()
except:
print(2)
try:
if type(row[1]) is not datetime.datetime:
raise ValueError
except:
try:
print(3)
row[1] = datetime.datetime.strptime(row[1], '%d/%m/%Y %H:%M')
except:
print(4)
to_delete.append(idx)
if idx not in to_delete:
if isinstance(row[5], str):
row[5] = locale.atof(row[5])
if isinstance(row[6], str):
row[6] = locale.atof(row[6])
                # if the monto amount is NaN and there is an amount in cc_pesos, copy it over
if math.isnan(row[5]) and isinstance(row[6], (int, float)):
df.iloc[idx, 5] = row[6]
if mod_cols:
df = df[mod_cols]
df.drop(to_delete, inplace=True)
df.replace('', np.nan, inplace=True)
        # not usable because it may drop columns that just happen to be empty, such as the cc amount
# df.dropna(axis=1, how='all', inplace=True)
df.drop(df.columns[0], axis=1, inplace=True)
df.drop(df.columns[7:], axis=1, inplace=True)
# df.to_sql
try:
df.columns = columns
except (TypeError, ValueError) as e:
print('Error:', e)
try:
df['fecha'] = pd.to_datetime(df['fecha'], format='%d/%m/%Y %H:%M')
except:
df['fecha'] = pd.to_datetime(df['fecha'], format='%d/%m/%Y')
if cols_size == 9:
df.drop('cc_pesos', axis=1, inplace=True)
df.drop('saldo_pesos', axis=1, inplace=True)
        # TODO: pick up the movements from columns other than 'monto' (peso savings account)
if len(columns) == 7:
df.drop(df.columns[8:], axis=1, inplace=True)
df = df.set_index('codigo')
df.index.name = None
df_list.append(df)
pd.set_option('display.max_rows', None)
return merge_dfs(df_list)
def merge_dfs(df_list):
df = pd.concat(df_list, sort=False).drop_duplicates()
df.sort_values(by='fecha', inplace=True)
df.reset_index(inplace=True)
df.drop(df.columns[5:], axis=1, inplace=True)
df.columns = ['codigo','fecha','cuenta','descripcion', 'monto']
df = df.drop_duplicates(subset=['fecha','codigo'], keep='last').set_index('codigo')
return df
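# Illustrative usage sketch (added for clarity; not part of the original
# module, and the file name below is hypothetical). get_movimientos() expects
# a bank XLS export and returns a DataFrame indexed by 'codigo' with columns
# fecha/cuenta/descripcion/monto, or False if the file cannot be read.
if __name__ == '__main__':
    movimientos = get_movimientos('resumen_banco.xls')
    if movimientos is not False:
        print(movimientos.head())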
|
gpl-3.0
|
ProjectsUCSC/NLP
|
Assignment 2/preprocess.py
|
1
|
2908
|
import pandas as pd
import os
from nltk.corpus import stopwords
import re
import enchant
from nltk.stem.porter import *
import numpy as np
import cPickle as pickle
# In[2]:
def readData(filename):
cwd = os.getcwd()
path = cwd + "/" + filename;
#print path
df =pd.read_csv(path);
return df
def tokenize_and_stopwords(data_sample):
#data_sample = list(data_sample)
#Get all english stopwords
try:
words = open("common_words.txt", "r").readlines()
for i in range(len(words)):
words[i] = words[i].strip()
except:
words = []
print "words", words
# abb_dict = pickle.load(open("abbreviations", "r"))
stop = stopwords.words('english') + words #list(string.punctuation) + ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
#Use only characters from reviews
data_sample = data_sample.str.replace("[^a-zA-Z ]", " ")#, " ")
data_sample = data_sample.str.lower()
#print data_sample
#tokenize and remove stop words
# for i in range(len(data_sample)):
# for j in data_sample[i].split():
# if i == abb_dict.keys():
# data_sample[i] = data_sample[i].replace(i, abb_dict[i])
return [(" ").join([i for i in sentence.split() if i not in stop]) for sentence in data_sample]
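# Illustrative example (added comment, not in the original code): given a
# pandas Series such as pd.Series(["The cat, a dog!"]), tokenize_and_stopwords
# strips non-letters, lowercases and drops stop words, yielding ["cat dog"].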
# In[10]:
def cleanhtml(tweet):
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', tweet)
return cleantext
def cleanUrl(tweet):
tweet= re.sub(r"http\S+", "", tweet)
return tweet;
def removeMention(tweet):
tweet = tweet.replace("rt@","").rstrip()
tweet = tweet.replace("rt ","").rstrip()
tweet = tweet.replace("@","").rstrip()
return tweet;
# In[11]:
def spellCheck(word):
# d = enchant.Dict()
# if d.check(word) == False:
# word = d.suggest(word)[0] if d.suggest(word) else ""
# #print word
return word
def stemmer(preprocessed_data_sample):
print "stemming "
#Create a new Porter stemmer.
stemmer = PorterStemmer()
#try:
for i in range(len(preprocessed_data_sample)):
#Stemming
try:
preprocessed_data_sample[i] = preprocessed_data_sample[i].replace(preprocessed_data_sample[i], " ".join([stemmer.stem(str(word)) for word in preprocessed_data_sample[i].split()]))
except:
#No stemming
preprocessed_data_sample[i] = preprocessed_data_sample[i].replace(preprocessed_data_sample[i], " ".join([str(word) for word in preprocessed_data_sample[i].split()]))
return preprocessed_data_sample
# In[ ]:
def preprocess(filename):
#filename = "Homework2_data.csv"
df = readData(filename)
df['text'] = df['Tweet_text'].apply(cleanhtml).apply(cleanUrl).apply(removeMention);#.apply(stemmer);
# df['text'] = df['text'].apply(spellCheck)
# df['text'] = stemmer(df["text"])
df['text'] = tokenize_and_stopwords(df['text'])
return df
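# Illustrative usage sketch (added comment, not in the original assignment
# code); the CSV must contain a 'Tweet_text' column:
# df = preprocess("Homework2_data.csv")
# print df["text"].head()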
|
mit
|
w2naf/davitpy
|
gme/sat/rbsp.py
|
3
|
13047
|
# Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
"""
*********************
**Module**: gme.rbsp
*********************
This module handles RBSP footpoint calculations and plotting
**Class**:
* :class:`rbspFp`: FPs reading (or calculating) and plotting
"""
############################################################################
# Foot Points (FPs) calculation and plotting
############################################################################
class rbspFp(object):
    """This class reads FPs from the SuperDARN FPs database, or generates them if necessary
**Args**:
* **sTime**: start date/time to get FPs
        * **[eTime]**: end date/time to get FPs (defaults to 24 hours after `sTime`)
* **[spacecraft]**: limit FPs loading to a specific spacecraft
* **[L_shell_min]**: limit FPs loading to L-shell values greater than this
* **[L_shell_max]**: limit FPs loading to L-shell values lesser than this
        * **[apogees_only]**: only record apogee foot-points (useful if all you want are apogees)
**Example**:
::
# Get all the FPs for 1/Sept/2012 from 0 to 6 UT
from datetime import datetime
import rbsp
sTime = datetime(2012,9,1,0)
eTime = datetime(2012,9,1,6)
fps = rbsp.rbspFp(sTime, eTime)
# Pretty print the apogees in that period
print fps
# Plot them on a map
fps.map()
.. warning:: try not to request more than 24 hours at once, unless all you want are apogees.
written by Sebastien de Larquier, 2013-03
"""
def __init__(self, sTime, eTime=None, spacecraft=None,
L_shell_min=None, L_shell_max=None,
apogees_only=False):
from datetime import datetime, timedelta
# MongoDB server
self._db_user = 'sd_dbread'
self._db_pswd = '5d'
self._db_host = 'sd-work9.ece.vt.edu'
self._db_name = 'rbsp'
# Input
self.sTime = sTime
self.eTime = eTime if eTime else sTime + timedelta(hours=24)
self._spacecraft = spacecraft.lower() if spacecraft else ['a','b']
self.L_shell_min = L_shell_min
self.L_shell_max = L_shell_max
self._apogees_only = apogees_only
# Connect to DB
isDb = self.__getFpsFromDb()
if not isDb:
orbit = self.__getOrbit()
trace = self.__getTrace(orbit)
self.scraft = orbit['scraft']
def map(self, hemisphere='north', boundinglat=35,
spacecraft=None, legend=True, date=True,
apogees=False):
"""Plot FPs on a map
**Belongs to**: :class:`rbspFp`
**Args**:
* **[hemisphere]**: plot FPs in this hemisphere ('north' or 'south')
* **[boundinglat]**: bounding latitude of map (absolute value)
* **[spacecraft]**: limit the plotting to one spacecraft ('a' or 'b')
* **[legend]**: to show or not to show the legend
* **[date]**: to show or not to show the date
* **[apogees]**: to show or not to show the apogees
**Returns**:
* **myMap**: a mpl_toolkits.basemap.Basemap object
**Example**:
::
# To plot 2 panels (1 per hemisphere)
fig = figure(figsize=(11,8))
subplot(121)
fps.map()
subplot(122)
fps.map(hemisphere='South')
fig.tight_layout()
"""
from mpl_toolkits.basemap import Basemap
from pylab import gca
ax = gca()
if hemisphere.lower() == 'north':
projection = 'npstere'
sgn = 1
lat = self.latNH
lon = self.lonNH
elif hemisphere.lower() == 'south':
projection = 'spstere'
sgn = -1
lat = self.latSH
lon = self.lonSH
# Generate map and background
myMap = Basemap(projection=projection, lon_0=270, boundinglat=sgn*boundinglat, ax=ax)
myMap.fillcontinents(color='.8')
myMap.drawmeridians(range(0,360,20), alpha=0.5)
myMap.drawparallels(range(-80,81,20), alpha=0.5)
# Calculate FP coordinates
x, y = myMap(lon, lat)
# Scatter FPs
if spacecraft is None or spacecraft == 'a':
myMap.scatter(x[self.scraft == 'a'], y[self.scraft == 'a'],
zorder=5, edgecolors='none', s=10, facecolors='r',
alpha=.8)
if legend:
ax.text(0, -0.01, 'RBSP-A', transform=ax.transAxes,
color='r', ha='left', va='top')
if spacecraft is None or spacecraft == 'b':
myMap.scatter(x[self.scraft == 'b'], y[self.scraft == 'b'],
zorder=5, edgecolors='none', s=10, facecolors='b',
alpha=.8)
if legend:
ax.text(1, -0.01, 'RBSP-B', transform=ax.transAxes,
color='b', ha='right', va='top')
# Show date/time interval
if date:
if self.sTime.date() == self.eTime.date():
dateTitle = '{:%d/%b/%Y %H:%M UT} - {:%H:%M UT}'
elif self.sTime.time() == self.eTime.time():
dateTitle = '{:%d/%b/%Y} - {:%d/%b/%Y (%H:%M UT)}'
else: dateTitle = '{:%d/%b/%Y %H:%M UT} - {:%d/%b/%Y %H:%M UT}'
ax.text(0, 1.01, dateTitle.format(self.sTime, self.eTime), transform=ax.transAxes)
if apogees:
myMap.scatter(x[self.apogees], y[self.apogees],
zorder=5, edgecolors='w', s=10, facecolors='k')
for ap in self.apogees:
self.__textHighlighted((x[ap],y[ap]), '{:%H:%M}'.format(self.times[ap]))
return myMap
def showISR(self, myMap, isr):
"""overlay ISR fovs on map
**Belongs to**: :class:`rbspFp`
**Args**:
* **myMap**: Basemap object
* **isr**: a list of ISRs to be plotted (codes include mho, sdt, eiscat, pfisr, risr)
"""
if isinstance(isr, str): isr = [isr]
for rad in isr:
dbConn = self.__dbConnect('isr')
if not dbConn:
print 'Could not access DB'
return
qIn = {'code': rad}
qRes = dbConn.info.find(qIn)
if qRes.count() == 0:
print 'Radar {} not found in db'.format(rad)
print 'Use one or more in {}'.format(dbConn.codes.find_one()['codes'])
continue
for el in qRes:
x, y = myMap(el['pos']['lon'], el['pos']['lat'])
myMap.scatter(x, y, zorder=6, s=20, facecolors='k')
if isinstance(x, list): x, y = x[0], y[0]
myMap.ax.text(x*1.04, y*0.96, el['code'].upper())
x, y = myMap(el['fov']['lon'], el['fov']['lat'])
myMap.plot(x, y, 'g')
def __getFpsFromDb(self):
"""Get FPs from DB
**Belongs to**: :class:`rbspFp`
**Args**:
* **None**
**Returns**:
* **isSuccess**: True it worked, False otherwise
"""
import numpy as np
dbConn = self.__dbConnect(self._db_name)
if not dbConn: return False
isAp = True if self._apogees_only else None
        # Build query
qIn = {'time': {'$gte': self.sTime, '$lte': self.eTime}}
if not self._spacecraft == ['a', 'b']:
qIn['spacecraft'] = self._spacecraft.upper()
if self.L_shell_min:
if 'L' not in qIn: qIn['L'] = {}
qIn['L']['$gte'] = self.L_shell_min
if self.L_shell_max:
if 'L' not in qIn: qIn['L'] = {}
qIn['L']['$lte'] = self.L_shell_max
if self._apogees_only:
qIn['isApogee'] = True
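        # For reference (added comment): with every option set, the query
        # document built above looks like
        #   {'time': {'$gte': sTime, '$lte': eTime}, 'spacecraft': 'A',
        #    'L': {'$gte': L_shell_min, '$lte': L_shell_max}, 'isApogee': True}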
# Launch query
qRes = dbConn.ephemeris.find(qIn).sort('time')
if qRes.count() == 0: return False
# Store query results
self.lonNH = []
self.latNH = []
self.lonSH = []
self.latSH = []
self.times = []
self.scraft = []
self.apogees = []
self.L = []
for i, el in enumerate(qRes):
self.lonNH.append( el['lonNH'] )
self.latNH.append( el['latNH'] )
self.lonSH.append( el['lonSH'] )
self.latSH.append( el['latSH'] )
self.times.append( el['time'] )
self.scraft.append( el['scraft'] )
self.L.append( el['L'] )
if el['isApogee']: self.apogees.append( i )
self.lonNH = np.array(self.lonNH)
self.latNH = np.array(self.latNH)
self.lonSH = np.array(self.lonSH)
self.latSH = np.array(self.latSH)
self.times = np.array(self.times)
self.scraft = np.array(self.scraft)
self.apogees = np.array(self.apogees)
self.L = np.array(self.L)
return True
def __dbConnect(self, dbName):
"""Try to establish a connection to remote db database
**Belongs to**: :class:`rbspFp`
        **Args**:
            * **dbName**: name of the database to connect to
**Returns**:
* **dbConn**: pymongo database object
"""
from pymongo import MongoClient
import sys
try:
conn = MongoClient( 'mongodb://{}:{}@{}/{}'.format(self._db_user,
self._db_pswd,
self._db_host,
dbName) )
dba = conn[dbName]
except:
print 'Could not connect to remote DB: ', sys.exc_info()[0]
return False
return dba
def __getOrbit(self):
"""Get orbit data from APL
**Belongs to**: :class:`rbspFp`
**Args**:
* **None**
        **Returns**:
            * **orbit**: dict of downloaded ephemeris with keys 'time', 'alt', 'lat', 'lon', 'scraft'
"""
import urllib2, urllib
from datetime import datetime
print 'Get orbit from JHU/APL'
header = 'on'
cmode = 'geo'
Cadence = 5
        # self._spacecraft is either a single letter ('a' or 'b') or the
        # default list ['a', 'b']; both can be iterated over directly
        scraft = self._spacecraft
orbit = {'time': [],
'alt': [],
'lat': [],
'lon': [],
'scraft': []}
for sc in scraft:
params = urllib.urlencode({'sDay': str( self.sTime.day ),
'sMonth': str( self.sTime.month ),
'sYear': str( self.sTime.year ),
'sHour': str( self.sTime.hour ),
'sMinute': str( self.sTime.minute ),
'eDay': str( self.eTime.day ),
'eMonth': str( self.eTime.month ),
'eYear': str( self.eTime.year ),
'eHour': str( self.eTime.hour ),
'eMinute': str( self.eTime.minute ),
'Cadence': str( Cadence ),
'mode': str( cmode ),
'scraft': sc,
'header': header,
'getASCII': 'Get ASCII Output'})
f = urllib2.urlopen("http://athena.jhuapl.edu/LT_Position_Calc", params)
# f = urllib2.urlopen("http://athena.jhuapl.edu/orbit_pos", params)
out = f.read().splitlines()
f.close()
st = out.index('<pre>')+1
ed = out.index('</pre>')
header = out[st].split()
lines = out[st+1:ed]
for i,l in enumerate(lines):
row = l.split()
cTime = datetime( int(row[0]), int(row[1]), int(row[2]),
int(row[3]), int(row[4]), int(row[5]) )
orbit['time'].append( cTime )
orbit['alt'].append( float(row[6]) )
orbit['lat'].append( float(row[7]) )
orbit['lon'].append( float(row[8]) )
orbit['scraft'].append( sc )
return orbit
def __getTrace(self, data):
"""Trace orbit to the ionosphere
**Belongs to**: :class:`rbspFp`
**Args**:
            * **data**: a dictionary containing ephemeris (with keys 'lat', 'lon', 'alt', 'time')
"""
import tsyganenko as ts
import numpy as np
fname = 'trace.{:%Y%m%d}.{:%Y%m%d}.dat'.format(self.sTime, self.eTime)
try:
trace = ts.tsygTrace(filename=fname)
print 'Read tracing results...'
except:
print 'Tracing...'
trace = ts.tsygTrace(data['lat'], data['lon'], data['alt'], datetime=data['time'], rmin=1.047)
trace.save( fname )
self.lonNH = trace.lonNH
self.latNH = trace.latNH
self.lonSH = trace.lonSH
self.latSH = trace.latSH
self.times = trace.datetime
# Mark apogees
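        # (added comment) an apogee is a local maximum of trace.rho (the radial
        # distance of the orbit samples): a point not smaller than its
        # predecessor and strictly greater than its successor; the two
        # endpoints are excluded below.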
mins = np.r_[True, trace.rho[1:] >= trace.rho[:-1]] & np.r_[trace.rho[:-1] > trace.rho[1:], True]
mins[0] = mins[-1] = False
self.apogees = np.where(mins)[0]
def __repr__(self):
sOut = 'Van Allen Probes (a.k.a. RBSP) ionospheric footpoints\n'
sOut += '{:%Y-%b-%d at %H:%M UT} to {:%Y-%b-%d at %H:%M UT}\n'.format(self.sTime, self.eTime)
sOut += '\t{} points\n'.format(len(self.times))
sOut += '\t{} apogee(s):\n'.format(len(self.apogees))
if len(self.apogees) > 0:
for i in self.apogees:
sOut += '\t\t{:%H:%M} UT, {}: ({:6.2f} N, {:6.2f} E)\t({:6.2f} N, {:6.2f} E)\n'.format(self.times[i], self.scraft[i].upper(),
self.latNH[i], self.lonNH[i],
self.latSH[i], self.lonSH[i])
return sOut
def __textHighlighted(self, xy, text, zorder=None, color='k', fontsize=None):
"""Plot highlighted annotation (with a white lining)
**Belongs to**: :class:`rbspFp`
**Args**:
* **xy**: position of point to annotate
* **text**: text to show
* **[zorder]**: text zorder
* **[color]**: text color
* **[fontsize]**: text font size
"""
import matplotlib as mp
from pylab import gca
ax = gca()
text_path = mp.text.TextPath( (0,0), text, size=fontsize)
p1 = mp.patches.PathPatch(text_path, ec="w", lw=2, fc="w", alpha=0.7, zorder=zorder,
transform=mp.transforms.IdentityTransform())
p2 = mp.patches.PathPatch(text_path, ec="none", fc=color, zorder=zorder,
transform=mp.transforms.IdentityTransform())
offsetbox2 = mp.offsetbox.AuxTransformBox(mp.transforms.IdentityTransform())
offsetbox2.add_artist(p1)
offsetbox2.add_artist(p2)
ax2disp = ax.transAxes.transform
disp2ax = ax.transAxes.inverted().transform
data2disp = ax.transData.transform
disp2data = ax.transData.inverted().transform
xyA = disp2ax( data2disp( xy ) )
frac = 0.5
scatC = (-frac*(xyA[0]-0.5)+xyA[0], -frac*(xyA[1]-0.5)+xyA[1])
scatC = disp2data( ax2disp( scatC ) )
ab = mp.offsetbox.AnnotationBbox( offsetbox2, xy,
xybox=(scatC[0], scatC[1]),
boxcoords="data",
box_alignment=(.5,.5),
arrowprops=dict(arrowstyle="-|>",
facecolor='none'),
frameon=False )
ax.add_artist(ab)
############################################################################
|
gpl-3.0
|
janez87/social-knowledge-extractor
|
test/run_scenarios.py
|
2
|
2019
|
__author__ = 'marcotagliabue'
import configuration
import pandas as pd
from utils import mongo_manager
from crawler.crawler_pipeline import PipelineCrawler
from orchestrator import Orchestrator
from knowledge_extractor.pipeline import Pipeline
import sys
from bson import ObjectId
# DANDELION API KEYS
configuration.API_KEY_DANDELION1 = sys.argv[1]
configuration.APP1_ID = sys.argv[2]
configuration.API_KEY_DANDELION2 = sys.argv[3]
configuration.APP2_ID = sys.argv[4]
configuration.API_KEY_DANDELION3 = sys.argv[5]
configuration.APP3_ID = sys.argv[6]
configuration.API_KEY_DANDELION4 = sys.argv[7]
configuration.APP4_ID = sys.argv[8]
# Twitter API
configuration.access_token = sys.argv[9]
configuration.access_token_secret = sys.argv[10]
configuration.consumer_key = sys.argv[11]
configuration.consumer_secret = sys.argv[12]
db_manager = mongo_manager.MongoManager(configuration.db_name)
def run_scenario(scenario):
diction = {"email":"marco.tagliabue@"+scenario+".com"}
diction["status"] = "processing"
seeds_dataframe = pd.read_csv("../data/In_csv/"+scenario+"/seed.csv")
seeds = seeds_dataframe.ix[:, 1].tolist()
expert_dataframe = pd.read_csv("../data/In_csv/"+scenario+"/expert_types.csv")
experts = expert_dataframe.ix[:, 0].tolist()
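    # (added note) seed.csv contributes its second column (the seed values) and
    # expert_types.csv its first column (the expert type labels)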
diction["expert_types"] = experts
id_experiment = db_manager.write_mongo("experiment", diction)
crawler = PipelineCrawler(100,seeds,id_experiment,db_manager)
    knowledge_extractor = Pipeline(db_manager,id_experiment)
    orchestrator = Orchestrator(crawler,knowledge_extractor,id_experiment, db_manager)
return id_experiment
def create_rank(id_experiment, scenario):
dataframe = pd.DataFrame(list(db_manager.find("rankings",{"experiment_id":id_experiment})))
dataframe.to_csv("../data/Out_csv/"+scenario+".csv", index=False)
if __name__ == '__main__':
print(sys.argv)
scenario = ["au", "finance", "fashion", "expo"]
id_experiment = run_scenario(scenario[3])
create_rank(id_experiment, scenario[3])
|
apache-2.0
|
sunsistemo/mozzacella-automato-salad
|
rule30.py
|
1
|
3276
|
import os
import sys
from time import sleep
from optparse import OptionParser
from builtins import int
try:
from tqdm import trange
import numpy as np
import matplotlib.pyplot as plt
except ImportError:
pass
STATE = None
def seed_gen(n=261):
global STATE
STATE = list(map(int, bin(int.from_bytes(os.urandom(n // 8 + 1), byteorder="little"))[2:][:n]))
def step(a, b, c):
return a ^ (b or c)
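# (added comment) step() is elementary cellular automaton Rule 30 written as a
# boolean formula, new = left XOR (centre OR right).  Truth table:
#   111->0  110->0  101->0  100->1  011->1  010->1  001->1  000->0
# i.e. the outputs spell out 00011110, the binary expansion of 30.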
def evolve_state():
global STATE
state = STATE
newstate = [0] * len(state)
newstate[0] = step(state[-1], state[0], state[1])
for i in range(1, len(state) - 1):
newstate[i] = step(state[i - 1], state[i], state[i + 1])
newstate[-1] = step(state[-2], state[-1], state[0])
STATE = newstate
def print_ca(size=100):
global STATE
seed_gen(size)
while True:
evolve_state()
print("".join([str(i) for i in STATE]))
sleep(0.02)
def randbit():
global STATE
evolve_state()
return STATE[0]
def randint(a, b, num_bits=None):
"""a and b are ints such that a < b."""
if num_bits is None:
interval = b - a
is_power_of_two = sum(int(i) for i in bin(interval)[2:]) == 1
if not is_power_of_two:
print("So long sucker!")
random_backup()
sys.exit()
num_bits = len(bin(interval)[2:]) - 1
bits = [0] * num_bits
for i in range(num_bits):
bits[i] = randbit()
return a + int("".join(map(str, bits)), 2)
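# (added comment) example: randint(0, 32) uses num_bits = len(bin(32)[2:]) - 1
# = 5 automaton bits and returns an integer in [0, 32).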
def random_backup():
import webbrowser
webbrowser.open("http://random.org")
def generate_nums(n=int(1E4), b=32):
nums = [0] * n
for i in trange(n):
nums[i] = randint(0, b)
return nums
def bytestream(a, b):
num_bits = len(bin(b - a)[2:]) - 1
num_bytes = num_bits // 8
assert num_bits == 8 * num_bytes
if sys.version_info.major >= 3:
write = sys.stdout.buffer.write
else:
write = sys.stdout.write
while True:
write(bytearray([randint(a, b, num_bits) for _ in range(2 ** 12)]))
def bitstream():
write = sys.stdout.write
bitstring = {1: '1', 0: '0'}
while True:
bits = [bitstring[randbit()] for _ in range(8192)]
write("".join(bits))
def plot_uniform(nums, b):
plt.hist(nums, bins=b)
plt.show()
def frequency_test(nums, b):
    bin_nums = [bin(int(x))[2:].zfill(int.bit_length(b - 1)) for x in nums]
num_ones = 0
num_zeros = 0
for b in bin_nums:
num_ones += b.count("1")
num_zeros += b.count("0")
print("Frequency Test: [#0: %d], [#1: %d]" % (num_zeros, num_ones))
def main():
parser = OptionParser()
parser.set_defaults(num_gens=int(1E4))
parser.add_option('-n', dest='num_gens',
help='Number of random numbers to generate')
parser.add_option("-B", "--bytestream", action="store_true")
parser.add_option("-b", "--bitstream", action="store_true")
(options, args) = parser.parse_args()
seed_gen()
n = int(options.num_gens)
b = 32
if options.bytestream:
return bytestream(0, 2 ** 8)
elif options.bitstream:
return bitstream()
nums = generate_nums(n, b)
plot_uniform(nums, b)
frequency_test(nums, b)
if __name__ == "__main__":
main()
|
gpl-3.0
|
INM-6/hybridLFPy
|
examples/example_microcircuit.py
|
2
|
23377
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Hybrid LFP scheme example script, applying the methodology with the model of:
Potjans, T. and Diesmann, M. "The Cell-Type Specific Cortical Microcircuit:
Relating Structure and Activity in a Full-Scale Spiking Network Model".
Cereb. Cortex (2014) 24 (3): 785-806.
doi: 10.1093/cercor/bhs358
Synopsis of the main simulation procedure:
1. Loading of parameterset
a. network parameters
b. parameters for hybrid scheme
2. Set up file destinations for different simulation output
3. network simulation
a. execute network simulation using NEST (www.nest-simulator.org)
b. merge nest spike output from different MPI ranks
4. Create an sqlite3-backed object-representation of all the spiking output
5. Iterate over post-synaptic populations:
a. Create Population object with appropriate parameters for
each specific population
b. Run all computations for populations
c. Postprocess simulation output of all cells in population
6. Postprocess all cell- and population-specific output data
7. Create a tarball for all non-redundant simulation output
The full simulation can be evoked by issuing a mpirun call, such as
mpirun -np 64 python example_microcircuit.py
where the number 64 is the desired number of MPI threads & CPU cores
Given the size of the network and demands for the multi-compartment LFP-
predictions using the present scheme, running the model on a large scale
compute facility is strongly encouraged.
'''
from example_plotting import *
import matplotlib.pyplot as plt
from example_microcircuit_params import multicompartment_params, \
point_neuron_network_params
import os
if 'DISPLAY' not in os.environ:
import matplotlib
matplotlib.use('Agg')
import numpy as np
from time import time
import neuron # NEURON compiled with MPI must be imported before NEST and mpi4py
# to avoid NEURON being aware of MPI.
import nest # Import not used, but done in order to ensure correct execution
from hybridLFPy import PostProcess, Population, CachedNetwork
from hybridLFPy import setup_file_dest, helpers
from glob import glob
import tarfile
import lfpykit
from mpi4py import MPI
# set some seed values
SEED = 12345678
SIMULATIONSEED = 12345678
np.random.seed(SEED)
################# Initialization of MPI stuff ############################
COMM = MPI.COMM_WORLD
SIZE = COMM.Get_size()
RANK = COMM.Get_rank()
# if True, execute full model. If False, do only the plotting. Simulation results
# must exist.
properrun = True
# check if mod file for synapse model specified in expisyn.mod is loaded
if not hasattr(neuron.h, 'ExpSynI'):
if RANK == 0:
os.system('nrnivmodl')
COMM.Barrier()
neuron.load_mechanisms('.')
##########################################################################
# PARAMETERS
##########################################################################
# Full set of parameters including network parameters
params = multicompartment_params()
##########################################################################
# Function declaration(s)
##########################################################################
def merge_gdf(model_params,
raw_label='spikes_',
file_type='gdf',
fileprefix='spikes',
skiprows=0):
'''
NEST produces one file per virtual process containing recorder output.
This function gathers and combines them into one single file per
network population.
Parameters
----------
model_params : object
network parameters object
    raw_label : str
        file name prefix of the raw per-virtual-process recorder output
    file_type : str
        file extension of the recorder output files
    fileprefix : str
        file name prefix for the merged per-population output files
    skiprows : int
        number of header rows to skip when reading the recorder output
Returns
-------
None
'''
def get_raw_gids(model_params):
'''
Reads text file containing gids of neuron populations as created within
the NEST simulation. These gids are not continuous as in the simulation
devices get created in between.
Parameters
----------
model_params : object
network parameters object
Returns
-------
        gids : list
            list of [first gid, last gid] pairs, one per network population
'''
gidfile = open(os.path.join(model_params.raw_nest_output_path,
model_params.GID_filename), 'r')
gids = []
for l in gidfile:
a = l.split()
gids.append([int(a[0]), int(a[1])])
return gids
# some preprocessing
raw_gids = get_raw_gids(model_params)
pop_sizes = [raw_gids[i][1] - raw_gids[i][0] + 1
for i in np.arange(model_params.Npops)]
raw_first_gids = [raw_gids[i][0] for i in np.arange(model_params.Npops)]
converted_first_gids = [int(1 + np.sum(pop_sizes[:i]))
for i in np.arange(model_params.Npops)]
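    # (added comment) raw NEST gids are not contiguous across populations, so
    # spike gids are renumbered below; e.g. if population 0 occupies raw gids
    # [4, 1003] and population 1 starts at raw gid 1006, converted_first_gids
    # is [1, 1001] and a spike from raw gid 1006 becomes gid 1001.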
for pop_idx in np.arange(model_params.Npops):
if pop_idx % SIZE == RANK:
files = glob(os.path.join(model_params.raw_nest_output_path,
raw_label + '{}*.{}'.format(pop_idx,
file_type)))
gdf = [] # init
for f in files:
new_gdf = helpers.read_gdf(f, skiprows)
for line in new_gdf:
line[0] = line[0] - raw_first_gids[pop_idx] + \
converted_first_gids[pop_idx]
gdf.append(line)
print(
'writing: {}'.format(
os.path.join(
model_params.spike_output_path,
fileprefix +
'_{}.{}'.format(
model_params.X[pop_idx],
file_type))))
helpers.write_gdf(
gdf,
os.path.join(
model_params.spike_output_path,
fileprefix +
'_{}.{}'.format(
model_params.X[pop_idx],
file_type)))
COMM.Barrier()
return
def dict_of_numpyarray_to_dict_of_list(d):
'''
Convert dictionary containing numpy arrays to dictionary containing lists
Parameters
----------
d : dict
sli parameter name and value as dictionary key and value pairs
Returns
-------
d : dict
modified dictionary
'''
for key, value in d.items():
if isinstance(value, dict): # if value == dict
# recurse
d[key] = dict_of_numpyarray_to_dict_of_list(value)
elif isinstance(value, np.ndarray): # or isinstance(value,list) :
d[key] = value.tolist()
return d
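# (added comment) for example, dict_of_numpyarray_to_dict_of_list(
#     {'a': np.arange(3), 'b': {'c': np.zeros(2)}})
# returns {'a': [0, 1, 2], 'b': {'c': [0.0, 0.0]}}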
def send_nest_params_to_sli(p):
'''
Read parameters and send them to SLI
Parameters
----------
p : dict
sli parameter name and value as dictionary key and value pairs
Returns
-------
None
'''
for name in list(p.keys()):
value = p[name]
if isinstance(value, np.ndarray):
value = value.tolist()
if isinstance(value, dict):
value = dict_of_numpyarray_to_dict_of_list(value)
if name == 'neuron_model': # special case as neuron_model is a
# NEST model and not a string
try:
nest.ll_api.sli_run('/' + name)
nest.ll_api.sli_push(value)
nest.ll_api.sli_run('eval')
nest.ll_api.sli_run('def')
except BaseException:
print('Could not put variable %s on SLI stack' % (name))
print(type(value))
else:
try:
nest.ll_api.sli_run('/' + name)
nest.ll_api.sli_push(value)
nest.ll_api.sli_run('def')
except BaseException:
print('Could not put variable %s on SLI stack' % (name))
print(type(value))
return
def sli_run(parameters=object(),
fname='microcircuit.sli',
verbosity='M_INFO'):
'''
Takes parameter-class and name of main sli-script as input, initiating the
simulation.
Parameters
----------
parameters : object
parameter class instance
fname : str
path to sli codes to be executed
verbosity : str,
nest verbosity flag
Returns
-------
None
'''
# Load parameters from params file, and pass them to nest
# Python -> SLI
send_nest_params_to_sli(vars(parameters))
# set SLI verbosity
nest.ll_api.sli_run("%s setverbosity" % verbosity)
# Run NEST/SLI simulation
nest.ll_api.sli_run('(%s) run' % fname)
def tar_raw_nest_output(raw_nest_output_path,
delete_files=True,
filepatterns=['voltages*.dat',
'spikes*.dat',
                                      'weighted_input_spikes*.dat',
                                      '*.gdf']):
'''
Create tar file of content in `raw_nest_output_path` and optionally
delete files matching given pattern.
Parameters
----------
raw_nest_output_path: path
params.raw_nest_output_path
delete_files: bool
if True, delete .dat files
filepatterns: list of str
patterns of files being deleted
'''
if RANK == 0:
# create tarfile
fname = raw_nest_output_path + '.tar'
with tarfile.open(fname, 'a') as t:
t.add(raw_nest_output_path)
# remove files from <raw_nest_output_path>
for pattern in filepatterns:
for f in glob(os.path.join(raw_nest_output_path, pattern)):
try:
os.remove(f)
except OSError:
print('Error while deleting {}'.format(f))
# sync
COMM.Barrier()
return
###############################################################################
# MAIN simulation procedure
###############################################################################
# tic toc
tic = time()
if properrun:
# set up the file destination
setup_file_dest(params, clearDestination=True)
######## Perform network simulation ######################################
if properrun:
# initiate nest simulation with only the point neuron network parameter
# class
networkParams = point_neuron_network_params()
sli_run(parameters=networkParams,
fname='microcircuit.sli',
verbosity='M_INFO')
# preprocess the gdf files containing spiking output, voltages, weighted and
# spatial input spikes and currents:
merge_gdf(networkParams,
raw_label=networkParams.spike_recorder_label,
file_type='dat',
fileprefix=params.networkSimParams['label'],
skiprows=3)
    # create tar file archive of the <raw_nest_output_path> folder, as the
    # .dat files are no longer needed, and remove the originals
tar_raw_nest_output(params.raw_nest_output_path, delete_files=True)
# Create an object representation of the simulation output that uses sqlite3
networkSim = CachedNetwork(**params.networkSimParams)
toc = time() - tic
print('NEST simulation and gdf file processing done in %.3f seconds' % toc)
# Set up LFPykit measurement probes for LFPs and CSDs
if properrun:
probes = []
probes.append(lfpykit.RecExtElectrode(cell=None, **params.electrodeParams))
probes.append(
lfpykit.LaminarCurrentSourceDensity(
cell=None,
**params.CSDParams))
probes.append(lfpykit.CurrentDipoleMoment(cell=None))
####### Set up populations ###############################################
if properrun:
# iterate over each cell type, run single-cell simulations and create
# population object
for i, y in enumerate(params.y):
# create population:
pop = Population(
# parent class parameters
cellParams=params.yCellParams[y],
rand_rot_axis=params.rand_rot_axis[y],
simulationParams=params.simulationParams,
populationParams=params.populationParams[y],
y=y,
layerBoundaries=params.layerBoundaries,
probes=probes,
savelist=params.savelist,
savefolder=params.savefolder,
dt_output=params.dt_output,
POPULATIONSEED=SIMULATIONSEED + i,
# daughter class kwargs
X=params.X,
networkSim=networkSim,
k_yXL=params.k_yXL[y],
synParams=params.synParams[y],
synDelayLoc=params.synDelayLoc[y],
synDelayScale=params.synDelayScale[y],
J_yX=params.J_yX[y],
tau_yX=params.tau_yX[y],
recordSingleContribFrac=params.recordSingleContribFrac,
)
# run population simulation and collect the data
pop.run()
pop.collect_data()
# object no longer needed
del pop
####### Postprocess the simulation output ################################
# reset seed, but output should be deterministic from now on
np.random.seed(SIMULATIONSEED)
if properrun:
# do some postprocessing on the collected data, i.e., superposition
# of population LFPs, CSDs etc
postproc = PostProcess(y=params.y,
dt_output=params.dt_output,
probes=probes,
savefolder=params.savefolder,
mapping_Yy=params.mapping_Yy,
savelist=params.savelist
)
# run through the procedure
postproc.run()
# create tar-archive with output
postproc.create_tar_archive()
# tic toc
print('Execution time: %.3f seconds' % (time() - tic))
##########################################################################
# Create set of plots from simulation output
##########################################################################
########## matplotlib settings ###########################################
plt.close('all')
if RANK == 0:
# create network raster plot
x, y = networkSim.get_xy((500, 1000), fraction=1)
fig, ax = plt.subplots(1, figsize=(5, 8))
fig.subplots_adjust(left=0.2)
networkSim.plot_raster(ax, (500, 1000), x, y, markersize=1, marker='o',
alpha=.5, legend=False, pop_names=True)
remove_axis_junk(ax)
ax.set_xlabel(r'$t$ (ms)', labelpad=0.1)
ax.set_ylabel('population', labelpad=0.1)
ax.set_title('network raster')
fig.savefig(os.path.join(params.figures_path, 'network_raster.pdf'),
dpi=300)
plt.close(fig)
# plot cell locations
fig, ax = plt.subplots(1, 1, figsize=(5, 8))
fig.subplots_adjust(left=0.2)
plot_population(ax, params.populationParams, params.electrodeParams,
params.layerBoundaries,
X=params.y,
markers=['*' if 'b' in y else '^' for y in params.y],
colors=['b' if 'b' in y else 'r' for y in params.y],
layers=['L1', 'L2/3', 'L4', 'L5', 'L6'],
isometricangle=np.pi / 24, aspect='equal')
ax.set_title('layers')
fig.savefig(os.path.join(params.figures_path, 'layers.pdf'), dpi=300)
plt.close(fig)
# plot cell locations
fig, ax = plt.subplots(1, 1, figsize=(5, 8))
fig.subplots_adjust(left=0.2)
plot_population(ax, params.populationParams, params.electrodeParams,
params.layerBoundaries,
X=params.y,
markers=['*' if 'b' in y else '^' for y in params.y],
colors=['b' if 'b' in y else 'r' for y in params.y],
layers=['L1', 'L2/3', 'L4', 'L5', 'L6'],
isometricangle=np.pi / 24, aspect='equal')
plot_soma_locations(ax, X=params.y,
populations_path=params.populations_path,
markers=['*' if 'b' in y else '^' for y in params.y],
colors=['b' if 'b' in y else 'r' for y in params.y],
isometricangle=np.pi / 24, )
ax.set_title('soma positions')
fig.savefig(os.path.join(params.figures_path, 'soma_locations.pdf'),
dpi=150)
plt.close(fig)
# plot morphologies in their respective locations
fig, ax = plt.subplots(1, 1, figsize=(5, 8))
fig.subplots_adjust(left=0.2)
plot_population(ax, params.populationParams, params.electrodeParams,
params.layerBoundaries,
X=params.y,
markers=['*' if 'b' in y else '^' for y in params.y],
colors=['b' if 'b' in y else 'r' for y in params.y],
layers=['L1', 'L2/3', 'L4', 'L5', 'L6'],
isometricangle=np.pi / 24, aspect='equal')
plot_morphologies(ax,
X=params.y,
markers=['*' if 'b' in y else '^' for y in params.y],
colors=['b' if 'b' in y else 'r' for y in params.y],
isometricangle=np.pi / 24,
populations_path=params.populations_path,
cellParams=params.yCellParams,
fraction=0.02)
ax.set_title('LFP generators')
fig.savefig(os.path.join(params.figures_path, 'populations.pdf'), dpi=300)
plt.close(fig)
# plot morphologies in their respective locations
fig, ax = plt.subplots(1, 1, figsize=(5, 8))
fig.subplots_adjust(left=0.2)
plot_population(ax, params.populationParams, params.electrodeParams,
params.layerBoundaries,
X=params.y,
markers=['*' if 'b' in y else '^' for y in params.y],
colors=['b' if 'b' in y else 'r' for y in params.y],
layers=['L1', 'L2/3', 'L4', 'L5', 'L6'],
isometricangle=np.pi / 24, aspect='equal')
plot_individual_morphologies(
ax,
X=params.y,
markers=[
'*' if 'b' in y else '^' for y in params.y],
colors=[
'b' if 'b' in y else 'r' for y in params.y],
isometricangle=np.pi / 24,
cellParams=params.yCellParams,
populationParams=params.populationParams)
ax.set_title('morphologies')
fig.savefig(os.path.join(params.figures_path, 'cell_models.pdf'), dpi=300)
plt.close(fig)
# plot compound LFP and CSD traces
fig = plt.figure(figsize=(13, 8))
fig.subplots_adjust(left=0.075, right=0.95, bottom=0.075, top=0.95,
hspace=0.2, wspace=0.2)
gs = gridspec.GridSpec(2, 2)
ax0 = fig.add_subplot(gs[:, 0])
ax1 = fig.add_subplot(gs[0, 1])
ax2 = fig.add_subplot(gs[1, 1])
ax0.set_title('network raster')
ax1.set_title('CSD')
ax2.set_title('LFP')
T = (500, 700)
x, y = networkSim.get_xy(T, fraction=1)
networkSim.plot_raster(ax0, T, x, y, markersize=1, marker='o',
alpha=.5, legend=False, pop_names=True)
remove_axis_junk(ax0)
ax0.set_xlabel(r'$t$ (ms)', labelpad=0.1)
ax0.set_ylabel('population', labelpad=0.1)
plot_signal_sum(ax1, z=params.electrodeParams['z'],
fname=os.path.join(params.savefolder,
'LaminarCurrentSourceDensity_sum.h5'),
unit='nA$\\mu$m$^{-3}$', T=T)
ax1.set_xticklabels([])
ax1.set_xlabel('')
plot_signal_sum(ax2, z=params.electrodeParams['z'],
fname=os.path.join(params.savefolder,
'RecExtElectrode_sum.h5'),
unit='mV', T=T)
ax2.set_xlabel('$t$ (ms)')
fig.savefig(os.path.join(params.figures_path, 'compound_signals.pdf'),
dpi=300)
plt.close(fig)
# plot some stats for current dipole moments of each population,
# temporal traces,
# and EEG predictions on scalp using 4-sphere volume conductor model
from LFPy import FourSphereVolumeConductor
T = [500, 1000]
P_Y_var = np.zeros((len(params.Y) + 1, 3)) # dipole moment variance
for i, Y in enumerate(params.Y):
f = h5py.File(
os.path.join(
params.savefolder,
'populations',
'{}_population_CurrentDipoleMoment.h5'.format(Y)),
'r')
srate = f['srate'][()]
P_Y_var[i, :] = f['data'][:, int(T[0] * 1000 / srate):].var(axis=-1)
f_sum = h5py.File(os.path.join(params.savefolder,
'CurrentDipoleMoment_sum.h5'), 'r')
P_Y_var[-1, :] = f_sum['data'][:, int(T[0] * 1000 / srate):].var(axis=-1)
tvec = np.arange(f_sum['data'].shape[-1]) * 1000. / srate
fig = plt.figure(figsize=(5, 8))
fig.subplots_adjust(left=0.2, right=0.95, bottom=0.075, top=0.95,
hspace=0.4, wspace=0.2)
ax = fig.add_subplot(3, 2, 1)
ax.plot(P_Y_var, '-o')
ax.legend(['$P_x$', '$P_y$', '$P_z$'], fontsize=8, frameon=False)
ax.set_xticks(np.arange(len(params.Y) + 1))
ax.set_xticklabels(params.Y + ['SUM'], rotation='vertical')
ax.set_ylabel(r'$\sigma^2 (\mathrm{nA}^2 \mu\mathrm{m}^2)$', labelpad=0)
ax.set_title('signal variance')
# make some EEG predictions
radii = [79000., 80000., 85000., 90000.]
sigmas = [0.3, 1.5, 0.015, 0.3]
r = np.array([[0., 0., 90000.]])
rz = np.array([0., 0., 78000.])
# draw spherical shells
ax = fig.add_subplot(3, 2, 2, aspect='equal')
phi = np.linspace(np.pi / 4, np.pi * 3 / 4, 61)
for R in radii:
x = R * np.cos(phi)
y = R * np.sin(phi)
ax.plot(x, y, lw=0.5)
ax.plot(0, rz[-1], 'k.', clip_on=False)
ax.plot(0, r[0, -1], 'k*', clip_on=False)
ax.axis('off')
ax.legend(['brain', 'CSF', 'skull', 'scalp', r'$\mathbf{P}$', 'EEG'],
fontsize=8, frameon=False)
ax.set_title('4-sphere head model')
sphere_model = FourSphereVolumeConductor(r, radii, sigmas)
# current dipole moment
p = f_sum['data'][:, int(T[0] * 1000 / srate):int(T[1] * 1000 / srate)]
# compute potential
potential = sphere_model.get_dipole_potential(p, rz)
# plot dipole moment
ax = fig.add_subplot(3, 1, 2)
ax.plot(tvec[(tvec >= T[0]) & (tvec < T[1])], p.T)
ax.set_ylabel(r'$\mathbf{P}(t)$ (nA$\mu$m)', labelpad=0)
ax.legend(['$P_x$', '$P_y$', '$P_z$'], fontsize=8, frameon=True)
ax.set_title('current dipole moment sum')
# plot surface potential directly on top
ax = fig.add_subplot(3, 1, 3, sharex=ax)
ax.plot(tvec[(tvec >= T[0]) & (tvec < T[1])],
potential.T * 1000) # mV->uV unit conversion
ax.set_ylabel(r'EEG ($\mu$V)', labelpad=0)
ax.set_xlabel(r'$t$ (ms)', labelpad=0)
ax.set_title('scalp potential')
fig.savefig(
os.path.join(
params.figures_path,
'current_dipole_moments.pdf'),
dpi=300)
plt.close(fig)
# add figures to output .tar archive
with tarfile.open(params.savefolder + '.tar', 'a:') as f:
for pdf in glob(os.path.join(params.figures_path, '*.pdf')):
arcname = os.path.join(os.path.split(
params.savefolder)[-1], 'figures', os.path.split(pdf)[-1])
f.add(name=pdf, arcname=arcname)
|
gpl-3.0
|
rhattersley/iris
|
lib/iris/tests/__init__.py
|
4
|
34468
|
# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides testing capabilities and customisations specific to Iris.
.. note:: This module needs to control the matplotlib backend, so it
**must** be imported before ``matplotlib.pyplot``.
The primary class for this module is :class:`IrisTest`.
By default, this module sets the matplotlib backend to "agg". But when
this module is imported it checks ``sys.argv`` for the flag "-d". If
found, it is removed from ``sys.argv`` and the matplotlib backend is
switched to "tkagg" to allow the interactive visual inspection of
graphical test results.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import collections
import contextlib
import difflib
import filecmp
import functools
import gzip
import inspect
import io
import logging
import os
import os.path
import shutil
import subprocess
import sys
import unittest
import warnings
import xml.dom.minidom
import zlib
try:
from unittest import mock
except ImportError:
import mock
import numpy as np
import numpy.ma as ma
import iris.cube
import iris.config
import iris.util
# Test for availability of matplotlib.
# (And remove matplotlib as an iris.tests dependency.)
try:
import matplotlib
import matplotlib.testing.compare as mcompare
import matplotlib.pyplot as plt
except ImportError:
MPL_AVAILABLE = False
else:
MPL_AVAILABLE = True
try:
from osgeo import gdal
except ImportError:
GDAL_AVAILABLE = False
else:
GDAL_AVAILABLE = True
try:
import gribapi
except ImportError:
GRIB_AVAILABLE = False
else:
GRIB_AVAILABLE = True
from iris.fileformats.grib.message import GribMessage
#: Basepath for test results.
_RESULT_PATH = os.path.join(os.path.dirname(__file__), 'results')
if '--data-files-used' in sys.argv:
sys.argv.remove('--data-files-used')
fname = '/var/tmp/all_iris_test_resource_paths.txt'
print('saving list of files used by tests to %s' % fname)
_EXPORT_DATAPATHS_FILE = open(fname, 'w')
else:
_EXPORT_DATAPATHS_FILE = None
if '--create-missing' in sys.argv:
sys.argv.remove('--create-missing')
print('Allowing creation of missing test results.')
os.environ['IRIS_TEST_CREATE_MISSING'] = 'true'
# A shared logger for use by unit tests
logger = logging.getLogger('tests')
# Whether to display matplotlib output to the screen.
_DISPLAY_FIGURES = False
if MPL_AVAILABLE:
if '-d' in sys.argv:
sys.argv.remove('-d')
plt.switch_backend('tkagg')
_DISPLAY_FIGURES = True
else:
plt.switch_backend('agg')
_DEFAULT_IMAGE_TOLERANCE = 10.0
def main():
"""A wrapper for unittest.main() which adds iris.test specific options to the help (-h) output."""
if '-h' in sys.argv or '--help' in sys.argv:
stdout = sys.stdout
buff = io.StringIO()
# NB. unittest.main() raises an exception after it's shown the help text
try:
sys.stdout = buff
unittest.main()
finally:
sys.stdout = stdout
lines = buff.getvalue().split('\n')
lines.insert(9, 'Iris-specific options:')
lines.insert(10, ' -d Display matplotlib figures (uses tkagg).')
lines.insert(11, ' NOTE: To compare results of failing tests, ')
lines.insert(12, ' use idiff.py instead')
lines.insert(13, ' --data-files-used Save a list of files used to a temporary file')
        lines.insert(
            14, ' --create-missing Create missing test results')
print('\n'.join(lines))
else:
unittest.main()
def get_data_path(relative_path):
"""
Return the absolute path to a data file when given the relative path
as a string, or sequence of strings.
"""
if not isinstance(relative_path, six.string_types):
relative_path = os.path.join(*relative_path)
data_path = os.path.join(iris.config.TEST_DATA_DIR, relative_path)
if _EXPORT_DATAPATHS_FILE is not None:
_EXPORT_DATAPATHS_FILE.write(data_path + '\n')
if isinstance(data_path, six.string_types) and not os.path.exists(data_path):
# if the file is gzipped, ungzip it and return the path of the ungzipped
# file.
gzipped_fname = data_path + '.gz'
if os.path.exists(gzipped_fname):
with gzip.open(gzipped_fname, 'rb') as gz_fh:
try:
with open(data_path, 'wb') as fh:
fh.writelines(gz_fh)
except IOError:
# Put ungzipped data file in a temporary path, since we
# can't write to the original path (maybe it is owned by
# the system.)
_, ext = os.path.splitext(data_path)
data_path = iris.util.create_temp_filename(suffix=ext)
with open(data_path, 'wb') as fh:
fh.writelines(gz_fh)
return data_path
def get_result_path(relative_path):
"""Returns the absolute path to a result file when given the relative path
as a string, or sequence of strings."""
if not isinstance(relative_path, six.string_types):
relative_path = os.path.join(*relative_path)
return os.path.abspath(os.path.join(_RESULT_PATH, relative_path))
class IrisTest(unittest.TestCase):
"""A subclass of unittest.TestCase which provides Iris specific testing functionality."""
_assertion_counts = collections.defaultdict(int)
@classmethod
def setUpClass(cls):
# Ensure that the CF profile if turned-off for testing.
iris.site_configuration['cf_profile'] = None
def _assert_str_same(self, reference_str, test_str, reference_filename, type_comparison_name='Strings'):
if reference_str != test_str:
diff = ''.join(difflib.unified_diff(reference_str.splitlines(1), test_str.splitlines(1),
'Reference', 'Test result', '', '', 0))
self.fail("%s do not match: %s\n%s" % (type_comparison_name, reference_filename, diff))
def result_path(self, basename=None, ext=''):
"""
Return the full path to a test result, generated from the \
calling file, class and, optionally, method.
Optional kwargs :
* basename - File basename. If omitted, this is \
generated from the calling method.
* ext - Appended file extension.
"""
if ext and not ext.startswith('.'):
ext = '.' + ext
# Generate the folder name from the calling file name.
path = os.path.abspath(inspect.getfile(self.__class__))
path = os.path.splitext(path)[0]
sub_path = path.rsplit('iris', 1)[1].split('tests', 1)[1][1:]
# Generate the file name from the calling function name?
if basename is None:
stack = inspect.stack()
for frame in stack[1:]:
if 'test_' in frame[3]:
basename = frame[3].replace('test_', '')
break
filename = basename + ext
result = os.path.join(get_result_path(''),
sub_path.replace('test_', ''),
self.__class__.__name__.replace('Test_', ''),
filename)
return result
def assertCMLApproxData(self, cubes, reference_filename=None, *args,
**kwargs):
# passes args and kwargs on to approx equal
if isinstance(cubes, iris.cube.Cube):
cubes = [cubes]
if reference_filename is None:
reference_filename = self.result_path(None, 'cml')
reference_filename = [get_result_path(reference_filename)]
for i, cube in enumerate(cubes):
fname = list(reference_filename)
# don't want the ".cml" for the numpy data file
if fname[-1].endswith(".cml"):
fname[-1] = fname[-1][:-4]
fname[-1] += '.data.%d.npy' % i
self.assertCubeDataAlmostEqual(cube, fname, *args, **kwargs)
self.assertCML(cubes, reference_filename, checksum=False)
def assertCDL(self, netcdf_filename, reference_filename=None, flags='-h'):
"""
Test that the CDL for the given netCDF file matches the contents
of the reference file.
If the environment variable IRIS_TEST_CREATE_MISSING is
non-empty, the reference file is created if it doesn't exist.
Args:
* netcdf_filename:
The path to the netCDF file.
Kwargs:
* reference_filename:
The relative path (relative to the test results directory).
If omitted, the result is generated from the calling
method's name, class, and module using
:meth:`iris.tests.IrisTest.result_path`.
* flags:
Command-line flags for `ncdump`, as either a whitespace
separated string or an iterable. Defaults to '-h'.
"""
if reference_filename is None:
reference_path = self.result_path(None, 'cdl')
else:
reference_path = get_result_path(reference_filename)
# Convert the netCDF file to CDL file format.
cdl_filename = iris.util.create_temp_filename(suffix='.cdl')
if flags is None:
flags = []
elif isinstance(flags, six.string_types):
flags = flags.split()
else:
flags = list(map(str, flags))
with open(cdl_filename, 'w') as cdl_file:
subprocess.check_call(['ncdump'] + flags + [netcdf_filename],
stderr=cdl_file, stdout=cdl_file)
# Ingest the CDL for comparison, excluding first line.
with open(cdl_filename, 'r') as cdl_file:
lines = cdl_file.readlines()[1:]
# Sort the dimensions (except for the first, which can be unlimited).
# This gives consistent CDL across different platforms.
sort_key = lambda line: ('UNLIMITED' not in line, line)
dimension_lines = slice(lines.index('dimensions:\n') + 1,
lines.index('variables:\n'))
lines[dimension_lines] = sorted(lines[dimension_lines], key=sort_key)
cdl = ''.join(lines)
os.remove(cdl_filename)
self._check_same(cdl, reference_path, type_comparison_name='CDL')
def assertCML(self, cubes, reference_filename=None, checksum=True):
"""
Test that the CML for the given cubes matches the contents of
the reference file.
If the environment variable IRIS_TEST_CREATE_MISSING is
non-empty, the reference file is created if it doesn't exist.
Args:
* cubes:
Either a Cube or a sequence of Cubes.
Kwargs:
* reference_filename:
The relative path (relative to the test results directory).
If omitted, the result is generated from the calling
method's name, class, and module using
:meth:`iris.tests.IrisTest.result_path`.
* checksum:
When True, causes the CML to include a checksum for each
Cube's data. Defaults to True.
"""
if isinstance(cubes, iris.cube.Cube):
cubes = [cubes]
if reference_filename is None:
reference_filename = self.result_path(None, 'cml')
if isinstance(cubes, (list, tuple)):
xml = iris.cube.CubeList(cubes).xml(checksum=checksum, order=False,
byteorder=False)
else:
xml = cubes.xml(checksum=checksum, order=False, byteorder=False)
reference_path = get_result_path(reference_filename)
self._check_same(xml, reference_path)
def assertTextFile(self, source_filename, reference_filename, desc="text file"):
"""Check if two text files are the same, printing any diffs."""
with open(source_filename) as source_file:
source_text = source_file.readlines()
with open(reference_filename) as reference_file:
reference_text = reference_file.readlines()
if reference_text != source_text:
diff = ''.join(difflib.unified_diff(reference_text, source_text, 'Reference', 'Test result', '', '', 0))
self.fail("%s does not match reference file: %s\n%s" % (desc, reference_filename, diff))
def assertCubeDataAlmostEqual(self, cube, reference_filename, *args, **kwargs):
reference_path = get_result_path(reference_filename)
if self._check_reference_file(reference_path):
kwargs.setdefault('err_msg', 'Reference file %s' % reference_path)
result = np.load(reference_path)
if isinstance(result, np.lib.npyio.NpzFile):
self.assertIsInstance(cube.data, ma.MaskedArray, 'Cube data was not a masked array.')
# Avoid comparing any non-initialised array data.
data = cube.data.filled()
np.testing.assert_array_almost_equal(data, result['data'],
*args, **kwargs)
np.testing.assert_array_equal(cube.data.mask, result['mask'])
else:
np.testing.assert_array_almost_equal(cube.data, result, *args, **kwargs)
else:
self._ensure_folder(reference_path)
logger.warning('Creating result file: %s', reference_path)
if isinstance(cube.data, ma.MaskedArray):
# Avoid recording any non-initialised array data.
data = cube.data.filled()
with open(reference_path, 'wb') as reference_file:
np.savez(reference_file, data=data, mask=cube.data.mask)
else:
with open(reference_path, 'wb') as reference_file:
np.save(reference_file, cube.data)
def assertFilesEqual(self, test_filename, reference_filename):
reference_path = get_result_path(reference_filename)
if self._check_reference_file(reference_path):
fmt = 'test file {!r} does not match reference {!r}.'
self.assertTrue(filecmp.cmp(test_filename, reference_path),
fmt.format(test_filename, reference_path))
else:
self._ensure_folder(reference_path)
logger.warning('Creating result file: %s', reference_path)
shutil.copy(test_filename, reference_path)
def assertString(self, string, reference_filename=None):
"""
Test that `string` matches the contents of the reference file.
If the environment variable IRIS_TEST_CREATE_MISSING is
non-empty, the reference file is created if it doesn't exist.
Args:
* string:
The string to check.
Kwargs:
* reference_filename:
The relative path (relative to the test results directory).
If omitted, the result is generated from the calling
method's name, class, and module using
:meth:`iris.tests.IrisTest.result_path`.
"""
if reference_filename is None:
reference_path = self.result_path(None, 'txt')
else:
reference_path = get_result_path(reference_filename)
self._check_same(string, reference_path,
type_comparison_name='Strings')
def assertRepr(self, obj, reference_filename):
self.assertString(repr(obj), reference_filename)
def _check_same(self, item, reference_path, type_comparison_name='CML'):
if self._check_reference_file(reference_path):
with open(reference_path, 'rb') as reference_fh:
reference = ''.join(part.decode('utf-8')
for part in reference_fh.readlines())
self._assert_str_same(reference, item, reference_path,
type_comparison_name)
else:
self._ensure_folder(reference_path)
logger.warning('Creating result file: %s', reference_path)
with open(reference_path, 'wb') as reference_fh:
reference_fh.writelines(
part.encode('utf-8')
for part in item)
def assertXMLElement(self, obj, reference_filename):
"""
Calls the xml_element method given obj and asserts the result is the same as the test file.
"""
doc = xml.dom.minidom.Document()
doc.appendChild(obj.xml_element(doc))
pretty_xml = doc.toprettyxml(indent=" ")
reference_path = get_result_path(reference_filename)
self._check_same(pretty_xml, reference_path,
type_comparison_name='XML')
def assertArrayEqual(self, a, b, err_msg=''):
np.testing.assert_array_equal(a, b, err_msg=err_msg)
def _assertMaskedArray(self, assertion, a, b, strict, **kwargs):
# Define helper function to extract unmasked values as a 1d
# array.
def unmasked_data_as_1d_array(array):
if array.ndim == 0:
if array.mask:
data = np.array([])
else:
data = np.array([array.data])
else:
data = array.data[~ma.getmaskarray(array)]
return data
# Compare masks. This will also check that the array shapes
# match, which is not tested when comparing unmasked values if
# strict is False.
a_mask, b_mask = ma.getmaskarray(a), ma.getmaskarray(b)
np.testing.assert_array_equal(a_mask, b_mask)
if strict:
assertion(a.data, b.data, **kwargs)
else:
assertion(unmasked_data_as_1d_array(a),
unmasked_data_as_1d_array(b),
**kwargs)
def assertMaskedArrayEqual(self, a, b, strict=False):
"""
Check that masked arrays are equal. This requires the
unmasked values and masks to be identical.
Args:
* a, b (array-like):
Two arrays to compare.
Kwargs:
* strict (bool):
If True, perform a complete mask and data array equality check.
If False (default), the data array equality considers only unmasked
elements.
"""
self._assertMaskedArray(np.testing.assert_array_equal, a, b, strict)
def assertArrayAlmostEqual(self, a, b, decimal=6):
np.testing.assert_array_almost_equal(a, b, decimal=decimal)
def assertMaskedArrayAlmostEqual(self, a, b, decimal=6, strict=False):
"""
Check that masked arrays are almost equal. This requires the
masks to be identical, and the unmasked values to be almost
equal.
Args:
* a, b (array-like):
Two arrays to compare.
Kwargs:
* strict (bool):
If True, perform a complete mask and data array equality check.
If False (default), the data array equality considers only unmasked
elements.
* decimal (int):
Equality tolerance level for
:meth:`numpy.testing.assert_array_almost_equal`, with the meaning
'abs(desired-actual) < 0.5 * 10**(-decimal)'
"""
self._assertMaskedArray(np.testing.assert_array_almost_equal, a, b,
strict, decimal=decimal)
def assertArrayAllClose(self, a, b, rtol=1.0e-7, atol=0.0, **kwargs):
"""
Check arrays are equal, within given relative + absolute tolerances.
Args:
* a, b (array-like):
Two arrays to compare.
Kwargs:
* rtol, atol (float):
Relative and absolute tolerances to apply.
Any additional kwargs are passed to numpy.testing.assert_allclose.
Performs pointwise toleranced comparison, and raises an assertion if
the two are not equal 'near enough'.
For full details see underlying routine numpy.testing.assert_allclose.
"""
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol, **kwargs)
@contextlib.contextmanager
def temp_filename(self, suffix=''):
filename = iris.util.create_temp_filename(suffix)
try:
yield filename
finally:
os.remove(filename)
def file_checksum(self, file_path):
"""
Generate checksum from file.
"""
with open(file_path, "rb") as in_file:
return zlib.crc32(in_file.read())
def _unique_id(self):
"""
Returns the unique ID for the current assertion.
The ID is composed of two parts: a unique ID for the current test
(which is itself composed of the module, class, and test names), and
a sequential counter (specific to the current test) that is incremented
on each call.
For example, calls from a "test_tx" routine followed by a "test_ty"
routine might result in::
test_plot.TestContourf.test_tx.0
test_plot.TestContourf.test_tx.1
test_plot.TestContourf.test_tx.2
test_plot.TestContourf.test_ty.0
"""
# Obtain a consistent ID for the current test.
# NB. unittest.TestCase.id() returns different values depending on
# whether the test has been run explicitly, or via test discovery.
# For example:
# python tests/test_plot.py => '__main__.TestContourf.test_tx'
# ird -t => 'iris.tests.test_plot.TestContourf.test_tx'
bits = self.id().split('.')[-3:]
if bits[0] == '__main__':
file_name = os.path.basename(sys.modules['__main__'].__file__)
bits[0] = os.path.splitext(file_name)[0]
test_id = '.'.join(bits)
# Derive the sequential assertion ID within the test
assertion_id = self._assertion_counts[test_id]
self._assertion_counts[test_id] += 1
return test_id + '.' + str(assertion_id)
def _check_reference_file(self, reference_path):
reference_exists = os.path.isfile(reference_path)
if not (reference_exists or
os.environ.get('IRIS_TEST_CREATE_MISSING')):
msg = 'Missing test result: {}'.format(reference_path)
raise AssertionError(msg)
return reference_exists
def _ensure_folder(self, path):
dir_path = os.path.dirname(path)
if not os.path.exists(dir_path):
logger.warning('Creating folder: %s', dir_path)
os.makedirs(dir_path)
def check_graphic(self, tol=_DEFAULT_IMAGE_TOLERANCE):
"""Checks the CRC matches for the current matplotlib.pyplot figure, and closes the figure."""
unique_id = self._unique_id()
figure = plt.gcf()
try:
expected_fname = os.path.join(os.path.dirname(__file__),
'results', 'visual_tests',
unique_id + '.png')
if not os.path.isdir(os.path.dirname(expected_fname)):
os.makedirs(os.path.dirname(expected_fname))
#: The path where the images generated by the tests should go.
image_output_directory = os.path.join(os.path.dirname(__file__),
'result_image_comparison')
if not os.access(image_output_directory, os.W_OK):
if not os.access(os.getcwd(), os.W_OK):
raise IOError('Write access to a local disk is required '
'to run image tests. Run the tests from a '
'current working directory you have write '
'access to to avoid this issue.')
else:
image_output_directory = os.path.join(
os.getcwd(), 'iris_image_test_output')
result_fname = os.path.join(image_output_directory,
'result-' + unique_id + '.png')
if not os.path.isdir(os.path.dirname(result_fname)):
# Handle race-condition where the directories are
# created sometime between the check above and the
# creation attempt below.
try:
os.makedirs(os.path.dirname(result_fname))
except OSError as err:
# Don't care about "File exists"
if err.errno != 17:
raise
figure.savefig(result_fname)
if not os.path.exists(expected_fname):
warnings.warn('Created image for test %s' % unique_id)
shutil.copy2(result_fname, expected_fname)
err = mcompare.compare_images(expected_fname, result_fname, tol=tol)
if _DISPLAY_FIGURES:
if err:
print('Image comparison would have failed. Message: %s' % err)
plt.show()
else:
assert not err, 'Image comparison failed. Message: %s' % err
finally:
plt.close()
def _remove_testcase_patches(self):
"""Helper to remove per-testcase patches installed by :meth:`patch`."""
# Remove all patches made, ignoring errors.
for p in self.testcase_patches:
p.stop()
# Reset per-test patch control variable.
self.testcase_patches.clear()
def patch(self, *args, **kwargs):
"""
Install a mock.patch, to be removed after the current test.
The patch is created with mock.patch(*args, **kwargs).
Returns:
The substitute object returned by patch.start().
For example::
mock_call = self.patch('module.Class.call', return_value=1)
module_Class_instance.call(3, 4)
self.assertEqual(mock_call.call_args_list, [mock.call(3, 4)])
"""
# Make the new patch and start it.
patch = mock.patch(*args, **kwargs)
start_result = patch.start()
# Create the per-testcases control variable if it does not exist.
# NOTE: this mimics a setUp method, but continues to work when a
# subclass defines its own setUp.
if not hasattr(self, 'testcase_patches'):
self.testcase_patches = {}
# When installing the first patch, schedule remove-all at cleanup.
if not self.testcase_patches:
self.addCleanup(self._remove_testcase_patches)
# Record the new patch and start object for reference.
self.testcase_patches[patch] = start_result
# Return patch replacement object.
return start_result
class GraphicsTest(IrisTest):
def setUp(self):
# Make sure we have no unclosed plots from previous tests before
# generating this one.
if MPL_AVAILABLE:
plt.close('all')
def tearDown(self):
# If a plotting test bombs out it can leave the current figure
# in an odd state, so we make sure it's been disposed of.
if MPL_AVAILABLE:
plt.close('all')
class TestGribMessage(IrisTest):
def assertGribMessageContents(self, filename, contents):
"""
Evaluate whether all messages in a GRIB2 file contain the provided
contents.
* filename (string)
The path on disk of an existing GRIB file
* contents
An iterable of GRIB message keys and expected values.
"""
messages = GribMessage.messages_from_filename(filename)
for message in messages:
for element in contents:
section, key, val = element
self.assertEqual(message.sections[section][key], val)
def assertGribMessageDifference(self, filename1, filename2, diffs,
skip_keys=(), skip_sections=()):
"""
Evaluate that the two messages only differ in the ways specified.
* filename[0|1] (string)
The path on disk of existing GRIB files
* diffs
An dictionary of GRIB message keys and expected diff values:
{key: (m1val, m2val),...} .
* skip_keys
An iterable of key names to ignore during comparison.
* skip_sections
An iterable of section numbers to ignore during comparison.
"""
messages1 = list(GribMessage.messages_from_filename(filename1))
messages2 = list(GribMessage.messages_from_filename(filename2))
self.assertEqual(len(messages1), len(messages2))
for m1, m2 in zip(messages1, messages2):
m1_sect = set(m1.sections.keys())
m2_sect = set(m2.sections.keys())
for missing_section in (m1_sect ^ m2_sect):
what = ('introduced'
if missing_section in m1_sect else 'removed')
# Assert that an introduced section is in the diffs.
self.assertIn(missing_section, skip_sections,
msg='Section {} {}'.format(missing_section,
what))
for section in (m1_sect & m2_sect):
# For each section, check that the differences are
# known diffs.
m1_keys = set(m1.sections[section]._keys)
m2_keys = set(m2.sections[section]._keys)
difference = m1_keys ^ m2_keys
unexpected_differences = difference - set(skip_keys)
if unexpected_differences:
self.fail("There were keys in section {} which \n"
"weren't in both messages and which weren't "
"skipped.\n{}"
"".format(section,
', '.join(unexpected_differences)))
keys_to_compare = m1_keys & m2_keys - set(skip_keys)
for key in keys_to_compare:
m1_value = m1.sections[section][key]
m2_value = m2.sections[section][key]
msg = '{} {} != {}'
if key not in diffs:
# We have a key which we expect to be the same for
# both messages.
if isinstance(m1_value, np.ndarray):
# A large tolerance appears to be required for
# gribapi 1.12, but not for 1.14.
self.assertArrayAlmostEqual(m1_value, m2_value,
decimal=2)
else:
self.assertEqual(m1_value, m2_value,
msg=msg.format(key, m1_value,
m2_value))
else:
# We have a key which we expect to be different
# for each message.
self.assertEqual(m1_value, diffs[key][0],
msg=msg.format(key, m1_value,
diffs[key][0]))
self.assertEqual(m2_value, diffs[key][1],
msg=msg.format(key, m2_value,
diffs[key][1]))
def skip_data(fn):
"""
Decorator to choose whether to run tests, based on the availability of
external data.
Example usage:
@skip_data
class MyDataTests(tests.IrisTest):
...
"""
no_data = (not iris.config.TEST_DATA_DIR
or not os.path.isdir(iris.config.TEST_DATA_DIR)
or os.environ.get('IRIS_TEST_NO_DATA'))
skip = unittest.skipIf(
condition=no_data,
reason='Test(s) require external data.')
return skip(fn)
def skip_gdal(fn):
"""
Decorator to choose whether to run tests, based on the availability of the
GDAL library.
Example usage:
@skip_gdal
class MyGeoTiffTests(test.IrisTest):
...
"""
skip = unittest.skipIf(
condition=not GDAL_AVAILABLE,
reason="Test requires 'gdal'.")
return skip(fn)
def skip_plot(fn):
"""
Decorator to choose whether to run tests, based on the availability of the
matplotlib library.
Example usage:
@skip_plot
class MyPlotTests(test.GraphicsTest):
...
"""
skip = unittest.skipIf(
condition=not MPL_AVAILABLE,
reason='Graphics tests require the matplotlib library.')
return skip(fn)
skip_grib = unittest.skipIf(not GRIB_AVAILABLE, 'Test(s) require "gribapi", '
'which is not available.')
def no_warnings(func):
"""
Provides a decorator to ensure that there are no warnings raised
within the test, otherwise the test will fail.
"""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
with mock.patch('warnings.warn') as warn:
result = func(self, *args, **kwargs)
self.assertEqual(0, warn.call_count,
('Got unexpected warnings.'
' \n{}'.format(warn.call_args_list)))
return result
return wrapped
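# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): shows how a test
# module typically combines the helpers defined above. The @skip_data guard
# skips the test when external data is unavailable, get_data_path resolves a
# file under iris.config.TEST_DATA_DIR, and assertCML compares the cube
# against a stored reference (created on demand when IRIS_TEST_CREATE_MISSING
# is set). The data path below is an assumed example and may not exist in
# every test-data checkout.
@skip_data
class _ExampleUsageTest(IrisTest):
    def test_cml_reference(self):
        cube = iris.load_cube(get_data_path(('PP', 'globClim1', 'theta.pp')))
        self.assertCML(cube, ('example', 'theta.cml'))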
|
lgpl-3.0
|
Haleyo/spark-tk
|
python/sparktk/frame/ops/to_pandas.py
|
14
|
4721
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def to_pandas(self, n=None, offset=0, columns=None):
"""
Brings data into a local pandas dataframe.
Similar to the 'take' function, but puts the data into a pandas dataframe.
Parameters
----------
:param n: (Optional(int)) The number of rows to get from the frame (warning: do not overwhelm the python session
by taking too much)
:param offset: (Optional(int)) The number of rows to skip before copying. Defaults to 0.
:param columns: (Optional(List[str])) Column filter. The list of names to be included. Default is all columns.
:return: (pandas.DataFrame) A new pandas dataframe object containing the taken frame data.
Examples
--------
<hide>
>>> data = [["Fred", "555-1234"],["Susan", "555-0202"],["Thurston","555-4510"],["Judy","555-2183"]]
>>> column_names = ["name", "phone"]
>>> frame = tc.frame.create(data, column_names)
</hide>
Consider the following spark-tk frame, where we have columns for name and phone number:
>>> frame.inspect()
[#] name phone
=======================
[0] Fred 555-1234
[1] Susan 555-0202
[2] Thurston 555-4510
[3] Judy 555-2183
>>> frame.schema
[('name', <type 'str'>), ('phone', <type 'str'>)]
The frame to_pandas() method is used to get a pandas DataFrame that contains the data from the spark-tk frame. Note
that since no parameters are provided when to_pandas() is called, the default values are used for the number of
rows, the row offset, and the columns.
>>> pandas_frame = frame.to_pandas()
>>> pandas_frame
name phone
0 Fred 555-1234
1 Susan 555-0202
2 Thurston 555-4510
3 Judy 555-2183
"""
try:
import pandas
    except ImportError:
raise RuntimeError("pandas module not found, unable to download. Install pandas or try the take command.")
from sparktk.frame.ops.take import take_rich
result = take_rich(self, n, offset, columns)
headers, data_types = zip(*result.schema)
frame_data = result.data
from sparktk import dtypes
import datetime
date_time_columns = [i for i, x in enumerate(self.schema) if x[1] in (dtypes.datetime, datetime.datetime)]
has_date_time = len(date_time_columns) > 0
# translate our datetime long to datetime, so that it gets into the pandas df as a datetime column
def long_to_date_time(row):
for i in date_time_columns:
if isinstance(row[i], long):
row[i] = datetime.datetime.fromtimestamp(row[i]//1000).replace(microsecond=row[i]%1000*1000)
return row
if (has_date_time):
frame_data = map(long_to_date_time, frame_data)
# create pandas df
pandas_df = pandas.DataFrame(frame_data, columns=headers)
for i, dtype in enumerate(data_types):
dtype_str = _sparktk_dtype_to_pandas_str(dtype)
try:
pandas_df[[headers[i]]] = pandas_df[[headers[i]]].astype(dtype_str)
except (TypeError, ValueError):
if dtype_str.startswith("int"):
# DataFrame does not handle missing values in int columns. If we get this error, use the 'object' datatype instead.
print "WARNING - Encountered problem casting column %s to %s, possibly due to missing values (i.e. presence of None). Continued by casting column %s as 'object'" % (headers[i], dtype_str, headers[i])
pandas_df[[headers[i]]] = pandas_df[[headers[i]]].astype("object")
else:
raise
return pandas_df
def _sparktk_dtype_to_pandas_str(dtype):
"""maps spark-tk schema types to types understood by pandas, returns string"""
from sparktk import dtypes
if dtype ==dtypes.datetime:
return "datetime64[ns]"
elif dtypes.dtypes.is_primitive_type(dtype):
return dtypes.dtypes.to_string(dtype)
return "object"
|
apache-2.0
|
498143049/seven-segment-display
|
python-svm/roctest.py
|
1
|
3381
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 19 08:57:13 2015
@author: shifeng
"""
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation: load the data and prepare it for training
def getData(url):
file = open(url,"r")
list_arr = file.readlines()
lists = []
for index, x in enumerate(list_arr):
x = x.strip()
x = x.strip('[]')
x = x.split(", ")
lists.append(x)
a = np.array(lists)
a = a.astype(float)
file.close()
return a;
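# getData() expects each line of the input file to look like "[f1, f2, ..., fk]":
# the surrounding brackets are stripped and the comma-separated values become
# one float feature vector per row. outn.txt holds the negative samples
# (label 0) and outp.txt the positive samples (label 1).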
x_n=getData('outn.txt');
y_n=np.zeros(len(x_n));
x_p = getData('outp.txt');
y_p = np.ones(len(x_p));
X = np.concatenate((x_n,x_p))
y = np.concatenate((y_n,y_p))
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
# (stratified 10-fold cross-validation, matching n_folds below)
cv = StratifiedKFold(y, n_folds=10)
print(cv)
random_state = np.random.RandomState(0)
classifier = SVC(C=3, gamma=3, kernel='rbf', class_weight='balanced', probability=True, random_state=random_state)  # Note: probability=True is required here, otherwise predict_proba() raises an error; the RBF kernel also performs better than the linear kernel.
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
    # Fit the SVM on the training fold and compute class probabilities for the held-out test fold
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
    # print set(y[train])  # set([0, 1]): the labels contain two classes
    # print len(X[train]), len(X[test])  # e.g. 84 training samples and 16 test samples
    # print ("++", probas_)  # predict_proba() returns each test sample's confidence for every class;
    # the sample is assigned to the class with the highest confidence
# Compute ROC curve and area the curve
    # Use roc_curve() to obtain the FPR, TPR and the thresholds
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
# print(probas_)
# print(y[test])
    mean_tpr += interp(mean_fpr, fpr, tpr)  # interpolate the TPR onto the common mean_fpr grid with scipy's interp()
    mean_tpr[0] = 0.0  # force the curve to start at 0
roc_auc = auc(fpr, tpr)
    # Plotting only needs plt.plot(fpr, tpr); roc_auc simply records the AUC value computed by auc()
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
# Plot the diagonal (chance line)
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)  # average the interpolated TPR over the folds at each of the 100 mean_fpr points
mean_tpr[-1] = 1.0  # force the last point to be (1, 1)
mean_auc = auc(mean_fpr, mean_tpr)  # compute the mean AUC
# Plot the mean ROC curve
# print mean_fpr,len(mean_fpr)
# print mean_tpr
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
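# ---------------------------------------------------------------------------
# Hedged sketch (not used by the script above): sklearn.cross_validation was
# deprecated in scikit-learn 0.18 and removed in 0.20. On newer versions the
# equivalent stratified split comes from sklearn.model_selection, as in the
# helper below, which mirrors the 10-fold split used above.
def iter_cv_splits(X, y, n_splits=10):
    """Yield (train, test) index arrays like the StratifiedKFold loop above."""
    from sklearn.model_selection import StratifiedKFold
    for train, test in StratifiedKFold(n_splits=n_splits).split(X, y):
        yield train, test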
|
mit
|
btabibian/scikit-learn
|
benchmarks/bench_mnist.py
|
45
|
6977
|
"""
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogeneous.
Example of output:
    [..]
    Classification performance:
    ===========================
    Classifier               train-time   test-time   error-rate
    ------------------------------------------------------------
    MLP-adam                     53.46s       0.11s       0.0224
    Nystroem-SVM                112.97s       0.92s       0.0228
    MultilayerPerceptron         24.33s       0.14s       0.0287
    ExtraTrees                   42.99s       0.57s       0.0294
    RandomForest                 42.70s       0.49s       0.0318
    SampledRBF-SVM              135.81s       0.56s       0.0486
    LogisticRegression-SAG       16.67s       0.06s       0.0824
    CART                         20.69s       0.02s       0.1219
    dummy                         0.00s       0.01s       0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
# Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
# Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM': make_pipeline(
Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM': make_pipeline(
RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LogisticRegression-SAG': LogisticRegression(solver='sag', tol=1e-1,
C=1e4),
'LogisticRegression-SAGA': LogisticRegression(solver='saga', tol=1e-1,
C=1e4),
'MultilayerPerceptron': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
solver='sgd', learning_rate_init=0.2, momentum=0.9, verbose=1,
tol=1e-4, random_state=1),
'MLP-adam': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
solver='adam', learning_rate_init=0.001, verbose=1,
tol=1e-4, random_state=1)
}
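# ---------------------------------------------------------------------------
# Hedged helper (illustration only, not called by this script): reproduces the
# timing logic of the benchmark loop below for a single named estimator, so it
# can be reused interactively, e.g.
#   X_train, X_test, y_train, y_test = load_data()
#   bench_one('CART', X_train, y_train, X_test, y_test)
def bench_one(name, X_train, y_train, X_test, y_test):
    """Return (train_time, test_time, error_rate) for ESTIMATORS[name]."""
    estimator = ESTIMATORS[name]
    t0 = time()
    estimator.fit(X_train, y_train)
    fit_time = time() - t0
    t0 = time()
    y_pred = estimator.predict(X_test)
    predict_time = time() - t0
    return fit_time, predict_time, zero_one_loss(y_test, y_pred)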
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
|
bsd-3-clause
|
rmkoesterer/uga
|
uga/RunSnvplot.py
|
1
|
32158
|
## Copyright (c) 2015 Ryan Koesterer GNU General Public License v3
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd
import numpy as np
import scipy.stats as scipy
from uga import Parse
import pysam
import math
from uga import Process
import readline
import rpy2.robjects as ro
from rpy2.robjects import pandas2ri
import logging
import re
pd.options.mode.chained_assignment = None
pandas2ri.activate()
logging.basicConfig(format='%(asctime)s - %(processName)s - %(name)s - %(message)s',level=logging.DEBUG)
logger = logging.getLogger("RunSnvplot")
def RunSnvplot(args):
cfg = Parse.generate_snvplot_cfg(args)
Parse.print_snvplot_options(cfg)
if not cfg['debug']:
logging.disable(logging.CRITICAL)
ro.r('suppressMessages(library(ggplot2))')
ro.r('suppressMessages(library(grid))')
ro.r('suppressMessages(library(RColorBrewer))')
handle=pysam.TabixFile(filename=cfg['file'],parser=pysam.asVCF())
header = [x for x in handle.header]
skip_rows = len(header)-1
cols = header[-1].split()
pcols = cfg['pcol'].split(',')
cols_extract = [cfg['chrcol'],cfg['bpcol']] + pcols
if cfg['qq_strat_freq']:
if cfg['freqcol'] not in cols:
print(Process.Error("frequency column " + cfg['freqcol'] + " not found, unable to proceed with frequency stratified plots").out)
return 1
else:
cols_extract = cols_extract + [cfg['freqcol']]
print("frequency column " + cfg['freqcol'] + " found")
if cfg['qq_strat_mac']:
if cfg['maccol'] not in cols:
print(Process.Error("minor allele count column " + cfg['maccol'] + " not found, unable to proceed with minor allele count stratified plots").out)
return 1
else:
cols_extract = cols_extract + [cfg['maccol']]
print("minor allele count column " + cfg['maccol'] + " found")
print("importing data")
r = pd.read_table(cfg['file'],sep='\t',skiprows=skip_rows,usecols=cols_extract,compression='gzip')
print(str(r.shape[0]) + " total variants found")
for pcol in pcols:
print("plotting p-values for column " + pcol + " ...")
extract_cols = [cfg['chrcol'],cfg['bpcol'],pcol]
if cfg['freqcol'] in r:
extract_cols = extract_cols + [cfg['freqcol']]
if cfg['maccol'] in r:
extract_cols = extract_cols + [cfg['maccol']]
results = r[extract_cols]
results.dropna(inplace=True)
results = results[(results[pcol] > 0) & (results[pcol] <= 1)].reset_index(drop=True)
print(" " + str(results.shape[0]) + " variants with plottable p-values")
results['logp'] = -1 * np.log10(results[pcol]) + 0.0
ro.globalenv['results'] = results
l = np.median(scipy.chi2.ppf([1-x for x in results[pcol].tolist()], df=1))/scipy.chi2.ppf(0.5,1)
# in R: median(qchisq(results$p, df=1, lower.tail=FALSE))/qchisq(0.5,1)
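		# This is the genomic-control lambda: the median of the observed 1-df
		# chi-square statistics divided by the expected median under the null,
		# scipy.chi2.ppf(0.5, 1) ~= 0.4549; values well above 1 indicate inflation.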
print(" genomic inflation (all variants) = " + str(l))
if cfg['qq']:
print(" generating standard qq plot")
print(" minimum p-value: " + str(np.min(results[pcol])))
a = -1 * np.log10(ro.r('ppoints(' + str(len(results.index)) + ')'))
a.sort()
results.sort_values(by=['logp'], inplace=True)
print(" maximum -1*log10(p-value): " + str(np.max(results['logp'])))
ci_upper = -1 * np.log10(scipy.beta.ppf(0.95, list(range(1,len(results[pcol]) + 1)), list(range(len(results[pcol]),0,-1))))
ci_upper.sort()
ci_lower = -1 * np.log10(scipy.beta.ppf(0.05, list(range(1,len(results[pcol]) + 1)), list(range(len(results[pcol]),0,-1))))
ci_lower.sort()
ro.globalenv['df'] = ro.DataFrame({'a': ro.FloatVector(a), 'b': ro.FloatVector(results['logp']), 'ci_lower': ro.FloatVector(ci_lower), 'ci_upper': ro.FloatVector(ci_upper)})
dftext_label = 'lambda %~~% ' + str(round(l,3))
ro.globalenv['dftext'] = ro.DataFrame({'x': ro.r('Inf'), 'y': 0.5, 'lab': dftext_label})
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq.tiff')
elif cfg['ext'] == 'png':
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,units="in",bg="white",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq.png')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq.eps')
else:
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq.pdf')
ro.r("""
gp<-ggplot(df)
pp<-gp +
aes_string(x='a',y='b') +
geom_ribbon(aes_string(x='a',ymin='ci_lower',ymax='ci_upper'), data=df, alpha=0.25, fill='black') +
geom_point(size=2) +
geom_abline(intercept=0, slope=1, alpha=0.5) +
scale_x_discrete(expression(Expected~~-log[10](italic(p)))) +
scale_y_discrete(expression(Observed~~-log[10](italic(p)))) +
coord_fixed() +
theme_bw(base_size = 12) +
geom_text(aes_string(x='x', y='y', label='lab'), data = dftext, colour="black", vjust=0, hjust=1, size = 4, parse=TRUE) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14), legend.position = 'none',
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.text = element_text(size=12))
%s
""" % (ggsave))
if np.max(results['logp']) > cfg['crop']:
print(" generating cropped standard qq plot")
ro.r('df$b[df$b > ' + str(cfg['crop']) + ']<-' + str(cfg['crop']))
ro.r('df$shape<-0')
ro.r('df$shape[df$b == ' + str(cfg['crop']) + ']<-1')
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq.cropped.tiff')
elif cfg['ext'] == 'png':
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,units="in",bg="white",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq.cropped.png')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq.cropped.eps')
else:
ggsave = 'ggsave(filename="%s",plot=pp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq.cropped.pdf')
ro.r("""
gp<-ggplot(df)
pp<-gp +
aes_string(x='a',y='b') +
geom_ribbon(aes_string(x='a',ymin='ci_lower',ymax='ci_upper'), data=df, alpha=0.25, fill='black') +
geom_point(aes(shape=factor(shape)),size=2) +
geom_abline(intercept=0, slope=1, alpha=0.5) +
scale_x_discrete(expression(Expected~~-log[10](italic(p)))) +
scale_y_discrete(expression(Observed~~-log[10](italic(p)))) +
coord_fixed() +
theme_bw(base_size = 12) +
geom_text(aes_string(x='x', y='y', label='lab'), data = dftext, colour="black", vjust=0, hjust=1, size = 4, parse=TRUE) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14), legend.position = 'none',
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.text = element_text(size=12))
%s
""" % (ggsave))
def ppoints(n, a):
try:
n = np.float(len(n))
except TypeError:
n = np.float(n)
return (np.arange(n) + 1 - a)/(n + 1 - 2*a)
if cfg['qq_strat_freq']:
print(" generating frequency stratified qq plot")
strat_ticks = np.sort([np.float(x) for x in cfg['freq_ticks'].split(',')])
results['UGA___QQ_BIN___'] = 0
for i in range(len(strat_ticks)):
results.loc[(results[cfg['freqcol']] >= strat_ticks[i]) & (results[cfg['freqcol']] <= 1-strat_ticks[i]),'UGA___QQ_BIN___'] = i+1
bin_values = results['UGA___QQ_BIN___'].value_counts()
for i in range(len(strat_ticks)+1):
if i not in bin_values.index:
bin_values[i] = 0
counts = pd.DataFrame(bin_values)
counts['lambda'] = np.nan
results['description'] = 'NA'
for i in range(len(strat_ticks)+1):
if counts.loc[i,'UGA___QQ_BIN___'] > 0:
counts.loc[i,'lambda'] = np.median(scipy.chi2.ppf([1-x for x in results[pcol][results['UGA___QQ_BIN___'] == i].tolist()], df=1))/scipy.chi2.ppf(0.5,1)
else:
counts.loc[i,'lambda'] = np.nan
if i == 0:
results.loc[results['UGA___QQ_BIN___'] == i,'description'] = "(0," + str(strat_ticks[i]) + ") ~" + str(round(counts.loc[i,'lambda'],3))
print(" MAF (0," + str(strat_ticks[i]) + "): n=" + str(np.int(counts.loc[i,'UGA___QQ_BIN___'])) + ", lambda=" + str(counts.loc[i,'lambda']))
elif i < len(strat_ticks):
results.loc[results['UGA___QQ_BIN___'] == i,'description'] = "[" + str(strat_ticks[i-1]) + "," + str(strat_ticks[i]) + ") ~" + str(round(counts.loc[i,'lambda'],3))
print(" MAF [" + str(strat_ticks[i-1]) + "," + str(strat_ticks[i]) + "): n=" + str(np.int(counts.loc[i,'UGA___QQ_BIN___'])) + ", lambda=" + str(counts.loc[i,'lambda']))
else:
results.loc[results['UGA___QQ_BIN___'] == i,'description'] = "[" + str(strat_ticks[i-1]) + ",0.5] ~" + str(round(counts.loc[i,'lambda'],3))
print(" MAF [" + str(strat_ticks[i-1]) + ",0.5]: n=" + str(np.int(counts.loc[i,'UGA___QQ_BIN___'])) + ", lambda=" + str(counts.loc[i,'lambda']))
results.sort_values(['UGA___QQ_BIN___','logp'],inplace=True)
results['expected'] = 0
for i in counts.index:
if counts.loc[i,'UGA___QQ_BIN___'] > 0:
results.loc[results['UGA___QQ_BIN___'] == i,'expected'] = np.sort(-1 * np.log10(ppoints(len(results.loc[results['UGA___QQ_BIN___'] == i,'expected']),0)))
ro.globalenv['df'] = ro.DataFrame({'expected': ro.FloatVector(results['expected']), 'logp': ro.FloatVector(results['logp']), 'UGA___QQ_BIN___': ro.IntVector(results['UGA___QQ_BIN___']), 'description': ro.StrVector(results['description'])})
ro.r("df<-df[order(df$UGA___QQ_BIN___),]")
ro.r("df$description<-ordered(df$description,levels=unique(df$description))")
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat_freq.tiff')
elif cfg['ext'] == 'png':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat_freq.png')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat_freq.eps')
else:
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat_freq.pdf')
ro.r("""
gp<-ggplot(df, aes_string(x='expected',y='logp')) +
geom_point(aes_string(color='description'), size=2) +
scale_colour_manual(values=colorRampPalette(brewer.pal(9,"Blues"))(length(unique(df$description))+2)[3:(length(unique(df$description))+2)]) +
geom_abline(intercept=0, slope=1, alpha=0.5) +
scale_x_discrete(expression(Expected~~-log[10](italic(p)))) +
scale_y_discrete(expression(Observed~~-log[10](italic(p)))) +
coord_fixed() +
theme_bw(base_size = 12) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14), legend.title = element_blank(),
legend.key.height = unit(0.1,"in"), legend.text = element_text(size=6), legend.key = element_blank(), legend.justification = c(0,1),
legend.position = c(0,1), panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.text = element_text(size=12))
%s
""" % (ggsave))
if np.max(results['logp']) > cfg['crop']:
print(" generating cropped frequency stratified qq plot")
ro.r('df$logp[df$logp > ' + str(cfg['crop']) + ']<-' + str(cfg['crop']))
ro.r('df$shape<-0')
ro.r('df$shape[df$logp == ' + str(cfg['crop']) + ']<-1')
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat_freq.cropped.tiff')
elif cfg['ext'] == 'png':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat_freq.cropped.png')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat_freq.cropped.eps')
else:
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat_freq.cropped.pdf')
ro.r("""
gp<-ggplot(df, aes_string(x='expected',y='logp')) +
geom_point(aes(shape=factor(shape), color=description), size=2) +
scale_colour_manual(values=colorRampPalette(brewer.pal(9,"Blues"))(length(unique(df$description))+2)[3:(length(unique(df$description))+2)]) +
geom_abline(intercept=0, slope=1, alpha=0.5) +
scale_x_discrete(expression(Expected~~-log[10](italic(p)))) +
scale_y_discrete(expression(Observed~~-log[10](italic(p)))) +
coord_fixed() +
theme_bw(base_size = 12) +
guides(shape=FALSE) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14), legend.title = element_blank(),
legend.key.height = unit(0.1,"in"), legend.text = element_text(size=6), legend.key = element_blank(), legend.justification = c(0,1),
legend.position = c(0,1), panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.text = element_text(size=12))
%s
""" % (ggsave))
if cfg['qq_strat_mac']:
print(" generating minor allele count stratified qq plot")
strat_ticks = np.sort([np.float(x) for x in cfg['mac_ticks'].split(',')])
results['UGA___QQ_BIN___'] = 0
for i in range(len(strat_ticks)):
results.loc[results[cfg['maccol']] >= strat_ticks[i],'UGA___QQ_BIN___'] = i+1
bin_values = results['UGA___QQ_BIN___'].value_counts()
for i in range(len(strat_ticks)+1):
if i not in bin_values.index:
bin_values[i] = 0
counts = pd.DataFrame(bin_values)
counts['lambda'] = 0
results['description'] = 'NA'
for i in np.sort(counts.index):
if counts.loc[i,'UGA___QQ_BIN___'] > 0:
counts.loc[i,'lambda'] = np.median(scipy.chi2.ppf([1-x for x in results[pcol][results['UGA___QQ_BIN___'] == i].tolist()], df=1))/scipy.chi2.ppf(0.5,1)
else:
counts.loc[i,'lambda'] = np.nan
if i == 0:
results.loc[results['UGA___QQ_BIN___'] == i,'description'] = "(0," + str(int(strat_ticks[i])) + ") ~" + str(round(counts.loc[i,'lambda'],3))
print(" MAC (0," + str(int(strat_ticks[i])) + "): n=" + str(np.int(counts.loc[i,'UGA___QQ_BIN___'])) + ", lambda=" + str(counts.loc[i,'lambda']))
elif i < len(strat_ticks):
results.loc[results['UGA___QQ_BIN___'] == i,'description'] = "[" + str(int(strat_ticks[i-1])) + "," + str(int(strat_ticks[i])) + ") ~" + str(round(counts.loc[i,'lambda'],3))
print(" MAC [" + str(int(strat_ticks[i-1])) + "," + str(int(strat_ticks[i])) + "): n=" + str(np.int(counts.loc[i,'UGA___QQ_BIN___'])) + ", lambda=" + str(counts.loc[i,'lambda']))
else:
results.loc[results['UGA___QQ_BIN___'] == i,'description'] = "[" + str(int(strat_ticks[i-1])) + ",...] ~" + str(round(counts.loc[i,'lambda'],3))
print(" MAC [" + str(int(strat_ticks[i-1])) + ",...]: n=" + str(np.int(counts.loc[i,'UGA___QQ_BIN___'])) + ", lambda=" + str(counts.loc[i,'lambda']))
results.sort_values(['UGA___QQ_BIN___','logp'],inplace=True)
results['expected'] = 0
for i in counts.index:
results.loc[results['UGA___QQ_BIN___'] == i,'expected'] = np.sort(-1 * np.log10(ppoints(len(results.loc[results['UGA___QQ_BIN___'] == i,'expected']),0)))
ro.globalenv['df'] = ro.DataFrame({'expected': ro.FloatVector(results['expected']), 'logp': ro.FloatVector(results['logp']), 'UGA___QQ_BIN___': ro.IntVector(results['UGA___QQ_BIN___']), 'description': ro.StrVector(results['description'])})
ro.r("df<-df[order(df$UGA___QQ_BIN___),]")
ro.r("df$description<-ordered(df$description,levels=unique(df$description))")
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat_mac.tiff')
elif cfg['ext'] == 'png':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat_mac.png')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat_mac.eps')
else:
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat_mac.pdf')
ro.r("""
gp<-ggplot(df, aes_string(x='expected',y='logp')) +
geom_point(aes_string(color='description'), size=2) +
scale_colour_manual(values=colorRampPalette(brewer.pal(9,"Blues"))(length(unique(df$description))+2)[3:(length(unique(df$description))+2)]) +
geom_abline(intercept=0, slope=1, alpha=0.5) +
scale_x_discrete(expression(Expected~~-log[10](italic(p)))) +
scale_y_discrete(expression(Observed~~-log[10](italic(p)))) +
coord_fixed() +
theme_bw(base_size = 12) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14), legend.title = element_blank(),
legend.key.height = unit(0.1,"in"), legend.text = element_text(size=6), legend.key = element_blank(), legend.justification = c(0,1),
legend.position = c(0,1), panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.text = element_text(size=12))
%s
""" % (ggsave))
if np.max(results['logp']) > cfg['crop']:
print(" generating cropped frequency stratified qq plot")
ro.r('df$logp[df$logp > ' + str(cfg['crop']) + ']<-' + str(cfg['crop']))
ro.r('df$shape<-0')
ro.r('df$shape[df$logp == ' + str(cfg['crop']) + ']<-1')
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat_mac.cropped.tiff')
elif cfg['ext'] == 'png':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,units="in",bg="white",dpi=300)' % (cfg['out'] + '.' + pcol + '.qq_strat_mac.cropped.png')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat_mac.cropped.eps')
else:
ggsave = 'ggsave(filename="%s",plot=gp,width=4,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.qq_strat_mac.cropped.pdf')
ro.r("""
gp<-ggplot(df, aes_string(x='expected',y='logp')) +
geom_point(aes(shape=factor(shape), color=description), size=2) +
scale_colour_manual(values=colorRampPalette(brewer.pal(9,"Blues"))(length(unique(df$description))+2)[3:(length(unique(df$description))+2)]) +
geom_abline(intercept=0, slope=1, alpha=0.5) +
scale_x_discrete(expression(Expected~~-log[10](italic(p)))) +
scale_y_discrete(expression(Observed~~-log[10](italic(p)))) +
coord_fixed() +
theme_bw(base_size = 12) +
guides(shape=FALSE) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14), legend.title = element_blank(),
legend.key.height = unit(0.1,"in"), legend.text = element_text(size=6), legend.key = element_blank(), legend.justification = c(0,1),
legend.position = c(0,1), panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.text = element_text(size=12))
%s
""" % (ggsave))
if cfg['mht']:
print(" generating standard manhattan plot")
print(" minimum p-value: " + str(np.min(results[pcol])))
print(" maximum -1*log10(p-value): " + str(np.max(results['logp'])))
if cfg['gc'] and l > 1:
print(" adjusting p-values for genomic inflation for p-value column " + pcol)
results[pcol]=2 * scipy.norm.cdf(-1 * np.abs(scipy.norm.ppf(0.5*results[pcol]) / math.sqrt(l)))
print(" minimum post-gc adjustment p-value: " + str(np.min(results[pcol])))
print(" maximum post-gc adjustment -1*log10(p-value): " + str(np.max(results['logp'])))
else:
print(" skipping genomic inflation correction")
print(" calculating genomic positions")
results.sort_values(by=[cfg['chrcol'],cfg['bpcol']], inplace=True)
ticks = []
lastbase = 0
results['gpos'] = 0
nchr = len(list(np.unique(results[cfg['chrcol']].values)))
chrs = np.unique(results[cfg['chrcol']].values)
if cfg['color']:
colours = ["#08306B","#41AB5D","#000000","#F16913","#3F007D","#EF3B2C","#08519C","#238B45","#252525","#D94801","#54278F","#CB181D","#2171B5","#006D2C","#525252","#A63603","#6A51A3","#A50F15","#4292C6","#00441B","#737373","#7F2704","#807DBA","#67000D"]
else:
colours = ["#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3","#08589e","#4eb3d3"]
if nchr == 1:
results['gpos'] = results[cfg['bpcol']]
results['colours'] = "#08589e"
if results['gpos'].max() - results['gpos'].min() <= 1000:
ticks = [x for x in range(results['gpos'].min(),results['gpos'].max()) if x % 100 == 0]
elif results['gpos'].max() - results['gpos'].min() <= 10000:
ticks = [x for x in range(results['gpos'].min(),results['gpos'].max()) if x % 1000 == 0]
elif results['gpos'].max() - results['gpos'].min() <= 100000:
ticks = [x for x in range(results['gpos'].min(),results['gpos'].max()) if x % 10000 == 0]
elif results['gpos'].max() - results['gpos'].min() <= 200000:
ticks = [x for x in range(results['gpos'].min(),results['gpos'].max()) if x % 20000 == 0]
elif results['gpos'].max() - results['gpos'].min() <= 300000:
ticks = [x for x in range(results['gpos'].min(),results['gpos'].max()) if x % 30000 == 0]
elif results['gpos'].max() - results['gpos'].min() <= 400000:
ticks = [x for x in range(results['gpos'].min(),results['gpos'].max()) if x % 40000 == 0]
elif results['gpos'].max() - results['gpos'].min() <= 500000:
ticks = [x for x in range(results['gpos'].min(),results['gpos'].max()) if x % 50000 == 0]
elif results['gpos'].max() - results['gpos'].min() <= 600000:
ticks = [x for x in range(results['gpos'].min(),results['gpos'].max()) if x % 60000 == 0]
elif results['gpos'].max() - results['gpos'].min() <= 700000:
ticks = [x for x in range(results['gpos'].min(),results['gpos'].max()) if x % 70000 == 0]
elif results['gpos'].max() - results['gpos'].min() <= 800000:
ticks = [x for x in range(results['gpos'].min(),results['gpos'].max()) if x % 80000 == 0]
elif results['gpos'].max() - results['gpos'].min() <= 900000:
ticks = [x for x in range(results['gpos'].min(),results['gpos'].max()) if x % 90000 == 0]
elif results['gpos'].max() - results['gpos'].min() <= 1000000:
ticks = [x for x in range(results['gpos'].min(),results['gpos'].max()) if x % 100000 == 0]
elif results['gpos'].max() - results['gpos'].min() <= 10000000:
ticks = [x for x in range(results['gpos'].min(),results['gpos'].max()) if x % 1000000 == 0]
elif results['gpos'].max() - results['gpos'].min() <= 100000000:
ticks = [x for x in range(results['gpos'].min(),results['gpos'].max()) if x % 10000000 == 0]
elif results['gpos'].max() - results['gpos'].min() > 100000000:
ticks = [x for x in range(results['gpos'].min(),results['gpos'].max()) if x % 25000000 == 0]
else:
results['colours'] = "#000000"
for i in range(len(chrs)):
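                    # gpos is the genome-wide x coordinate: base-pair positions on each chromosome are
                    # offset by the cumulative end position (lastbase) of all preceding chromosomes,
                    # and one axis tick is placed at the midpoint of each chromosome.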
print(" processed chromosome " + str(int(chrs[i])))
if i == 0:
results.loc[results[cfg['chrcol']] == chrs[i],'gpos'] = results.loc[results[cfg['chrcol']] == chrs[i],cfg['bpcol']]
else:
lastbase = lastbase + results.loc[results[cfg['chrcol']] == chrs[i-1],cfg['bpcol']].iloc[-1]
results.loc[results[cfg['chrcol']] == chrs[i],'gpos'] = (results.loc[results[cfg['chrcol']] == chrs[i],cfg['bpcol']]) + lastbase
if results.loc[results[cfg['chrcol']] == chrs[i]].shape[0] > 1:
ticks.append(results.loc[results[cfg['chrcol']] == chrs[i],'gpos'].iloc[0] + (results.loc[results[cfg['chrcol']] == chrs[i],'gpos'].iloc[-1] - results.loc[results[cfg['chrcol']] == chrs[i],'gpos'].iloc[0])/2)
else:
ticks.append(results.loc[results[cfg['chrcol']] == chrs[i],'gpos'].iloc[0])
results.loc[results[cfg['chrcol']] == chrs[i],'colours'] = colours[int(chrs[i])]
results['logp'] = -1 * np.log10(results[pcol])
if results.shape[0] >= 1000000:
sig = 5.4e-8
else:
sig = 0.05 / results.shape[0]
print(" significance level set to p-value = " + str(sig) + " (-1*log10(p-value) = " + str(-1 * np.log10(sig)) + ")")
print(" " + str(len(results[pcol][results[pcol] <= sig])) + " genome wide significant variants")
chr = results[cfg['chrcol']][0]
maxy=int(max(np.ceil(-1 * np.log10(sig)),np.ceil(results['logp'].max())))
if maxy > 20:
y_breaks = list(range(0,maxy,5))
y_labels = list(range(0,maxy,5))
else:
y_breaks = list(range(0,maxy))
y_labels = list(range(0,maxy))
ro.globalenv['df'] = ro.DataFrame({'gpos': ro.FloatVector(results['gpos']), 'logp': ro.FloatVector(results['logp']), 'colours': ro.FactorVector(results['colours'])})
ro.globalenv['ticks'] = ro.FloatVector(ticks)
ro.globalenv['labels'] = ro.Vector(["{:,}".format(x/1000) for x in ticks])
ro.globalenv['colours'] = ro.StrVector(colours)
ro.globalenv['chrs'] = ro.FloatVector(chrs)
ro.r('save.image("R.img")')
print(" generating manhattan plot")
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=gp,width=16,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.mht.tiff')
elif cfg['ext'] == 'png':
ggsave = 'ggsave(filename="%s",plot=gp,width=16,height=4,units="in",bg="white",dpi=300)' % (cfg['out'] + '.' + pcol + '.mht.png')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=gp,width=16,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.mht.eps')
else:
ggsave = 'ggsave(filename="%s",plot=gp,width=16,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.mht.pdf')
if nchr == 1:
ro.r("""
gp<-ggplot(df, aes_string(x='gpos',y='logp')) +
geom_hline(yintercept = -1 * log10(%g),colour="#B8860B", linetype=5, size = 0.25) +
geom_point(size=1.5) +
scale_x_continuous(expression(Chromosome~~%d~~(kb)),breaks=ticks,labels=labels) + \
scale_y_continuous(expression(-log[10](italic(p))),breaks=seq(0,%d,1),limits=c(0,%d)) + \
theme_bw(base_size = 8) + \
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14),
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.title = element_text(size=10),
axis.text = element_text(size=12), legend.position = 'none')
%s
""" % (sig, chr, maxy, maxy, ggsave))
else:
ro.r("""
gp = ggplot(df, aes_string(x='gpos',y='logp',colour='colours')) +
geom_hline(yintercept = -1 * log10(%g),colour="#B8860B", linetype=5, size = 0.25) +
geom_point(size=1.5) +
scale_colour_manual(values=colours) +
scale_x_continuous(expression(Chromosome),breaks=ticks,labels=chrs) +
scale_y_continuous(expression(-log[10](italic(p))),breaks=seq(0,%d,1),limits=c(0,%d)) +
theme_bw(base_size = 8) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14),
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.title = element_text(size=10),
axis.text = element_text(size=12), legend.position = 'none')
%s
""" % (sig, maxy, maxy, ggsave))
if maxy > cfg['crop']:
maxy = cfg['crop']
ro.r('df$logp[df$logp > ' + str(cfg['crop']) + ']<-' + str(cfg['crop']))
ro.r('df$shape<-0')
ro.r('df$shape[df$logp == ' + str(cfg['crop']) + ']<-1')
print(" generating cropped manhattan plot")
if cfg['ext'] == 'tiff':
ggsave = 'ggsave(filename="%s",plot=gp,width=16,height=4,units="in",bg="white",compression="lzw",dpi=300)' % (cfg['out'] + '.' + pcol + '.mht.cropped.tiff')
elif cfg['ext'] == 'png':
ggsave = 'ggsave(filename="%s",plot=gp,width=16,height=4,units="in",bg="white",dpi=300)' % (cfg['out'] + '.' + pcol + '.mht.cropped.png')
elif cfg['ext'] == 'eps':
ggsave = 'ggsave(filename="%s",plot=gp,width=16,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.mht.cropped.eps')
else:
ggsave = 'ggsave(filename="%s",plot=gp,width=16,height=4,bg="white")' % (cfg['out'] + '.' + pcol + '.mht.cropped.pdf')
if nchr == 1:
ro.r("""
gp<-ggplot(df, aes_string(x='gpos',y='logp')) +
geom_hline(yintercept = -1 * log10(%g),colour="#B8860B", linetype=5, size = 0.25) +
geom_point(aes(shape=factor(shape)),size=1.5) +
scale_x_continuous(expression(Chromosome~~%d~~(kb)),breaks=ticks,labels=labels) +
scale_y_continuous(expression(-log[10](italic(p))),breaks=seq(0,%d,1),limits=c(0,%d)) +
theme_bw(base_size = 8) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14),
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.title = element_text(size=10),
axis.text = element_text(size=12), legend.position = 'none')
%s
""" % (sig, chr, maxy, maxy, ggsave))
else:
ro.r("""
gp = ggplot(df, aes_string(x='gpos',y='logp',colour='colours')) +
geom_hline(yintercept = -1 * log10(%g),colour="#B8860B", linetype=5, size = 0.25) +
geom_point(aes(shape=factor(shape)),size=1.5) +
scale_colour_manual(values=colours) +
scale_x_continuous(expression(Chromosome),breaks=ticks,labels=chrs) +
scale_y_continuous(expression(-log[10](italic(p))),breaks=seq(0,%d,1),limits=c(0,%d)) +
theme_bw(base_size = 8) +
theme(axis.title.x = element_text(vjust=-0.5,size=14), axis.title.y = element_text(vjust=1,angle=90,size=14),
panel.background = element_blank(), panel.border = element_blank(), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.line = element_line(colour="black"), axis.title = element_text(size=8),
axis.text = element_text(size=12), legend.position = 'none')
%s
""" % (sig, maxy, maxy, ggsave))
print("process complete")
return 0
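# A minimal sketch (not part of the pipeline above) of the genomic-control adjustment
# applied when cfg['gc'] is set and the inflation factor exceeds 1: each p-value is
# converted to a two-sided z statistic, deflated by sqrt(lambda), and mapped back to a
# p-value. It assumes scipy.stats.norm; the pipeline's own scipy import alias may differ.
def gc_adjust(pvalues, lambda_gc):
    """Return genomic-control adjusted p-values for inflation factor lambda_gc."""
    import numpy as np
    from scipy.stats import norm
    p = np.asarray(pvalues, dtype=float)
    z = np.abs(norm.ppf(0.5 * p))                  # |z| corresponding to a two-sided p-value
    return 2.0 * norm.cdf(-z / np.sqrt(lambda_gc))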
|
gpl-3.0
|
raincoatrun/basemap
|
setup.py
|
1
|
7232
|
import sys, glob, os, subprocess
major, minor1, minor2, s, tmp = sys.version_info
if major==2 and minor1<4 or major<2:
raise SystemExit("""matplotlib and the basemap toolkit require Python 2.4 or later.""")
from distutils.dist import Distribution
from distutils.util import convert_path
from distutils import ccompiler, sysconfig
# Do not require numpy for just querying the package
# Taken from the netcdf-python setup file (which took it from h5py setup file).
inc_dirs = []
if any('--' + opt in sys.argv for opt in Distribution.display_option_names +
['help-commands', 'help']) or sys.argv[1] == 'egg_info':
from distutils.core import setup, Extension
else:
import numpy
# Use numpy versions if they are available.
from numpy.distutils.core import setup, Extension
# append numpy include dir.
inc_dirs.append(numpy.get_include())
try:
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError:
from distutils.command.build_py import build_py
def checkversion(GEOS_dir):
"""check geos C-API header file (geos_c.h)"""
try:
f = open(os.path.join(GEOS_dir,'include/geos_c.h'))
except IOError:
return None
geos_version = None
for line in f:
if line.startswith('#define GEOS_VERSION'):
geos_version = line.split()[2]
return geos_version
# get location of geos lib from environment variable if it is set.
if 'GEOS_DIR' in os.environ:
GEOS_dir = os.environ.get('GEOS_DIR')
else:
# set GEOS_dir manually here if automatic detection fails.
GEOS_dir = None
user_home = os.path.expanduser('~')
geos_search_locations = [user_home, os.path.join(user_home, 'local'),
'/usr', '/usr/local', '/sw', '/opt', '/opt/local']
if GEOS_dir is None:
# if GEOS_dir not set, check a few standard locations.
GEOS_dirs = geos_search_locations
for direc in GEOS_dirs:
geos_version = checkversion(direc)
sys.stdout.write('checking for GEOS lib in %s ....\n' % direc)
if geos_version is None or geos_version < '"3.1.1"':
continue
else:
sys.stdout.write('GEOS lib (version %s) found in %s\n' %\
(geos_version[1:-1],direc))
GEOS_dir = direc
break
else:
geos_version = checkversion(GEOS_dir)
if GEOS_dir is None:
raise SystemExit("""
Can't find geos library in standard locations ('%s').
Please install the corresponding packages using your
system's software management system (e.g. for Debian Linux do:
'apt-get install libgeos-3.3.3 libgeos-c1 libgeos-dev') and/or
set the environment variable GEOS_DIR to point to the location
where geos is installed (for example, if geos_c.h
is in /usr/local/include, and libgeos_c is in /usr/local/lib,
set GEOS_DIR to /usr/local), or edit the setup.py script
manually and set the variable GEOS_dir (right after the line
that says "set GEOS_dir manually here").""" % "', '".join(geos_search_locations))
else:
    geos_include_dirs=[os.path.join(GEOS_dir,'include')] + inc_dirs
geos_library_dirs=[os.path.join(GEOS_dir,'lib'),os.path.join(GEOS_dir,'lib64')]
# proj4 and geos extensions.
deps = glob.glob('src/*.c')
deps.remove(os.path.join('src','_proj.c'))
deps.remove(os.path.join('src','_geoslib.c'))
packages = ['mpl_toolkits','mpl_toolkits.basemap']
namespace_packages = ['mpl_toolkits']
package_dirs = {'':'lib'}
extensions = [Extension("mpl_toolkits.basemap._proj",deps+['src/_proj.c'],include_dirs = ['src'],)]
# can't install _geoslib in mpl_toolkits.basemap namespace,
# or Basemap objects won't be pickleable.
if sys.platform == 'win32':
# don't use runtime_library_dirs on windows (workaround
# for a distutils bug - http://bugs.python.org/issue2437).
#extensions.append(Extension("mpl_toolkits.basemap._geoslib",['src/_geoslib.c'],
extensions.append(Extension("_geoslib",['src/_geoslib.c'],
library_dirs=geos_library_dirs,
include_dirs=geos_include_dirs,
libraries=['geos']))
else:
#extensions.append(Extension("mpl_toolkits.basemap._geoslib",['src/_geoslib.c'],
extensions.append(Extension("_geoslib",['src/_geoslib.c'],
library_dirs=geos_library_dirs,
runtime_library_dirs=geos_library_dirs,
include_dirs=geos_include_dirs,
libraries=['geos_c']))
# Specify all the required mpl data
# create pyproj binary datum shift grid files.
pathout =\
os.path.join('lib',os.path.join('mpl_toolkits',os.path.join('basemap','data')))
if sys.argv[1] not in ['sdist','clean']:
cc = ccompiler.new_compiler()
sysconfig.get_config_vars()
sysconfig.customize_compiler(cc)
cc.set_include_dirs(['src'])
objects = cc.compile(['nad2bin.c', 'src/pj_malloc.c'])
execname = 'nad2bin'
cc.link_executable(objects, execname)
llafiles = glob.glob('datumgrid/*.lla')
cmd = os.path.join(os.getcwd(),execname)
for f in llafiles:
fout = os.path.basename(f.split('.lla')[0])
fout = os.path.join(pathout,fout)
strg = '%s %s < %s' % (cmd, fout, f)
sys.stdout.write('executing %s\n' % strg)
subprocess.call(strg,shell=True)
datafiles = glob.glob(os.path.join(pathout,'*'))
datafiles = [os.path.join('data',os.path.basename(f)) for f in datafiles]
package_data = {'mpl_toolkits.basemap':datafiles}
__version__ = "1.0.8"
setup(
name = "basemap",
version = __version__,
description = "Plot data on map projections with matplotlib",
long_description = """
An add-on toolkit for matplotlib that lets you plot data
on map projections with coastlines, lakes, rivers and political boundaries.
See http://www.scipy.org/wikis/topical_software/Maps for an
example of what it can do.""",
url = "http://matplotlib.sourceforge.net/toolkits.html",
download_url = "https://downloads.sourceforge.net/project/matplotlib/matplotlib-toolkits/basemap-{0}/basemap-{0}.tar.gz".format(__version__),
author = "Jeff Whitaker",
author_email = "[email protected]",
install_requires = ["numpy>=1.2.1", "matplotlib>=1.0.0"],
platforms = ["any"],
license = "OSI Approved",
keywords = ["python","plotting","plots","graphs","charts","GIS","mapping","map projections","maps"],
classifiers = ["Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"License :: OSI Approved",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Software Development :: Libraries :: Python Modules",
"Operating System :: OS Independent"],
packages = packages,
namespace_packages = namespace_packages,
package_dir = package_dirs,
ext_modules = extensions,
cmdclass = {'build_py': build_py},
package_data = package_data
)
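# A minimal, hypothetical sketch (not part of the original setup script) showing how the
# GEOS lookup above can be reused on its own: scan candidate prefixes with the
# checkversion() helper defined earlier, or simply export GEOS_DIR before building
# (e.g. GEOS_DIR=/usr/local python setup.py build). The prefix list is illustrative only.
def locate_geos(prefixes=('/usr', '/usr/local', '/opt', '/opt/local')):
    """Return (prefix, version) for the first prefix whose include/geos_c.h defines GEOS_VERSION."""
    for prefix in prefixes:
        version = checkversion(prefix)
        if version is not None:
            return prefix, version.strip('"')
    return None, None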
|
gpl-2.0
|
padilha/biclustlib
|
biclustlib/algorithms/cca.py
|
1
|
12543
|
"""
biclustlib: A Python library of biclustering algorithms and evaluation measures.
Copyright (C) 2017 Victor Alexandre Padilha
This file is part of biclustlib.
biclustlib is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
biclustlib is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from ._base import BaseBiclusteringAlgorithm
from ..models import Bicluster, Biclustering
from sklearn.utils.validation import check_array
import numpy as np
import bottleneck as bn
import random
import math
class ChengChurchAlgorithm(BaseBiclusteringAlgorithm):
"""Cheng and Church's Algorithm (CCA)
CCA searches for maximal submatrices with a Mean Squared Residue value below a pre-defined threshold.
Reference
----------
Cheng, Y., & Church, G. M. (2000). Biclustering of expression data. In Ismb (Vol. 8, No. 2000, pp. 93-103).
Parameters
----------
num_biclusters : int, default: 10
Number of biclusters to be found.
msr_threshold : float or str, default: 'estimate'
Maximum mean squared residue accepted (delta parameter in the original paper).
If 'estimate', the algorithm will calculate this threshold as:
(((max(data) - min(data)) ** 2) / 12) * 0.005.
multiple_node_deletion_threshold : float, default: 1.2
Scaling factor to remove multiple rows or columns (alpha parameter in the original paper).
data_min_cols : int, default: 100
Minimum number of dataset columns required to perform multiple column deletion.
"""
def __init__(self, num_biclusters=10, msr_threshold='estimate', multiple_node_deletion_threshold=1.2, data_min_cols=100):
self.num_biclusters = num_biclusters
self.msr_threshold = msr_threshold
self.multiple_node_deletion_threshold = multiple_node_deletion_threshold
self.data_min_cols = data_min_cols
def run(self, data):
"""Compute biclustering.
Parameters
----------
data : numpy.ndarray
"""
data = check_array(data, dtype=np.double, copy=True)
self._validate_parameters()
num_rows, num_cols = data.shape
min_value = np.min(data)
max_value = np.max(data)
msr_thr = (((max_value - min_value) ** 2) / 12) * 0.005 if self.msr_threshold == 'estimate' else self.msr_threshold
biclusters = []
for i in range(self.num_biclusters):
rows = np.ones(num_rows, dtype=np.bool)
cols = np.ones(num_cols, dtype=np.bool)
self._multiple_node_deletion(data, rows, cols, msr_thr)
self._single_node_deletion(data, rows, cols, msr_thr)
self._node_addition(data, rows, cols)
row_indices = np.nonzero(rows)[0]
col_indices = np.nonzero(cols)[0]
if len(row_indices) == 0 or len(col_indices) == 0:
break
# masking matrix values
if i < self.num_biclusters - 1:
bicluster_shape = (len(row_indices), len(col_indices))
data[row_indices[:, np.newaxis], col_indices] = np.random.uniform(low=min_value, high=max_value, size=bicluster_shape)
biclusters.append(Bicluster(row_indices, col_indices))
return Biclustering(biclusters)
def _single_node_deletion(self, data, rows, cols, msr_thr):
"""Performs the single row/column deletion step (this is a direct implementation of the Algorithm 1 described in
the original paper)"""
msr, row_msr, col_msr = self._calculate_msr(data, rows, cols)
while msr > msr_thr:
self._single_deletion(data, rows, cols, row_msr, col_msr)
msr, row_msr, col_msr = self._calculate_msr(data, rows, cols)
def _single_deletion(self, data, rows, cols, row_msr, col_msr):
"""Deletes a row or column from the bicluster being computed."""
row_indices = np.nonzero(rows)[0]
col_indices = np.nonzero(cols)[0]
row_max_msr = np.argmax(row_msr)
col_max_msr = np.argmax(col_msr)
if row_msr[row_max_msr] >= col_msr[col_max_msr]:
row2remove = row_indices[row_max_msr]
rows[row2remove] = False
else:
col2remove = col_indices[col_max_msr]
cols[col2remove] = False
def _multiple_node_deletion(self, data, rows, cols, msr_thr):
"""Performs the multiple row/column deletion step (this is a direct implementation of the Algorithm 2 described in
the original paper)"""
msr, row_msr, col_msr = self._calculate_msr(data, rows, cols)
stop = True if msr <= msr_thr else False
while not stop:
cols_old = np.copy(cols)
rows_old = np.copy(rows)
row_indices = np.nonzero(rows)[0]
rows2remove = row_indices[np.where(row_msr > self.multiple_node_deletion_threshold * msr)]
rows[rows2remove] = False
if len(cols) >= self.data_min_cols:
msr, row_msr, col_msr = self._calculate_msr(data, rows, cols)
col_indices = np.nonzero(cols)[0]
cols2remove = col_indices[np.where(col_msr > self.multiple_node_deletion_threshold * msr)]
cols[cols2remove] = False
msr, row_msr, col_msr = self._calculate_msr(data, rows, cols)
# Tests if the new MSR value is smaller than the acceptable MSR threshold.
# Tests if no rows and no columns were removed during this iteration.
# If one of the conditions is true the loop must stop, otherwise it will become an infinite loop.
if msr <= msr_thr or (np.all(rows == rows_old) and np.all(cols == cols_old)):
stop = True
def _node_addition(self, data, rows, cols):
"""Performs the row/column addition step (this is a direct implementation of the Algorithm 3 described in
the original paper)"""
stop = False
while not stop:
cols_old = np.copy(cols)
rows_old = np.copy(rows)
msr, _, _ = self._calculate_msr(data, rows, cols)
col_msr = self._calculate_msr_col_addition(data, rows, cols)
cols2add = np.where(col_msr <= msr)[0]
cols[cols2add] = True
msr, _, _ = self._calculate_msr(data, rows, cols)
row_msr, row_inverse_msr = self._calculate_msr_row_addition(data, rows, cols)
rows2add = np.where(np.logical_or(row_msr <= msr, row_inverse_msr <= msr))[0]
rows[rows2add] = True
if np.all(rows == rows_old) and np.all(cols == cols_old):
stop = True
def _calculate_msr(self, data, rows, cols):
"""Calculate the mean squared residues of the rows, of the columns and of the full data matrix."""
sub_data = data[rows][:, cols]
data_mean = np.mean(sub_data)
row_means = np.mean(sub_data, axis=1)
col_means = np.mean(sub_data, axis=0)
residues = sub_data - row_means[:, np.newaxis] - col_means + data_mean
squared_residues = residues * residues
msr = np.mean(squared_residues)
row_msr = np.mean(squared_residues, axis=1)
col_msr = np.mean(squared_residues, axis=0)
return msr, row_msr, col_msr
def _calculate_msr_col_addition(self, data, rows, cols):
"""Calculate the mean squared residues of the columns for the node addition step."""
sub_data = data[rows][:, cols]
sub_data_rows = data[rows]
data_mean = np.mean(sub_data)
row_means = np.mean(sub_data, axis=1)
col_means = np.mean(sub_data_rows, axis=0)
col_residues = sub_data_rows - row_means[:, np.newaxis] - col_means + data_mean
col_squared_residues = col_residues * col_residues
col_msr = np.mean(col_squared_residues, axis=0)
return col_msr
def _calculate_msr_row_addition(self, data, rows, cols):
"""Calculate the mean squared residues of the rows and of the inverse of the rows for
the node addition step."""
sub_data = data[rows][:, cols]
sub_data_cols = data[:, cols]
data_mean = np.mean(sub_data)
row_means = np.mean(sub_data_cols, axis=1)
col_means = np.mean(sub_data, axis=0)
row_residues = sub_data_cols - row_means[:, np.newaxis] - col_means + data_mean
row_squared_residues = row_residues * row_residues
row_msr = np.mean(row_squared_residues, axis=1)
inverse_residues = -sub_data_cols + row_means[:, np.newaxis] - col_means + data_mean
row_inverse_squared_residues = inverse_residues * inverse_residues
row_inverse_msr = np.mean(row_inverse_squared_residues, axis=1)
return row_msr, row_inverse_msr
def _validate_parameters(self):
if self.num_biclusters <= 0:
raise ValueError("num_biclusters must be > 0, got {}".format(self.num_biclusters))
if self.msr_threshold != 'estimate' and self.msr_threshold < 0.0:
raise ValueError("msr_threshold must be equal to 'estimate' or a numeric value >= 0.0, got {}".format(self.msr_threshold))
if self.multiple_node_deletion_threshold < 1.0:
raise ValueError("multiple_node_deletion_threshold must be >= 1.0, got {}".format(self.multiple_node_deletion_threshold))
if self.data_min_cols < 100:
raise ValueError("data_min_cols must be >= 100, got {}".format(self.data_min_cols))
class ModifiedChengChurchAlgorithm(ChengChurchAlgorithm):
"""Modified Cheng and Church's Algorithm (MCCA)
MCCA searches for maximal submatrices with a Mean Squared Residue value below a pre-defined threshold.
In the single node deletion step implemented in this class, the row/column to be dropped is randomly chosen
among the top alpha% of the objects or features minimizing the Mean Squared Residue of the remaining
matrix.
Reference
----------
Hanczar, B., & Nadif, M. (2012). Ensemble methods for biclustering tasks. Pattern Recognition, 45(11), 3938-3949.
Parameters
----------
num_biclusters : int, default: 10
Number of biclusters to be found.
msr_threshold : float, default: 0.1
Maximum mean squared residue accepted (delta parameter in the original paper).
multiple_node_deletion_threshold : float, default: 1.2
Scaling factor to remove multiple rows or columns (alpha parameter in the original paper).
data_min_cols : int, default: 100
Minimum number of dataset columns required to perform multiple column deletion.
alpha : float, default: 0.05
Percentage of the top objects or features that will be considered in the random choice of the
modified single node deletion step.
"""
def __init__(self, num_biclusters=10, msr_threshold=0.1, multiple_node_deletion_threshold=1.2, data_min_cols=100, alpha=0.05):
super(ModifiedChengChurchAlgorithm, self).__init__(num_biclusters, msr_threshold, multiple_node_deletion_threshold, data_min_cols)
self.alpha = alpha
def _single_deletion(self, data, rows, cols, row_msr, col_msr):
"""Deletes a row or column from the bicluster being computed."""
num_rows, num_cols = data.shape
choice = random.randint(0, 1)
if choice:
self.__random_deletion(data, rows, row_msr, choice)
else:
self.__random_deletion(data, cols, col_msr, choice)
def _validate_parameters(self):
super(ModifiedChengChurchAlgorithm, self)._validate_parameters()
if not (0.0 < self.alpha <= 1.0):
raise ValueError("alpha must be > 0.0 and <= 1.0, got {}".format(self.alpha))
def __random_deletion(self, data, bool_array, msr_array, choice):
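        # Choose uniformly at random among the ceil(alpha * len(msr_array)) rows/columns with
        # the largest mean squared residue; argpartition leaves the positions of the n largest
        # entries (in arbitrary order) in the last n slots.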
indices = np.where(bool_array)[0]
n = int(math.ceil(len(msr_array) * self.alpha))
max_msr_indices = bn.argpartition(msr_array, len(msr_array) - n)[-n:]
i = indices[np.random.choice(max_msr_indices)]
bool_array[i] = False
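# A minimal usage sketch (not part of the original module). The toy data and parameter
# values are illustrative only; client code would normally import the class as
# `from biclustlib.algorithms.cca import ChengChurchAlgorithm`.
def _cca_usage_example():
    import numpy as np
    data = np.random.rand(200, 40)                  # toy expression-like matrix
    model = ChengChurchAlgorithm(num_biclusters=5)  # msr_threshold='estimate' by default
    return model.run(data)                          # Biclustering with up to 5 biclusters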
|
gpl-3.0
|
cluckmaster/MissionPlanner
|
Lib/site-packages/numpy/fft/fftpack.py
|
59
|
39653
|
"""
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
__all__ = ['fft','ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn',
'refft', 'irefft','refftn','irefftn', 'refft2', 'irefft2']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache = _fft_cache ):
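    # Shared driver for the 1-D transforms: reuse a cached FFTPACK work array for length n,
    # crop or zero-pad `a` along `axis` to length n, then apply `work_function` over the
    # last axis (swapping axes back afterwards if necessary).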
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified." % n)
try:
wsave = fft_cache[n]
    except KeyError:
wsave = init_function(n)
fft_cache[n] = wsave
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0,n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0,s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
return r
def fft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
        If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
def ifft(a, n=None, axis=-1):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
        If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
a = asarray(a).astype(complex)
if n is None:
n = shape(a)[axis]
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n
def rfft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input (along the axis specified by `axis`) is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is ``n/2+1``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermite-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n/2+1``.
When ``A = rfft(a)``, ``A[0]`` contains the zero-frequency term, which
must be purely real due to the Hermite symmetry.
If `n` is even, ``A[-1]`` contains the term for frequencies ``n/2`` and
``-n/2``, and must also be purely real. If `n` is odd, ``A[-1]``
contains the term for frequency ``A[(n-1)/2]``, and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
a = asarray(a).astype(float)
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache)
def irfft(a, n=None, axis=-1):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermite-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n/2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input (along the axis specified by `axis`).
axis : int, optional
Axis over which to compute the inverse FFT.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where `m` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermite-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache) / n
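# A small round-trip sketch (not part of the original module) illustrating the documented
# identity irfft(rfft(a), len(a)) == a for real input, written against the public
# numpy.fft interface rather than the f2c-backed routines in this file.
def _irfft_roundtrip_example():
    import numpy as np
    a = np.array([0.0, 1.0, 0.0, 0.0, 2.0])     # real signal of odd length
    spectrum = np.fft.rfft(a)                    # len(a)//2 + 1 complex terms
    recovered = np.fft.irfft(spectrum, len(a))   # len(a) must be passed for odd n
    assert np.allclose(recovered, a)
    return recovered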
def hfft(a, n=None, axis=-1):
"""
Compute the FFT of a signal whose spectrum has Hermitian symmetry.
Parameters
----------
a : array_like
The input array.
n : int, optional
The length of the FFT.
axis : int, optional
The axis over which to compute the FFT, assuming Hermitian symmetry
of the spectrum. Default is the last axis.
Returns
-------
out : ndarray
The transformed input.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return irfft(conjugate(a), n, axis) * n
def ihfft(a, n=None, axis=-1):
"""
Compute the inverse FFT of a signal whose spectrum has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
axis : int, optional
Axis over which to compute the inverse FFT, assuming Hermitian
symmetry of the spectrum. Default is the last axis.
Returns
-------
out : ndarray
The transformed input.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal is real in the frequency domain and has
Hermite symmetry in the time domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
"""
a = asarray(a).astype(float)
if n is None:
n = shape(a)[axis]
return conjugate(rfft(a, n, axis))/n
def _cook_nd_args(a, s=None, axes=None, invreal=0):
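    # Normalize the (s, axes) arguments shared by the n-d transforms: default `axes` to the
    # last len(s) axes and `s` to the input shape along those axes; for an inverse real
    # transform with no explicit shape, recover the full length 2*(m-1) on the last axis.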
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = range(-len(s), 0)
if len(s) != len(axes):
raise ValueError, "Shape and axes have different lengths."
if invreal and shapeless:
s[axes[-1]] = (s[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft):
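    # Apply the supplied 1-D transform once per requested axis, iterating over the axes
    # list in reverse order.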
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = range(len(axes))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii])
return a
def fftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a,s,axes,fft)
def ifftn(a, s=None, axes=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft)
def fft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 5.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 10.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 15.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 20.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a,s,axes,fft)
def ifft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft)
def rfftn(a, s=None, axes=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
if `s` is not given, the shape of the input (along the axes specified
by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
a = asarray(a).astype(float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1])
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii])
return a
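# Illustrative sketch, not part of the original module: as documented above, the
# last transformed axis of the real FFT output has length s[-1]//2 + 1 while the
# remaining axes keep their input lengths. Hypothetical helper name.
def _rfftn_shape_sketch():
    import numpy as np
    a = np.ones((4, 6))
    out = np.fft.rfftn(a)
    return out.shape  # (4, 4): first axis unchanged, last axis 6 // 2 + 1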
def rfft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes)
def irfftn(a, s=None, axes=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input (along the
axes specified by `axes`) is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where `m` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
of which `irfftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
a = asarray(a).astype(complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii])
a = irfft(a, s[-1], axes[-1])
return a
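# Illustrative sketch, not part of the original module: as the docstring notes,
# irfftn(rfftn(a), a.shape) recovers `a` to within numerical accuracy; passing
# a.shape resolves the odd/even ambiguity of the last axis. Hypothetical helper.
def _irfftn_roundtrip_sketch():
    import numpy as np
    a = np.arange(15.0).reshape(3, 5)
    recovered = np.fft.irfftn(np.fft.rfftn(a), a.shape)
    return np.allclose(a, recovered)  # True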
def irfft2(a, s=None, axes=(-2,-1)):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes)
# Deprecated names
from numpy import deprecate
refft = deprecate(rfft, 'refft', 'rfft')
irefft = deprecate(irfft, 'irefft', 'irfft')
refft2 = deprecate(rfft2, 'refft2', 'rfft2')
irefft2 = deprecate(irfft2, 'irefft2', 'irfft2')
refftn = deprecate(rfftn, 'refftn', 'rfftn')
irefftn = deprecate(irfftn, 'irefftn', 'irfftn')
|
gpl-3.0
|
blbrock/Nest-ODR
|
plot_nest.py
|
2
|
4755
|
import os
import matplotlib as mpl
#mpl.use('WXAgg')
#matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
#import matplotlib.cbook as cbook
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
from datetime import datetime, timedelta
from nest_extras import get_parameters
from subprocess import call
import csv
##-------------------------------- Classes --------------------------------------
class data_file(object):
def __init__(self, name, fname):
self.name = name
self.file = fname
self.array = None
self.plot = None
def setarray(self, data):
self.array = data
return self.array
##------------------------------ Functions ---------------------------------------
def get_thermostat_list(infile):
thermostats = []
with open(infile, 'r') as f:
reader = csv.reader(f, delimiter=',', skipinitialspace=True)
next(reader, None) # skip the headers
for row in reader:
if row[0] not in thermostats:
thermostats.append(row[0])
return(thermostats)
def subset_data(infile, thermostat):
outfile = infile.rsplit(os.sep,1)[0] + os.sep + thermostat.replace(' ', '_') + '.csv'
with open(infile,'r') as fin, open (outfile,'w') as fout:
writer = csv.writer(fout, delimiter=',')
rownum = 0
for row in csv.reader(fin, delimiter=','):
if row[0] == thermostat or rownum == 0:
writer.writerow(row)
rownum += 1
return (outfile)
def gen_array(data_file):
datestr2num = lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S')
array = np.genfromtxt(data_file.file, names=True, converters={'Sample_Time': datestr2num},
delimiter=',', dtype=None, skip_header=0, skip_footer=0)
return array
def plot_thermostat(array, thermostat):
time, t_room, t_target = array['Sample_Time'], array['T_room'], array['T_target']
fig = plt.figure()
ax1 = fig.add_subplot(311, axisbg='dimgray')
plt.plot_date(x=time, y=t_target, fmt='-', color='red')
plt.plot_date(x=time, y=t_room, fmt='-')
plt.gcf().autofmt_xdate()
plt.title(thermostat)
plt.ylabel('Temp_room')
plt.xlabel('Date')
plt.show()
return(array)
def plot_thermostats(data_file_list):
i = 1
fig = plt.figure()
# get datetime of one week ago
xlim = datetime.now() - timedelta(days=7)
for d in data_file_list:
time, t_room, t_target, t_setpoint = d.array['Sample_Time'], d.array['T_room'], d.array['T_target'], d.array['T_setpoint']
d.plot = fig.add_subplot(310 + i, axisbg='white')
plt.plot_date(x=time, y=t_room, fmt='-', color='blue', label='Room Temp F')
plt.plot_date(x=time, y=t_target, fmt='-', color='orange', label ='Target Temp F')
plt.plot_date(x=time, y=t_setpoint, fmt='-', color='gray', label = 'Setpoint Temp F')
plt.gcf().autofmt_xdate()
plt.title(d.name)
plt.ylabel('Temp F')
# plt.xlabel('Date')
d.plot.set_xlim([xlim, datetime.now()])
i = i + 1
if d == data_file_list[0]:
legend = plt.legend(loc='upper right', shadow=False)
# The frame is matplotlib.patches.Rectangle instance surrounding the legend.
frame = legend.get_frame()
frame.set_facecolor('white')
frame.set_edgecolor('black')
# Set the fontsize
for label in legend.get_texts():
label.set_color('black')
label.set_fontsize('medium')
loc = d.plot.xaxis.set_major_locator(mpl.dates.WeekdayLocator(byweekday=(MO, TU, WE, TH, FR, SA, SU)))
loc2 = d.plot.xaxis.set_minor_locator(mpl.dates.HourLocator(byhour=(12)))
d.plot.xaxis.set_major_formatter(mpl.dates.DateFormatter('%a %d %b\n%H:%M'))
# d.plot.xaxis.set_major_formatter(mpl.dates.DateFormatter('%a %d\n%b %Y\n%H:%M'))
# d.plot.xaxis.set_minor_formatter(mpl.dates.DateFormatter("%H:%M"))
mng = plt.get_current_fig_manager()
## mng.frame.Maximize(True)
### for 'TkAgg' backend
mng.window.state('zoomed')
plt.show()
p = get_parameters()
for key,val in p.items():
if key == 'log_dir':
exec(key + '=val')
csv_file = log_dir + os.sep + 'nest_data.log'
thermostats = get_thermostat_list(csv_file)
data_file_list = []
for t in thermostats:
## data_file(t, subset_data(csv_file, t))
data_file_list.append(data_file(t, subset_data(csv_file, t)))
##zip_list = zip(thermostats,data_file_list)
for z in data_file_list:
z.setarray(gen_array(z))
# data = plot_thermostat(z.array, z.name)
plot_thermostats(data_file_list)
# Clean up temp files
for d in data_file_list:
os.remove(d.file)
|
gpl-3.0
|
kbrannan/PyHSPF
|
src/pyhspf/core/watershed.py
|
1
|
13564
|
# watershed.py
#
# David J. Lampert ([email protected])
#
# This file contains several classes used as data structures to store
# the information needed to build a watershed model.
#
import math
from matplotlib import pyplot, patches
class FlowPlane:
"""
A class that contains information about a flow plane.
"""
def __init__(self,
length,
slope,
centroid,
elev,
):
self.length = length
self.slope = slope
self.centroid = centroid
self.avgelev = elev
class Dam:
"""
A class to store information about a dam.
"""
def __init__(self,
nid,
name,
lon,
lat,
river,
owner,
damtype,
purposes,
year,
height,
max_storage,
norm_storage,
surface_area,
):
# the following are available from the National Inventory of Dams
self.nid = nid # NID number
self.name = name # name
self.lon = lon # longitude
self.lat = lat # latitude
self.river = river # name of primary tributary
self.owner = owner # owner type F = Federal
# L = Local
# P = Private
# S = State
# U = Utility
self.damtype = damtype # material(s) CB = Concrete Buttress
# CN = Concrete
# ER = Rockfill
# MS = Masonry
# MV = MultiArch
# OT = Other
# PG = Gravity
# RE = Earth
# ST = Stone
# TC = Timber Crib
# VA = Arch
self.purposes = purposes # C = Flood control
# D = Debris control
# F = Fish and wildlife
# H = Hydroelectric
# I = Irrigation
# N = Navigation
# O = Other
# P = Stock Pond
# R = Recreation
# S = Water supply
# T = Tailings
self.year = year # year constructed
self.height = height # height (ft)
self.max_storage = max_storage # max storage (acre-ft)
self.norm_storage = norm_storage # storage below normal level (acre-ft)
self.surface_area = surface_area # surface area (acres)
class Reach:
"""
A class that contains information about a stream reach or reservoir.
"""
def __init__(self,
name,
maxelev,
minelev,
slopelen,
flow = None, # optional average flow
velocity = None, # optional average velocity
traveltime = None, # optional average traveltime
dam = None, # optional dam instance
ftable = None # optional stage-discharge table
):
self.name = name
self.maxelev = maxelev
self.minelev = minelev
self.slopelen = slopelen
self.flow = flow
self.velocity = velocity
self.traveltime = traveltime
self.dam = dam
self.ftable = ftable
def add_dam(self, nid, name, long, lat, river, owner, damtype, purposes,
year, height, max_storage, norm_storage, surface_area):
self.dam = Dam(nid, name, long, lat, river, owner, damtype, purposes,
year, height, max_storage, norm_storage, surface_area)
class Subbasin:
"""
A class that contains information about an HSPF watershed subbasin.
"""
def __init__(self, name):
"""Sets up some basic properties of a subbasin."""
self.name = name
self.reach = None
self.inlets = []
self.outlets = []
self.landuse = {}
def add_flowplane(self, length, slope, centroid, elev):
"""Adds the flowplane info to the subbasin."""
self.flowplane = FlowPlane(length, slope, centroid, elev)
def add_reach(self, name, maxelev, minelev, slopelen, flow = None,
velocity = None, traveltime = None, dam = None,
ftable = None):
"""Adds the Reach info to the subbasin."""
self.reach = Reach(name, maxelev, minelev, slopelen, flow = flow,
velocity = velocity, traveltime = traveltime,
dam = dam, ftable = ftable)
def add_dam(self, nid, name, lon, lat, river, owner, damtype, purposes,
year, height, max_storage, norm_storage, surface_area):
if self.reach is not None:
self.reach.add_dam(nid, name, lon, lat, river, owner, damtype,
purposes, year, height, max_storage,
norm_storage, surface_area)
def add_inlet(self, inlet):
"""Adds an inlet to the subbasin."""
self.inlets.append(inlet)
def add_landuse(self, year, landtypes, areas):
self.landuse[year] = {l:a for l, a in zip(landtypes, areas)}
class Watershed:
"""A class that stores all the information about a watershed needed to
build an HSPF model."""
def __init__(self, name, subbasins):
"""Constructor based on a list of instances of the Subbasin class.
subbasins -- a dictionary with keys as subbasin names and values as
instances of the Subbasin class for the watershed
outlets -- a list of the comids of the outlets for the watershed
inlets -- a list of the comids of the inlets for the watershed
updown -- a dictionary linking the mass flows for the watershed;
the keys are subbasin comids and the values are the
downstream subbasin comids
"""
self.name = name
self.subbasins = subbasins
self.outlets = []
self.inlets = []
self.headwaters = []
def add_inlet(self, inlet):
self.inlets.append(inlet)
def add_outlet(self, outlet):
self.outlets.append(outlet)
def add_headwater(self, comid):
self.headwaters.append(comid)
def add_mass_linkage(self, updown):
self.updown = updown
def plot_mass_flow(self,
title = 'Subbasin Reach Mass Flow Diagram',
fontsize = 6,
theight = 0.2,
l = 8.5,
w = 11,
verbose = True,
show = False,
output = None,
):
"""
Makes a schematic of the mass linkages between the various subbasins
in a watershed.
"""
if verbose: print('generating a mass linkage plot\n')
fontheight = fontsize / 72.
rheight = 3 * fontheight
rwidth = 12 * fontheight
xgap = fontheight
ygap = rheight
awidth = rheight / 4
aheight = rheight / 3
# set up a sheet to write the image
fig = pyplot.figure(figsize = (w, l))
ax = fig.add_subplot(111, aspect = 'equal')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
t = ax.set_title(title)
# divide the subbasins into rows and put them on the chart
# start at the bottom to organize the linkages better
rows = [self.outlets, ['outlet']]
top = False
while not top:
row = []
for next in rows[0]:
for subbasin in self.updown:
if self.updown[subbasin] == next: row.append(subbasin)
if len(row) > 0:
rows.insert(0, row)
else:
top = True
# add an inlet box in the row above each inlet
for inlet in self.inlets:
i = 0
while i < len(rows) - 1:
for subbasin in rows[i]:
if subbasin == inlet:
# find the position of the subbasin in the chart
j = rows[i].index(inlet)
if i > 0:
# figure out where the subbasins point
updowns = [self.updown[s] for s in rows[i-1]]
# if first or last, add it there in the row above
if j == 0:
rows[i-1].insert(0, 'inlet')
elif j == len(rows[i]) - 1:
rows[i-1].append('inlet')
else:
# find the place to add in the preceding row
n = updowns.index(rows[i][j-1]) + 1
rows[i-1].insert(n, 'inlet')
i += 1
# write the subbasin boxes to the chart
middle = math.ceil(w // (rwidth + xgap)) // 2
last = 0
# keep track of the bounding box of the plot
xmin, ymin, xmax, ymax = middle, 0, middle, 0
for i in range(len(rows)):
row = rows[i]
y = (ygap + rheight) * i + theight
# figure out which cell to put in the main column
if i == 0:
main = row[(len(row) - 1) // 2]
elif i < len(rows) - 1:
main = self.updown[rows[i-1][last]]
else: main = 'outlet'
start = middle - row.index(main)
if i < len(rows) - 1: next_row = rows[i + 1]
for subbasin in row:
x = (rwidth + xgap) * (start + row.index(subbasin))
r = patches.Rectangle((x, y), rwidth, rheight, fill = False)
# adjust the bounding box
if x < xmin: xmin = x
if x + rwidth > xmax: xmax = x + rwidth
if y < ymin: ymin = y
if y + rheight > ymax: ymax = y + rheight
if subbasin != 'outlet': ax.add_patch(r)
b = ax.text(x + rwidth / 2, y + rheight / 2, subbasin,
horizontalalignment = 'center',
verticalalignment = 'center')
# draw the arrow
if i < len(rows) - 1:
x1 = x + rwidth / 2
if i < len(rows) - 2 and subbasin != 'inlet':
next = self.updown[subbasin]
next_start = (middle -
next_row.index(self.updown[main]))
x2 = ((rwidth + xgap) *
(next_start + next_row.index(next))
+ rwidth / 2)
elif subbasin == 'inlet':
next = self.inlets[0]
next_start = (middle -
next_row.index(self.updown[main]))
x2 = ((rwidth + xgap) *
(next_start + next_row.index(next))
+ rwidth / 2)
else:
next_start = middle
x2 = ((rwidth + xgap) * (middle) + rwidth / 2)
a = pyplot.arrow(x1, y + rheight, x2 - x1, ygap,
head_width = awidth, head_length = aheight,
fc = 'k', ec = 'k',
length_includes_head = True)
ax.add_patch(a)
last = row.index(main)
i += 1
pad = 0.02
xmin = xmin - (xmax - xmin) * pad
xmax = xmax + (xmax - xmin) * pad
ymin = ymin - (ymax - ymin) * pad
ymax = ymax + (ymax - ymin) * pad
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymax, ymin)
pyplot.axis('off')
if output is not None: pyplot.savefig(output, dpi = 200)
if show: pyplot.show()
pyplot.clf()
pyplot.close()
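# Illustrative usage sketch, not part of the original module. The comids,
# coordinates and elevations below are made up; it only shows how the Subbasin
# and Watershed containers defined above fit together.
def _watershed_usage_sketch():
    upper = Subbasin('101')
    upper.add_flowplane(length=1000, slope=0.01, centroid=(-93.5, 41.6), elev=350)
    upper.add_reach('upper reach', maxelev=360, minelev=340, slopelen=2.5)
    lower = Subbasin('102')
    lower.add_flowplane(length=1200, slope=0.008, centroid=(-93.4, 41.5), elev=330)
    lower.add_reach('lower reach', maxelev=340, minelev=320, slopelen=3.0)
    watershed = Watershed('Example', {'101': upper, '102': lower})
    watershed.add_mass_linkage({'101': '102'})  # subbasin 101 drains into 102
    watershed.add_outlet('102')
    watershed.add_headwater('101')
    return watershed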
|
bsd-3-clause
|
hrabcak/jsbsim
|
tests/CheckOutputRate.py
|
1
|
6222
|
# CheckOutputRate.py
#
# A regression test on the output features that allow to set the output rate
# including enabling/disabling the output.
#
# Copyright (c) 2015 Bertrand Coconnier
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>
#
import string
import xml.etree.ElementTree as et
import pandas as pd
from JSBSim_utils import JSBSimTestCase, CreateFDM, append_xml, RunTest
class CheckOutputRate(JSBSimTestCase):
def setUp(self):
JSBSimTestCase.setUp(self)
self.fdm = CreateFDM(self.sandbox)
self.script_path = self.sandbox.path_to_jsbsim_file('scripts',
'c1722.xml')
# Read the time step 'dt' from the script file
self.tree = et.parse(self.script_path)
root = self.tree.getroot()
use_tag = root.find('use')
aircraft_name = use_tag.attrib['aircraft']
self.run_tag = root.find('run')
self.dt = float(self.run_tag.attrib['dt'])
# Read the date at which the trim will be run
for event in root.findall('run/event'):
if event.attrib['name'] == 'Trim':
cond_tag = event.find('condition')
self.trim_date = float(string.split(cond_tag.text)[-1])
break
# Read the output rate and the output file from the aircraft file
aircraft_path = self.sandbox.path_to_jsbsim_file('aircraft', aircraft_name,
append_xml(aircraft_name))
tree = et.parse(aircraft_path)
output_tag = tree.getroot().find('output')
self.output_file = output_tag.attrib['name']
self.rateHz = float(output_tag.attrib['rate'])
self.rate = int(1.0 / (self.rateHz * self.dt))
def tearDown(self):
del self.fdm
JSBSimTestCase.tearDown(self)
def testOutputRate(self):
self.fdm.load_script(self.script_path)
# Check that the output is enabled by default
self.assertEqual(self.fdm["simulation/output/enabled"], 1.0)
# Check that the rate is consistent with the values extracted from the
# script and the aircraft definition
self.assertAlmostEqual(self.fdm["simulation/output/log_rate_hz"],
self.rateHz, delta=1E-5)
self.fdm.run_ic()
for i in xrange(self.rate):
self.fdm.run()
output = pd.read_csv(self.output_file)
# According to the settings, the output file must contain 2 lines in
# addition to the headers :
# 1. The initial conditions
# 2. The output after 'rate' iterations
self.assertEqual(output['Time'].iget(0), 0.0)
self.assertEqual(output['Time'].iget(1), self.rate * self.dt)
self.assertEqual(output['Time'].iget(1),
self.fdm["simulation/sim-time-sec"])
def testDisablingOutput(self):
self.fdm.load_script(self.script_path)
# Disables the output during the initialization
self.fdm["simulation/output/enabled"] = 0.0
self.fdm.run_ic()
self.fdm["simulation/output/enabled"] = 1.0
for i in xrange(self.rate):
self.fdm.run()
output = pd.read_csv(self.output_file)
# According to the settings, the output file must contain 1 line in
# addition to the headers :
# 1. The output after 'rate' iterations
self.assertEqual(output['Time'].iget(0),
self.fdm["simulation/sim-time-sec"])
def testTrimRestoresOutputSettings(self):
self.fdm.load_script(self.script_path)
# Disables the output during the initialization
self.fdm["simulation/output/enabled"] = 0.0
self.fdm.run_ic()
# Check that the output remains disabled even after the trim is
# executed
while self.fdm["simulation/sim-time-sec"] < self.trim_date + 2.0*self.dt:
self.fdm.run()
self.assertEqual(self.fdm["simulation/output/enabled"], 0.0)
# Re-enable the output and check that the output rate is unaffected by
# the previous operations
self.fdm["simulation/output/enabled"] = 1.0
frame = int(self.fdm["simulation/frame"])
for i in xrange(self.rate):
self.fdm.run()
output = pd.read_csv(self.output_file)
# The frame at which the data is logged must be the next multiple of
# the output rate
self.assertEqual(int(output['Time'].iget(0)/self.dt),
(1 + frame/self.rate)*self.rate)
def testDisablingOutputInScript(self):
property = et.SubElement(self.run_tag, 'property')
property.text = 'simulation/output/enabled'
property.attrib['value'] = "0.0"
self.tree.write('c1722_0.xml')
self.fdm.load_script('c1722_0.xml')
# Check that the output is disabled
self.assertEqual(self.fdm["simulation/output/enabled"], 0.0)
self.fdm.run_ic()
self.fdm["simulation/output/enabled"] = 1.0
for i in xrange(self.rate):
self.fdm.run()
output = pd.read_csv(self.output_file)
# According to the settings, the output file must contain 1 line in
# addition to the headers :
# 1. The output after 'rate' iterations
self.assertEqual(output['Time'].iget(0),
self.fdm["simulation/sim-time-sec"])
RunTest(CheckOutputRate)
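# Illustrative sketch, not part of the original test: the number of FDM
# iterations between two logged lines follows from the output rate (Hz) and the
# integration step dt, mirroring the computation in setUp. The default values
# here are made up.
def _iterations_per_logged_line(rateHz=10.0, dt=1.0 / 120.0):
    return int(1.0 / (rateHz * dt))  # e.g. 12 iterations per output line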
|
lgpl-2.1
|
pepper-johnson/Erudition
|
Thesis/Processing/Pipeline/Aggregates/aggregate_adj_matrix.py
|
1
|
1210
|
import json
import datetime
import pandas as pd
import scipy.sparse as sps
# ***********
# Methods:
# ***********
def get_config(config_file):
assert type(config_file) == str
with open(config_file) as f:
config = json.load(f)
return config
# ********
# Main:
# - build sparse right triangle adjacency matrix.
# ********
print("Started at", str(datetime.datetime.now()))
config = get_config(r'./aggregate_adj_matrix.json')
output = config['output']
files = config['files']
print()
print("configurations:")
print("config", str(config))
print()
print("Starting at", str(datetime.datetime.now()))
print()
print('loading file from directory', files[0])
matrix_result = sps.load_npz(files[0] + r'adjacency_matrix.npz').tocsr()
for file in files[1:]:
print('loading file from directory', file)
additional = sps.load_npz(file + r'adjacency_matrix.npz').tocsr()
print('adding result and additional files together ...')
matrix_result = (matrix_result + additional)
print("Saving matrix...")
sps.save_npz(output + '/adjacency_matrix.npz', matrix_result.tocsr())
print("Finished building adjacency matrix - at", str(datetime.datetime.now()))
|
apache-2.0
|
alan-unravel/bokeh
|
bokeh/tests/test_protocol.py
|
42
|
3959
|
from __future__ import absolute_import
import unittest
from unittest import skipIf
import numpy as np
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
class TestBokehJSONEncoder(unittest.TestCase):
def setUp(self):
from bokeh.protocol import BokehJSONEncoder
self.encoder = BokehJSONEncoder()
def test_fail(self):
self.assertRaises(TypeError, self.encoder.default, {'testing': 1})
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_panda_series(self):
s = pd.Series([1, 3, 5, 6, 8])
self.assertEqual(self.encoder.default(s), [1, 3, 5, 6, 8])
def test_numpyarray(self):
a = np.arange(5)
self.assertEqual(self.encoder.default(a), [0, 1, 2, 3, 4])
def test_numpyint(self):
npint = np.asscalar(np.int64(1))
self.assertEqual(self.encoder.default(npint), 1)
self.assertIsInstance(self.encoder.default(npint), int)
def test_numpyfloat(self):
npfloat = np.float64(1.33)
self.assertEqual(self.encoder.default(npfloat), 1.33)
self.assertIsInstance(self.encoder.default(npfloat), float)
def test_numpybool_(self):
nptrue = np.bool_(True)
self.assertEqual(self.encoder.default(nptrue), True)
self.assertIsInstance(self.encoder.default(nptrue), bool)
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_pd_timestamp(self):
ts = pd.tslib.Timestamp('April 28, 1948')
self.assertEqual(self.encoder.default(ts), -684115200000)
class TestSerializeJson(unittest.TestCase):
def setUp(self):
from bokeh.protocol import serialize_json, deserialize_json
self.serialize = serialize_json
self.deserialize = deserialize_json
def test_with_basic(self):
self.assertEqual(self.serialize({'test': [1, 2, 3]}), '{"test": [1, 2, 3]}')
def test_with_np_array(self):
a = np.arange(5)
self.assertEqual(self.serialize(a), '[0, 1, 2, 3, 4]')
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_with_pd_series(self):
s = pd.Series([0, 1, 2, 3, 4])
self.assertEqual(self.serialize(s), '[0, 1, 2, 3, 4]')
def test_nans_and_infs(self):
arr = np.array([np.nan, np.inf, -np.inf, 0])
serialized = self.serialize(arr)
deserialized = self.deserialize(serialized)
assert deserialized[0] == 'NaN'
assert deserialized[1] == 'Infinity'
assert deserialized[2] == '-Infinity'
assert deserialized[3] == 0
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_nans_and_infs_pandas(self):
arr = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
serialized = self.serialize(arr)
deserialized = self.deserialize(serialized)
assert deserialized[0] == 'NaN'
assert deserialized[1] == 'Infinity'
assert deserialized[2] == '-Infinity'
assert deserialized[3] == 0
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_datetime_types(self):
"""should convert to millis
"""
idx = pd.date_range('2001-1-1', '2001-1-5')
df = pd.DataFrame({'vals' :idx}, index=idx)
serialized = self.serialize({'vals' : df.vals,
'idx' : df.index})
deserialized = self.deserialize(serialized)
baseline = {u'vals': [978307200000,
978393600000,
978480000000,
978566400000,
978652800000],
u'idx': [978307200000,
978393600000,
978480000000,
978566400000,
978652800000]
}
assert deserialized == baseline
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
|
alexeyknorre/PyVK
|
code/fields2network.py
|
1
|
4118
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 04 13:44:10 2016
@author: Alexey
"""
import pandas as pd
import operator
from transliterate import translit, get_available_language_codes
import re
csv = '../results/profiles.csv'
## test
# csv = '../tests/profiles.csv'
# PAJEK_EDGELIST = '../tests/test_edgelist.net'
# field = "music"
# minimum_occurences = 25
USELESS_WORDS = u'разная|разное|разные|по настроению|под настроение|меломанка|меломан|всякая|все|всякая|нету|зависит от настроения|слушаю всё|всё|нет|_|'
def read_data(fields, csv=csv):
columns = ["uid"] + fields.split(',')
df = pd.read_csv(csv, na_values=["[]"],
usecols=columns, encoding="utf-8")
fields = fields.split(',')
query = '+", " + '.join('df["{0}"]'.format(field) for field in fields)
exec("df['field'] = " + query)
df['field'] = df['field'].str.lower()
df = df.dropna(subset=['field'])
return df
def clean_data(df):
# Remove weird characters
df = df.str.replace('[!_@#¶()]', '', case=False)
# Remove useless words
df = df.str.replace(USELESS_WORDS, '')
# Remove stacks of characters like 00000000
return df.str.replace(r'(.)\1{4,}', '')
def get_sorted_dict(df):
music_freq = {}
for field in df:
elements = field.split(",")
for element in elements:
element = element.strip()
if element in music_freq:
music_freq[element] += 1
else:
music_freq[element] = 1
return music_freq, sorted(music_freq.items(), key=operator.itemgetter(1), reverse=True)
def show_sorted_dict(sorted_dict, elements=50):
for i in range(elements):
print sorted_dict[i][0], sorted_dict[i][1]
def get_arcs(df, vertices):
print "Getting arcs..."
arcs = {}
c = 0
for field in df:
c += 1
if c % 1000 == 0:
print c
elements = []
elements_raw = field.split(",")
for element in elements_raw:
elements.append(element.strip())
for element in elements:
if element not in vertices:
continue
# copy the list so removing the current element does not mutate
# the sequence being iterated over
other_elements = list(elements)
other_elements.remove(element)
vertice_from = vertices.index(element)
if other_elements is not None:
for other_element in other_elements:
if other_element not in vertices:
continue
vertice_to = vertices.index(other_element)
# Add 1 so arcs index starts with 1
arc = (vertice_from + 1, vertice_to + 1)
if arc in arcs:
arcs[arc] += 1
else:
arcs[arc] = 1
return arcs
def get_vertices(sorted_dict, minimum_occurences):
print "Getting vertices..."
vertices = []
for i in sorted_dict:
if i[1] >= minimum_occurences and len(i[0]) > 2:
vertices.append(i[0])
return vertices
def save_edgelist(vertices, arcs, fields):
print "Saving..."
PAJEK_EDGELIST = '../results/'+fields+'.net'
with open(PAJEK_EDGELIST, 'wb') as f:
f.write("*Vertices " + str(len(vertices)) + "\r\n")
c = 0
for i in vertices:
c += 1
# Transliterate for Pajek
i = translit(i, "ru", reversed=True)
# Leave only literals and _ for Pajek
i = re.sub("[\W_]+", "_", i)
f.write(str(c) + ' "' + str(i.encode("utf-8")) + '"\r\n')
f.write("*Arcs \r\n")
for i in arcs.items():
f.write(str(i[0][0]) + " " + str(i[0][1]) + " " + str(i[1]) + "\r\n")
def csv2pajek(fields, minimum_occurences):
df = read_data(fields)
df_clean = clean_data(df['field'])
music_freq, srt = get_sorted_dict(df_clean)
v = get_vertices(srt, minimum_occurences)
arcs = get_arcs(df_clean, v)
save_edgelist(v, arcs, fields)
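# Illustrative sketch, not part of the original script: save_edgelist writes a
# Pajek .net file with a "*Vertices" section followed by weighted "*Arcs"
# triples. The hypothetical helper below builds the same lines in memory for a
# tiny made-up vocabulary.
def _pajek_sketch():
    vertices = ["rock", "jazz"]
    arcs = {(1, 2): 3, (2, 1): 1}  # 1-based vertex indices -> co-occurrence count
    lines = ["*Vertices %d" % len(vertices)]
    for i, v in enumerate(vertices):
        lines.append('%d "%s"' % (i + 1, v))
    lines.append("*Arcs")
    for (src, dst), weight in arcs.items():
        lines.append("%d %d %d" % (src, dst, weight))
    return lines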
|
mit
|
Tejeshwarabm/Westwood
|
src/flow-monitor/examples/wifi-olsr-flowmon.py
|
59
|
7427
|
# -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <[email protected]>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
try:
import ns.visualizer
except ImportError:
pass
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
def main(argv):
cmd = ns.core.CommandLine()
cmd.NumNodesSide = None
cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
cmd.Results = None
cmd.AddValue("Results", "Write XML results to file")
cmd.Plot = None
cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
cmd.Parse(argv)
wifi = ns.wifi.WifiHelper.Default()
wifiMac = ns.wifi.WifiMacHelper()
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns.wifi.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac.SetType ("ns3::AdhocWifiMac",
"Ssid", ns.wifi.SsidValue(ssid))
internet = ns.internet.InternetStackHelper()
list_routing = ns.internet.Ipv4ListRoutingHelper()
olsr_routing = ns.olsr.OlsrHelper()
static_routing = ns.internet.Ipv4StaticRoutingHelper()
list_routing.Add(static_routing, 0)
list_routing.Add(olsr_routing, 100)
internet.SetRoutingHelper(list_routing)
ipv4Addresses = ns.internet.Ipv4AddressHelper()
ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
port = 9 # Discard port(RFC 863)
onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]"))
onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]"))
addresses = []
nodes = []
if cmd.NumNodesSide is None:
num_nodes_side = NUM_NODES_SIDE
else:
num_nodes_side = int(cmd.NumNodesSide)
for xi in range(num_nodes_side):
for yi in range(num_nodes_side):
node = ns.network.Node()
nodes.append(node)
internet.Install(ns.network.NodeContainer(node))
mobility = ns.mobility.ConstantPositionMobilityModel()
mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
node.AggregateObject(mobility)
devices = wifi.Install(wifiPhy, wifiMac, node)
ipv4_interfaces = ipv4Addresses.Assign(devices)
addresses.append(ipv4_interfaces.GetAddress(0))
for i, node in enumerate(nodes):
destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
#print i, destaddr
onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
app = onOffHelper.Install(ns.network.NodeContainer(node))
urv = ns.core.UniformRandomVariable()
app.Start(ns.core.Seconds(urv.GetValue(20, 30)))
#internet.EnablePcapAll("wifi-olsr")
flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
#flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
monitor = flowmon_helper.InstallAll()
monitor = flowmon_helper.GetMonitor()
monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
ns.core.Simulator.Stop(ns.core.Seconds(44.0))
ns.core.Simulator.Run()
def print_stats(os, st):
print >> os, " Tx Bytes: ", st.txBytes
print >> os, " Rx Bytes: ", st.rxBytes
print >> os, " Tx Packets: ", st.txPackets
print >> os, " Rx Packets: ", st.rxPackets
print >> os, " Lost Packets: ", st.lostPackets
if st.rxPackets > 0:
print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
if 0:
print >> os, "Delay Histogram"
for i in range(st.delayHistogram.GetNBins () ):
print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
print >> os, "Jitter Histogram"
for i in range(st.jitterHistogram.GetNBins () ):
print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
print >> os, "PacketSize Histogram"
for i in range(st.packetSizeHistogram.GetNBins () ):
print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
for reason, drops in enumerate(st.packetsDropped):
print " Packets dropped by reason %i: %i" % (reason, drops)
#for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
monitor.CheckForLostPackets()
classifier = flowmon_helper.GetClassifier()
if cmd.Results is None:
for flow_id, flow_stats in monitor.GetFlowStats():
t = classifier.FindFlow(flow_id)
proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print_stats(sys.stdout, flow_stats)
else:
print monitor.SerializeToXmlFile(cmd.Results, True, True)
if cmd.Plot is not None:
import pylab
delays = []
for flow_id, flow_stats in monitor.GetFlowStats():
tupl = classifier.FindFlow(flow_id)
if tupl.protocol == 17 and tupl.sourcePort == 698:
continue
delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
pylab.hist(delays, 20)
pylab.xlabel("Delay (s)")
pylab.ylabel("Number of Flows")
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
gpl-2.0
|
tomsilver/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/cbook.py
|
69
|
42525
|
"""
A collection of utility functions and classes. Many (but not all)
from the Python Cookbook -- hence the name cbook
"""
from __future__ import generators
import re, os, errno, sys, StringIO, traceback, locale, threading, types
import time, datetime
import warnings
import numpy as np
import numpy.ma as ma
from weakref import ref
major, minor1, minor2, s, tmp = sys.version_info
# on some systems, locale.getpreferredencoding returns None, which can break unicode
preferredencoding = locale.getpreferredencoding()
def unicode_safe(s):
if preferredencoding is None: return unicode(s)
else: return unicode(s, preferredencoding)
class converter:
"""
Base class for handling string -> python type with support for
missing values
"""
def __init__(self, missing='Null', missingval=None):
self.missing = missing
self.missingval = missingval
def __call__(self, s):
if s==self.missing: return self.missingval
return s
def is_missing(self, s):
return not s.strip() or s==self.missing
class tostr(converter):
'convert to string or None'
def __init__(self, missing='Null', missingval=''):
converter.__init__(self, missing=missing, missingval=missingval)
class todatetime(converter):
'convert to a datetime or None'
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s): return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.datetime(*tup[:6])
class todate(converter):
'convert to a date or None'
def __init__(self, fmt='%Y-%m-%d', missing='Null', missingval=None):
'use a :func:`time.strptime` format string for conversion'
converter.__init__(self, missing, missingval)
self.fmt = fmt
def __call__(self, s):
if self.is_missing(s): return self.missingval
tup = time.strptime(s, self.fmt)
return datetime.date(*tup[:3])
class tofloat(converter):
'convert to a float or None'
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing)
self.missingval = missingval
def __call__(self, s):
if self.is_missing(s): return self.missingval
return float(s)
class toint(converter):
'convert to an int or None'
def __init__(self, missing='Null', missingval=None):
converter.__init__(self, missing, missingval)
def __call__(self, s):
if self.is_missing(s): return self.missingval
return int(s)
class CallbackRegistry:
"""
Handle registering and disconnecting for a set of signals and
callbacks::
signals = 'eat', 'drink', 'be merry'
def oneat(x):
print 'eat', x
def ondrink(x):
print 'drink', x
callbacks = CallbackRegistry(signals)
ideat = callbacks.connect('eat', oneat)
iddrink = callbacks.connect('drink', ondrink)
#tmp = callbacks.connect('drunk', ondrink) # this will raise a ValueError
callbacks.process('drink', 123) # will call ondrink
callbacks.process('eat', 456) # will call oneat
callbacks.process('be merry', 456) # nothing will be called
callbacks.disconnect(ideat) # disconnect oneat
callbacks.process('eat', 456) # nothing will be called
"""
def __init__(self, signals):
'*signals* is a sequence of valid signals'
self.signals = set(signals)
# callbacks is a dict mapping the signal to a dictionary
# mapping callback id to the callback function
self.callbacks = dict([(s, dict()) for s in signals])
self._cid = 0
def _check_signal(self, s):
'make sure *s* is a valid signal or raise a ValueError'
if s not in self.signals:
signals = list(self.signals)
signals.sort()
raise ValueError('Unknown signal "%s"; valid signals are %s'%(s, signals))
def connect(self, s, func):
"""
register *func* to be called when signal *s* is generated;
returns a connection id that can be used with :meth:`disconnect`
"""
self._check_signal(s)
self._cid +=1
self.callbacks[s][self._cid] = func
return self._cid
def disconnect(self, cid):
"""
disconnect the callback registered with callback id *cid*
"""
for eventname, callbackd in self.callbacks.items():
try: del callbackd[cid]
except KeyError: continue
else: return
def process(self, s, *args, **kwargs):
"""
process signal *s*. All of the functions registered to receive
callbacks on *s* will be called with *\*args* and *\*\*kwargs*
"""
self._check_signal(s)
for func in self.callbacks[s].values():
func(*args, **kwargs)
class Scheduler(threading.Thread):
"""
Base class for timeout and idle scheduling
"""
idlelock = threading.Lock()
id = 0
def __init__(self):
threading.Thread.__init__(self)
self.id = Scheduler.id
self._stopped = False
Scheduler.id += 1
self._stopevent = threading.Event()
def stop(self):
if self._stopped: return
self._stopevent.set()
self.join()
self._stopped = True
class Timeout(Scheduler):
"""
Schedule recurring events with a wait time in seconds
"""
def __init__(self, wait, func):
Scheduler.__init__(self)
self.wait = wait
self.func = func
def run(self):
while not self._stopevent.isSet():
self._stopevent.wait(self.wait)
Scheduler.idlelock.acquire()
b = self.func(self)
Scheduler.idlelock.release()
if not b: break
class Idle(Scheduler):
"""
Schedule callbacks when scheduler is idle
"""
# the prototype impl is a bit of a poor man's idle handler. It
# just implements a short wait time. But it will provide a
# placeholder for a proper impl later
waittime = 0.05
def __init__(self, func):
Scheduler.__init__(self)
self.func = func
def run(self):
while not self._stopevent.isSet():
self._stopevent.wait(Idle.waittime)
Scheduler.idlelock.acquire()
b = self.func(self)
Scheduler.idlelock.release()
if not b: break
class silent_list(list):
"""
override repr when returning a list of matplotlib artists to
prevent long, meaningless output. This is meant to be used for a
homogeneous list of a given type
"""
def __init__(self, type, seq=None):
self.type = type
if seq is not None: self.extend(seq)
def __repr__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def __str__(self):
return '<a list of %d %s objects>' % (len(self), self.type)
def strip_math(s):
'remove latex formatting from mathtext'
remove = (r'\mathdefault', r'\rm', r'\cal', r'\tt', r'\it', '\\', '{', '}')
s = s[1:-1]
for r in remove: s = s.replace(r,'')
return s
class Bunch:
"""
Often we want to just collect a bunch of stuff together, naming each
item of the bunch; a dictionary's OK for that, but a small do-nothing
class is even handier, and prettier to use. Whenever you want to
group a few variables:
>>> point = Bunch(datum=2, squared=4, coord=12)
>>> point.datum
2
By: Alex Martelli
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52308
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def unique(x):
'Return a list of unique elements of *x*'
return dict([ (val, 1) for val in x]).keys()
def iterable(obj):
'return true if *obj* is iterable'
try: len(obj)
except: return False
return True
def is_string_like(obj):
'Return True if *obj* looks like a string'
if isinstance(obj, (str, unicode)): return True
# numpy strings are subclass of str, ma strings are not
if ma.isMaskedArray(obj):
if obj.ndim == 0 and obj.dtype.kind in 'SU':
return True
else:
return False
try: obj + ''
except (TypeError, ValueError): return False
return True
def is_sequence_of_strings(obj):
"""
Returns true if *obj* is iterable and contains strings
"""
if not iterable(obj): return False
if is_string_like(obj): return False
for o in obj:
if not is_string_like(o): return False
return True
def is_writable_file_like(obj):
'return true if *obj* looks like a file object with a *write* method'
return hasattr(obj, 'write') and callable(obj.write)
def is_scalar(obj):
'return true if *obj* is not string like and is not iterable'
return not is_string_like(obj) and not iterable(obj)
def is_numlike(obj):
'return true if *obj* looks like a number'
try: obj+1
except TypeError: return False
else: return True
def to_filehandle(fname, flag='r', return_opened=False):
"""
*fname* can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in .gz. *flag* is a
read/write flag for :func:`file`
"""
if is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, flag)
else:
fh = file(fname, flag)
opened = True
elif hasattr(fname, 'seek'):
fh = fname
opened = False
else:
raise ValueError('fname must be a string or file handle')
if return_opened:
return fh, opened
return fh
def is_scalar_or_string(val):
return is_string_like(val) or not iterable(val)
def flatten(seq, scalarp=is_scalar_or_string):
"""
this generator flattens nested containers such as
>>> l=( ('John', 'Hunter'), (1,23), [[[[42,(5,23)]]]])
so that
>>> for i in flatten(l): print i,
John Hunter 1 23 42 5 23
By: Composite of Holger Krekel and Luther Blissett
From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/121294
and Recipe 1.12 in cookbook
"""
for item in seq:
if scalarp(item): yield item
else:
for subitem in flatten(item, scalarp):
yield subitem
class Sorter:
"""
Sort by attribute or item
Example usage::
sort = Sorter()
list = [(1, 2), (4, 8), (0, 3)]
dict = [{'a': 3, 'b': 4}, {'a': 5, 'b': 2}, {'a': 0, 'b': 0},
{'a': 9, 'b': 9}]
sort(list) # default sort
sort(list, 1) # sort by index 1
sort(dict, 'a') # sort a list of dicts by key 'a'
"""
def _helper(self, data, aux, inplace):
aux.sort()
result = [data[i] for junk, i in aux]
if inplace: data[:] = result
return result
def byItem(self, data, itemindex=None, inplace=1):
if itemindex is None:
if inplace:
data.sort()
result = data
else:
result = data[:]
result.sort()
return result
else:
aux = [(data[i][itemindex], i) for i in range(len(data))]
return self._helper(data, aux, inplace)
def byAttribute(self, data, attributename, inplace=1):
aux = [(getattr(data[i],attributename),i) for i in range(len(data))]
return self._helper(data, aux, inplace)
# a couple of handy synonyms
sort = byItem
__call__ = byItem
class Xlator(dict):
"""
All-in-one multiple-string-substitution class
Example usage::
text = "Larry Wall is the creator of Perl"
adict = {
"Larry Wall" : "Guido van Rossum",
"creator" : "Benevolent Dictator for Life",
"Perl" : "Python",
}
print multiple_replace(adict, text)
xlat = Xlator(adict)
print xlat.xlat(text)
"""
def _make_regex(self):
""" Build re object based on the keys of the current dictionary """
return re.compile("|".join(map(re.escape, self.keys())))
def __call__(self, match):
""" Handler invoked for each regex *match* """
return self[match.group(0)]
def xlat(self, text):
""" Translate *text*, returns the modified text. """
return self._make_regex().sub(self, text)
def soundex(name, len=4):
""" soundex module conforming to Odell-Russell algorithm """
# digits holds the soundex values for the alphabet
soundex_digits = '01230120022455012623010202'
sndx = ''
fc = ''
# Translate letters in name to soundex digits
for c in name.upper():
if c.isalpha():
if not fc: fc = c # Remember first letter
d = soundex_digits[ord(c)-ord('A')]
# Duplicate consecutive soundex digits are skipped
if not sndx or (d != sndx[-1]):
sndx += d
# Replace first digit with first letter
sndx = fc + sndx[1:]
# Remove all 0s from the soundex code
sndx = sndx.replace('0', '')
# Return soundex code truncated or 0-padded to len characters
return (sndx + (len * '0'))[:len]
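# Illustrative sketch, not part of the original module: soundex maps names that
# sound alike to the same code.
def _soundex_sketch():
    return soundex('Robert'), soundex('Rupert')  # both 'R163'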
class Null:
""" Null objects always and reliably "do nothing." """
def __init__(self, *args, **kwargs): pass
def __call__(self, *args, **kwargs): return self
def __str__(self): return "Null()"
def __repr__(self): return "Null()"
def __nonzero__(self): return 0
def __getattr__(self, name): return self
def __setattr__(self, name, value): return self
def __delattr__(self, name): return self
def mkdirs(newdir, mode=0777):
"""
make directory *newdir* recursively, and set *mode*. Equivalent to ::
> mkdir -p NEWDIR
> chmod MODE NEWDIR
"""
try:
if not os.path.exists(newdir):
parts = os.path.split(newdir)
for i in range(1, len(parts)+1):
thispart = os.path.join(*parts[:i])
if not os.path.exists(thispart):
os.makedirs(thispart, mode)
except OSError, err:
# Reraise the error unless it's about an already existing directory
if err.errno != errno.EEXIST or not os.path.isdir(newdir):
raise
class GetRealpathAndStat:
def __init__(self):
self._cache = {}
def __call__(self, path):
result = self._cache.get(path)
if result is None:
realpath = os.path.realpath(path)
if sys.platform == 'win32':
stat_key = realpath
else:
stat = os.stat(realpath)
stat_key = (stat.st_ino, stat.st_dev)
result = realpath, stat_key
self._cache[path] = result
return result
get_realpath_and_stat = GetRealpathAndStat()
def dict_delall(d, keys):
'delete all of the *keys* from the :class:`dict` *d*'
for key in keys:
try: del d[key]
except KeyError: pass
class RingBuffer:
""" class that implements a not-yet-full buffer """
def __init__(self,size_max):
self.max = size_max
self.data = []
class __Full:
""" class that implements a full buffer """
def append(self, x):
""" Append an element overwriting the oldest one. """
self.data[self.cur] = x
self.cur = (self.cur+1) % self.max
def get(self):
""" return list of elements in correct order """
return self.data[self.cur:]+self.data[:self.cur]
def append(self,x):
"""append an element at the end of the buffer"""
self.data.append(x)
if len(self.data) == self.max:
self.cur = 0
# Permanently change self's class from non-full to full
self.__class__ = self.__Full  # attribute lookup so the mangled nested-class name resolves
def get(self):
""" Return a list of elements from the oldest to the newest. """
return self.data
def __get_item__(self, i):
return self.data[i % len(self.data)]
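# Illustrative usage sketch, not part of the original module: once the buffer
# has filled, append() overwrites the oldest element and get() returns the
# contents from oldest to newest (this relies on the attribute-style __Full
# lookup in append above).
def _ringbuffer_sketch():
    rb = RingBuffer(3)
    for value in (1, 2, 3, 4):
        rb.append(value)
    return rb.get()  # [2, 3, 4]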
def get_split_ind(seq, N):
"""
*seq* is a list of words. Return the index into seq such that::
len(' '.join(seq[:ind])) <= N
"""
sLen = 0
# todo: use Alex's xrange pattern from the cbook for efficiency
for (word, ind) in zip(seq, range(len(seq))):
sLen += len(word) + 1 # +1 to account for the len(' ')
if sLen>=N: return ind
return len(seq)
def wrap(prefix, text, cols):
'wrap *text* with *prefix* at length *cols*'
pad = ' '*len(prefix.expandtabs())
available = cols - len(pad)
seq = text.split(' ')
Nseq = len(seq)
ind = 0
lines = []
while ind<Nseq:
lastInd = ind
ind += get_split_ind(seq[ind:], available)
lines.append(seq[lastInd:ind])
# add the prefix to the first line, pad with spaces otherwise
ret = prefix + ' '.join(lines[0]) + '\n'
for line in lines[1:]:
ret += pad + ' '.join(line) + '\n'
return ret
# A regular expression used to determine the amount of space to
# remove. It looks for the first sequence of spaces immediately
# following the first newline, or at the beginning of the string.
_find_dedent_regex = re.compile("(?:(?:\n\r?)|^)( *)\S")
# A cache to hold the regexs that actually remove the indent.
_dedent_regex = {}
def dedent(s):
"""
Remove excess indentation from docstring *s*.
Discards any leading blank lines, then removes up to n whitespace
characters from each line, where n is the number of leading
whitespace characters in the first line. It differs from
textwrap.dedent in its deletion of leading blank lines and its use
of the first non-blank line to determine the indentation.
It is also faster in most cases.
"""
# This implementation has a somewhat obtuse use of regular
# expressions. However, this function accounted for almost 30% of
# matplotlib startup time, so it is worthy of optimization at all
# costs.
if not s: # includes case of s is None
return ''
match = _find_dedent_regex.match(s)
if match is None:
return s
# This is the number of spaces to remove from the left-hand side.
nshift = match.end(1) - match.start(1)
if nshift == 0:
return s
# Get a regex that will remove *up to* nshift spaces from the
# beginning of each line. If it isn't in the cache, generate it.
unindent = _dedent_regex.get(nshift, None)
if unindent is None:
unindent = re.compile("\n\r? {0,%d}" % nshift)
_dedent_regex[nshift] = unindent
result = unindent.sub("\n", s).strip()
return result
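# Illustrative sketch, not part of the original module: dedent() drops leading
# blank lines and removes the indentation of the first non-blank line from
# every subsequent line.
def _dedent_sketch():
    s = "\n    first line\n      indented more\n    last line"
    return dedent(s)  # 'first line\n  indented more\nlast line'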
def listFiles(root, patterns='*', recurse=1, return_folders=0):
"""
Recursively list files
from Parmar and Martelli in the Python Cookbook
"""
import os.path, fnmatch
# Expand patterns from semicolon-separated string to list
pattern_list = patterns.split(';')
# Collect input and output arguments into one bunch
class Bunch:
def __init__(self, **kwds): self.__dict__.update(kwds)
arg = Bunch(recurse=recurse, pattern_list=pattern_list,
return_folders=return_folders, results=[])
def visit(arg, dirname, files):
# Append to arg.results all relevant files (and perhaps folders)
for name in files:
fullname = os.path.normpath(os.path.join(dirname, name))
if arg.return_folders or os.path.isfile(fullname):
for pattern in arg.pattern_list:
if fnmatch.fnmatch(name, pattern):
arg.results.append(fullname)
break
# Block recursion if recursion was disallowed
if not arg.recurse: files[:]=[]
os.path.walk(root, visit, arg)
return arg.results
def get_recursive_filelist(args):
"""
Recurse through all the files and dirs in *args*, ignoring symbolic
links, and return the files as a list of strings
"""
files = []
for arg in args:
if os.path.isfile(arg):
files.append(arg)
continue
if os.path.isdir(arg):
newfiles = listFiles(arg, recurse=1, return_folders=1)
files.extend(newfiles)
return [f for f in files if not os.path.islink(f)]
def pieces(seq, num=2):
"Break up the *seq* into *num* tuples"
start = 0
while 1:
item = seq[start:start+num]
if not len(item): break
yield item
start += num
def exception_to_str(s = None):
sh = StringIO.StringIO()
if s is not None: print >>sh, s
traceback.print_exc(file=sh)
return sh.getvalue()
def allequal(seq):
"""
Return *True* if all elements of *seq* compare equal. If *seq* is
0 or 1 length, return *True*
"""
if len(seq)<2: return True
val = seq[0]
for i in xrange(1, len(seq)):
thisval = seq[i]
if thisval != val: return False
return True
def alltrue(seq):
"""
Return *True* if all elements of *seq* evaluate to *True*. If
*seq* is empty, return *False*.
"""
if not len(seq): return False
for val in seq:
if not val: return False
return True
def onetrue(seq):
"""
Return *True* if at least one element of *seq* is *True*. If *seq* is
empty, return *False*.
"""
if not len(seq): return False
for val in seq:
if val: return True
return False
def allpairs(x):
"""
return all possible pairs in sequence *x*
Condensed by Alex Martelli from this thread_ on c.l.python
.. _thread: http://groups.google.com/groups?q=all+pairs+group:*python*&hl=en&lr=&ie=UTF-8&selm=mailman.4028.1096403649.5135.python-list%40python.org&rnum=1
"""
return [ (s, f) for i, f in enumerate(x) for s in x[i+1:] ]
# python 2.2 dicts don't have pop--but we don't support 2.2 any more
def popd(d, *args):
"""
Should behave like python2.3 :meth:`dict.pop` method; *d* is a
:class:`dict`::
# returns value for key and deletes item; raises a KeyError if key
# is not in dict
val = popd(d, key)
# returns value for key if key exists, else default. Delete key,
# val item if it exists. Will not raise a KeyError
val = popd(d, key, default)
"""
warnings.warn("Use native python dict.pop method", DeprecationWarning)
# warning added 2008/07/22
if len(args)==1:
key = args[0]
val = d[key]
del d[key]
elif len(args)==2:
key, default = args
val = d.get(key, default)
try: del d[key]
except KeyError: pass
return val
class maxdict(dict):
"""
    A dictionary with a maximum size; this doesn't override all the
    relevant methods to constrain size, just __setitem__, so use with
    caution
"""
def __init__(self, maxsize):
dict.__init__(self)
self.maxsize = maxsize
self._killkeys = []
def __setitem__(self, k, v):
if len(self)>=self.maxsize:
del self[self._killkeys[0]]
del self._killkeys[0]
dict.__setitem__(self, k, v)
self._killkeys.append(k)
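# Illustrative behaviour of maxdict (added for clarity; not in the original
# source): once maxsize entries are stored, the oldest insertion is evicted.
#
#   >>> d = maxdict(2)
#   >>> d['a'] = 1; d['b'] = 2; d['c'] = 3
#   >>> sorted(d.keys())
#   ['b', 'c']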
class Stack:
"""
Implement a stack where elements can be pushed on and you can move
back and forth. But no pop. Should mimic home / back / forward
in a browser
"""
def __init__(self, default=None):
self.clear()
self._default = default
def __call__(self):
        'return the current element, or the default value if the stack is empty'
if not len(self._elements): return self._default
else: return self._elements[self._pos]
def forward(self):
'move the position forward and return the current element'
N = len(self._elements)
if self._pos<N-1: self._pos += 1
return self()
def back(self):
'move the position back and return the current element'
if self._pos>0: self._pos -= 1
return self()
def push(self, o):
"""
push object onto stack at current position - all elements
occurring later than the current position are discarded
"""
self._elements = self._elements[:self._pos+1]
self._elements.append(o)
self._pos = len(self._elements)-1
return self()
def home(self):
'push the first element onto the top of the stack'
if not len(self._elements): return
self.push(self._elements[0])
return self()
def empty(self):
return len(self._elements)==0
def clear(self):
'empty the stack'
self._pos = -1
self._elements = []
def bubble(self, o):
"""
raise *o* to the top of the stack and return *o*. *o* must be
in the stack
"""
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
bubbles = []
for thiso in old:
if thiso==o: bubbles.append(thiso)
else: self.push(thiso)
for thiso in bubbles:
self.push(o)
return o
def remove(self, o):
'remove element *o* from the stack'
if o not in self._elements:
raise ValueError('Unknown element o')
old = self._elements[:]
self.clear()
for thiso in old:
if thiso==o: continue
else: self.push(thiso)
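# Illustrative browser-style navigation with Stack (added for clarity; not in
# the original source):
#
#   >>> s = Stack()
#   >>> s.push('a'); s.push('b'); s.push('c')
#   >>> s.back()      # returns 'b'
#   >>> s.push('d')   # discards 'c'; the stack is now a, b, d
#   >>> s.forward()   # already at the newest element, so still returns 'd'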
def popall(seq):
'empty a list'
for i in xrange(len(seq)): seq.pop()
def finddir(o, match, case=False):
"""
    Return all attributes of *o* which contain the string *match*.  If
    *case* is True, the match is case-sensitive.
"""
if case:
names = [(name,name) for name in dir(o) if is_string_like(name)]
else:
names = [(name.lower(), name) for name in dir(o) if is_string_like(name)]
match = match.lower()
return [orig for name, orig in names if name.find(match)>=0]
def reverse_dict(d):
'reverse the dictionary -- may lose data if values are not unique!'
return dict([(v,k) for k,v in d.items()])
def report_memory(i=0): # argument may go away
'return the memory consumed by process'
pid = os.getpid()
if sys.platform=='sunos5':
a2 = os.popen('ps -p %d -o osz' % pid).readlines()
mem = int(a2[-1].strip())
elif sys.platform.startswith('linux'):
a2 = os.popen('ps -p %d -o rss,sz' % pid).readlines()
mem = int(a2[1].split()[1])
    elif sys.platform.startswith('darwin'):
        a2 = os.popen('ps -p %d -o rss,vsz' % pid).readlines()
        mem = int(a2[1].split()[0])
    else:
        raise NotImplementedError(
            "report_memory only supports sunos5, linux and darwin")
    return mem
_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d'
def safezip(*args):
'make sure *args* are equal len before zipping'
Nx = len(args[0])
for i, arg in enumerate(args[1:]):
if len(arg) != Nx:
raise ValueError(_safezip_msg % (Nx, i+1, len(arg)))
return zip(*args)
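# Illustrative example for safezip (added for clarity; not in the original
# source):
#
#   >>> safezip([1, 2, 3], [4, 5, 6])
#   [(1, 4), (2, 5), (3, 6)]
#   >>> safezip([1, 2, 3], [4, 5])    # raises ValueError with a clear message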
def issubclass_safe(x, klass):
    'return issubclass(x, klass), returning False if a TypeError is raised'
try:
return issubclass(x, klass)
except TypeError:
return False
class MemoryMonitor:
def __init__(self, nmax=20000):
self._nmax = nmax
self._mem = np.zeros((self._nmax,), np.int32)
self.clear()
def clear(self):
self._n = 0
self._overflow = False
def __call__(self):
mem = report_memory()
if self._n < self._nmax:
self._mem[self._n] = mem
self._n += 1
else:
self._overflow = True
return mem
def report(self, segments=4):
n = self._n
segments = min(n, segments)
dn = int(n/segments)
ii = range(0, n, dn)
ii[-1] = n-1
print
print 'memory report: i, mem, dmem, dmem/nloops'
print 0, self._mem[0]
for i in range(1, len(ii)):
di = ii[i] - ii[i-1]
if di == 0:
continue
dm = self._mem[ii[i]] - self._mem[ii[i-1]]
print '%5d %5d %3d %8.3f' % (ii[i], self._mem[ii[i]],
dm, dm / float(di))
if self._overflow:
print "Warning: array size was too small for the number of calls."
def xy(self, i0=0, isub=1):
x = np.arange(i0, self._n, isub)
return x, self._mem[i0:self._n:isub]
def plot(self, i0=0, isub=1, fig=None):
if fig is None:
from pylab import figure, show
fig = figure()
ax = fig.add_subplot(111)
ax.plot(*self.xy(i0, isub))
fig.canvas.draw()
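# Illustrative usage of MemoryMonitor (added for clarity; not in the original
# source): call the instance inside a loop to sample the process size, then
# summarize.  do_something() below is a hypothetical workload.
#
#   >>> mm = MemoryMonitor()
#   >>> for i in range(1000):
#   ...     do_something()
#   ...     mm()              # record current memory use
#   >>> mm.report(segments=4)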
def print_cycles(objects, outstream=sys.stdout, show_progress=False):
"""
*objects*
A list of objects to find cycles in. It is often useful to
pass in gc.garbage to find the cycles that are preventing some
objects from being garbage collected.
*outstream*
The stream for output.
*show_progress*
If True, print the number of objects reached as they are found.
"""
import gc
from types import FrameType
def print_path(path):
for i, step in enumerate(path):
# next "wraps around"
next = path[(i + 1) % len(path)]
outstream.write(" %s -- " % str(type(step)))
if isinstance(step, dict):
for key, val in step.items():
if val is next:
outstream.write("[%s]" % repr(key))
break
if key is next:
outstream.write("[key] = %s" % repr(val))
break
elif isinstance(step, list):
outstream.write("[%d]" % step.index(next))
elif isinstance(step, tuple):
outstream.write("( tuple )")
else:
outstream.write(repr(step))
outstream.write(" ->\n")
outstream.write("\n")
def recurse(obj, start, all, current_path):
if show_progress:
outstream.write("%d\r" % len(all))
all[id(obj)] = None
referents = gc.get_referents(obj)
for referent in referents:
# If we've found our way back to the start, this is
# a cycle, so print it out
if referent is start:
print_path(current_path)
# Don't go back through the original list of objects, or
# through temporary references to the object, since those
# are just an artifact of the cycle detector itself.
elif referent is objects or isinstance(referent, FrameType):
continue
# We haven't seen this object before, so recurse
elif id(referent) not in all:
recurse(referent, start, all, current_path + [obj])
for obj in objects:
outstream.write("Examining: %r\n" % (obj,))
recurse(obj, obj, { }, [])
class Grouper(object):
"""
This class provides a lightweight way to group arbitrary objects
together into disjoint sets when a full-blown graph data structure
would be overkill.
Objects can be joined using :meth:`join`, tested for connectedness
    using :meth:`joined`, and all disjoint sets can be retrieved by
using the object as an iterator.
The objects being joined must be hashable.
For example:
    >>> g = Grouper()
>>> g.join('a', 'b')
>>> g.join('b', 'c')
>>> g.join('d', 'e')
>>> list(g)
[['a', 'b', 'c'], ['d', 'e']]
>>> g.joined('a', 'b')
True
>>> g.joined('a', 'c')
True
>>> g.joined('a', 'd')
False
"""
def __init__(self, init=[]):
mapping = self._mapping = {}
for x in init:
mapping[ref(x)] = [ref(x)]
def __contains__(self, item):
return ref(item) in self._mapping
def clean(self):
"""
Clean dead weak references from the dictionary
"""
mapping = self._mapping
for key, val in mapping.items():
if key() is None:
del mapping[key]
val.remove(key)
def join(self, a, *args):
"""
Join given arguments into the same set. Accepts one or more
arguments.
"""
mapping = self._mapping
set_a = mapping.setdefault(ref(a), [ref(a)])
for arg in args:
set_b = mapping.get(ref(arg))
if set_b is None:
set_a.append(ref(arg))
mapping[ref(arg)] = set_a
elif set_b is not set_a:
if len(set_b) > len(set_a):
set_a, set_b = set_b, set_a
set_a.extend(set_b)
for elem in set_b:
mapping[elem] = set_a
self.clean()
def joined(self, a, b):
"""
Returns True if *a* and *b* are members of the same set.
"""
self.clean()
mapping = self._mapping
try:
return mapping[ref(a)] is mapping[ref(b)]
except KeyError:
return False
def __iter__(self):
"""
Iterate over each of the disjoint sets as a list.
The iterator is invalid if interleaved with calls to join().
"""
self.clean()
class Token: pass
token = Token()
        # Mark each group as we come across it by appending a token,
# and don't yield it twice
for group in self._mapping.itervalues():
if not group[-1] is token:
yield [x() for x in group]
group.append(token)
# Cleanup the tokens
for group in self._mapping.itervalues():
if group[-1] is token:
del group[-1]
def get_siblings(self, a):
"""
Returns all of the items joined with *a*, including itself.
"""
self.clean()
siblings = self._mapping.get(ref(a), [ref(a)])
return [x() for x in siblings]
def simple_linear_interpolation(a, steps):
    steps = int(np.floor(steps))
    new_length = ((len(a) - 1) * steps) + 1
new_shape = list(a.shape)
new_shape[0] = new_length
result = np.zeros(new_shape, a.dtype)
result[0] = a[0]
a0 = a[0:-1]
a1 = a[1: ]
delta = ((a1 - a0) / steps)
for i in range(1, int(steps)):
result[i::steps] = delta * i + a0
result[steps::steps] = a1
return result
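# Illustrative example for simple_linear_interpolation (added for clarity; not
# in the original source): with steps=2, one midpoint is inserted in each gap.
#
#   >>> simple_linear_interpolation(np.array([0., 2., 4.]), 2)
#   array([ 0.,  1.,  2.,  3.,  4.])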
def recursive_remove(path):
if os.path.isdir(path):
for fname in glob.glob(os.path.join(path, '*')) + glob.glob(os.path.join(path, '.*')):
if os.path.isdir(fname):
recursive_remove(fname)
os.removedirs(fname)
else:
os.remove(fname)
#os.removedirs(path)
else:
os.remove(path)
def delete_masked_points(*args):
"""
Find all masked and/or non-finite points in a set of arguments,
and return the arguments with only the unmasked points remaining.
Arguments can be in any of 5 categories:
1) 1-D masked arrays
2) 1-D ndarrays
3) ndarrays with more than one dimension
4) other non-string iterables
5) anything else
The first argument must be in one of the first four categories;
any argument with a length differing from that of the first
argument (and hence anything in category 5) then will be
passed through unchanged.
Masks are obtained from all arguments of the correct length
in categories 1, 2, and 4; a point is bad if masked in a masked
array or if it is a nan or inf. No attempt is made to
extract a mask from categories 2, 3, and 4 if :meth:`np.isfinite`
does not yield a Boolean array.
All input arguments that are not passed unchanged are returned
as ndarrays after removing the points or rows corresponding to
masks in any of the arguments.
A vastly simpler version of this function was originally
written as a helper for Axes.scatter().
"""
if not len(args):
return ()
if (is_string_like(args[0]) or not iterable(args[0])):
raise ValueError("First argument must be a sequence")
nrecs = len(args[0])
margs = []
seqlist = [False] * len(args)
for i, x in enumerate(args):
if (not is_string_like(x)) and iterable(x) and len(x) == nrecs:
seqlist[i] = True
if ma.isMA(x):
if x.ndim > 1:
raise ValueError("Masked arrays must be 1-D")
else:
x = np.asarray(x)
margs.append(x)
masks = [] # list of masks that are True where good
for i, x in enumerate(margs):
if seqlist[i]:
if x.ndim > 1:
continue # Don't try to get nan locations unless 1-D.
if ma.isMA(x):
masks.append(~ma.getmaskarray(x)) # invert the mask
xd = x.data
else:
xd = x
try:
mask = np.isfinite(xd)
if isinstance(mask, np.ndarray):
masks.append(mask)
            except Exception:  # FIXME: put in a tuple of possible exceptions?
pass
if len(masks):
mask = reduce(np.logical_and, masks)
igood = mask.nonzero()[0]
if len(igood) < nrecs:
for i, x in enumerate(margs):
if seqlist[i]:
margs[i] = x.take(igood, axis=0)
for i, x in enumerate(margs):
if seqlist[i] and ma.isMA(x):
margs[i] = x.filled()
return margs
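# Illustrative example for delete_masked_points (added for clarity; not in the
# original source): rows that are masked or non-finite in either argument are
# dropped from both.
#
#   >>> x = ma.array([1., 2., 3., 4.], mask=[0, 0, 1, 0])
#   >>> y = np.array([1., np.nan, 3., 4.])
#   >>> delete_masked_points(x, y)
#   [array([ 1.,  4.]), array([ 1.,  4.])]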
def unmasked_index_ranges(mask, compressed = True):
'''
Find index ranges where *mask* is *False*.
*mask* will be flattened if it is not already 1-D.
Returns Nx2 :class:`numpy.ndarray` with each row the start and stop
indices for slices of the compressed :class:`numpy.ndarray`
corresponding to each of *N* uninterrupted runs of unmasked
values. If optional argument *compressed* is *False*, it returns
the start and stop indices into the original :class:`numpy.ndarray`,
not the compressed :class:`numpy.ndarray`. Returns *None* if there
are no unmasked values.
Example::
y = ma.array(np.arange(5), mask = [0,0,1,0,0])
ii = unmasked_index_ranges(ma.getmaskarray(y))
# returns array [[0,2,] [2,4,]]
y.compressed()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
ii = unmasked_index_ranges(ma.getmaskarray(y), compressed=False)
# returns array [[0, 2], [3, 5]]
y.filled()[ii[1,0]:ii[1,1]]
# returns array [3,4,]
Prior to the transforms refactoring, this was used to support
masked arrays in Line2D.
'''
mask = mask.reshape(mask.size)
m = np.concatenate(((1,), mask, (1,)))
indices = np.arange(len(mask) + 1)
mdif = m[1:] - m[:-1]
i0 = np.compress(mdif == -1, indices)
i1 = np.compress(mdif == 1, indices)
assert len(i0) == len(i1)
if len(i1) == 0:
return None # Maybe this should be np.zeros((0,2), dtype=int)
if not compressed:
return np.concatenate((i0[:, np.newaxis], i1[:, np.newaxis]), axis=1)
seglengths = i1 - i0
breakpoints = np.cumsum(seglengths)
ic0 = np.concatenate(((0,), breakpoints[:-1]))
ic1 = breakpoints
return np.concatenate((ic0[:, np.newaxis], ic1[:, np.newaxis]), axis=1)
# a dict to cross-map linestyle arguments
_linestyles = [('-', 'solid'),
('--', 'dashed'),
('-.', 'dashdot'),
(':', 'dotted')]
ls_mapper = dict(_linestyles)
ls_mapper.update([(ls[1], ls[0]) for ls in _linestyles])
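# Illustrative lookups (added for clarity; not in the original source): the
# mapping works in both directions between short and long linestyle names.
#
#   >>> ls_mapper['--']
#   'dashed'
#   >>> ls_mapper['dashed']
#   '--'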
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('less_simple_linear_interpolation has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.less_simple_linear_interpolation( x, y, xi, extrap=extrap )
def isvector(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('isvector has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
    return mlab.isvector(X)
def vector_lengths( X, P=2., axis=None ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('vector_lengths has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
    return mlab.vector_lengths(X, P=P, axis=axis)
def distances_along_curve( X ):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('distances_along_curve has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.distances_along_curve( X )
def path_length(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('path_length has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.path_length(X)
def is_closed_polygon(X):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('is_closed_polygon has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.is_closed_polygon(X)
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
This function has been moved to matplotlib.mlab -- please import
it from there
"""
# deprecated from cbook in 0.98.4
warnings.warn('quad2cubic has been moved to matplotlib.mlab -- please import it from there', DeprecationWarning)
import matplotlib.mlab as mlab
return mlab.quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y)
if __name__=='__main__':
assert( allequal([1,1,1]) )
assert(not allequal([1,1,0]) )
assert( allequal([]) )
assert( allequal(('a', 'a')))
assert( not allequal(('a', 'b')))
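    # A few extra illustrative checks (added for clarity; not part of the
    # original self-test)
    assert( alltrue([1, 1, 1]) )
    assert( not alltrue([]) )
    assert( onetrue([0, 0, 1]) )
    assert( not onetrue([]) )
    assert( allpairs([1, 2, 3]) == [(2, 1), (3, 1), (3, 2)] )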
|
gpl-3.0
|
elaeon/ML
|
src/dama/reg/extended/w_sklearn.py
|
1
|
1252
|
from dama.reg.wrappers import SKLP
from sklearn.ensemble import RandomForestRegressor as SkRandomForestReg
from sklearn.ensemble import GradientBoostingRegressor as SkGradientBoostingReg
import pandas as pd
class RandomForestRegressor(SKLP):
def prepare_model(self, obj_fn=None, num_steps: int = 0, model_params=None, batch_size: int = None):
model = SkRandomForestReg(**model_params)
reg_model = model.fit(self.ds[self.data_groups["data_train_group"]].to_ndarray(),
self.ds[self.data_groups["target_train_group"]].to_ndarray())
return self.ml_model(reg_model)
def feature_importance(self):
df = pd.DataFrame({'importance': self.model.model.feature_importances_, 'gain': None}).sort_values(
by=['importance'], ascending=False)
return df
class GradientBoostingRegressor(SKLP):
def prepare_model(self, obj_fn=None, num_steps: int = 0, model_params=None, batch_size: int = None):
model = SkGradientBoostingReg(**model_params)
reg_model = model.fit(self.ds[self.data_groups["data_train_group"]].to_ndarray(),
self.ds[self.data_groups["target_train_group"]].to_ndarray())
return self.ml_model(reg_model)
|
apache-2.0
|
violasox/tellurium
|
spyder_mod/spyderlib/spyder.py
|
1
|
108037
|
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2013 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""
Spyder, the Scientific PYthon Development EnviRonment
=====================================================
Developed and maintained by Pierre Raybaut
Copyright © 2009-2012 Pierre Raybaut
Licensed under the terms of the MIT License
(see spyderlib/__init__.py for details)
"""
from __future__ import print_function
#==============================================================================
# Stdlib imports
#==============================================================================
import atexit
import errno
import os
import os.path as osp
import re
import socket
import shutil
import sys
import threading
#==============================================================================
# Keeping a reference to the original sys.exit before patching it
#==============================================================================
ORIGINAL_SYS_EXIT = sys.exit
#==============================================================================
# Test if IPython v0.13+ is installed so that we can switch to PyQt API #2 if needed
#==============================================================================
from spyderlib.baseconfig import _
from spyderlib.ipythonconfig import IPYTHON_QT_INSTALLED, SUPPORTED_IPYTHON
from spyderlib import dependencies
dependencies.add("IPython", _("IPython Console integration"),
required_version=SUPPORTED_IPYTHON)
if IPYTHON_QT_INSTALLED:
    # Importing IPython may set the QT_API environment variable
import IPython # analysis:ignore
if os.environ.get('QT_API', 'pyqt') == 'pyqt':
        # If PyQt is the selected GUI toolkit (at this stage, only the
        # bootstrap script may have set this option), switch to
        # PyQt API #2 by simply importing the IPython qt module
os.environ['QT_API'] = 'pyqt'
try:
from IPython.external import qt #analysis:ignore
except ImportError:
# Avoid raising any error here: the spyderlib.requirements module
# will take care of it, in a user-friendly way (Tkinter message box
# if no GUI toolkit is installed)
pass
#==============================================================================
# Check requirements
#==============================================================================
from spyderlib import requirements
requirements.check_path()
requirements.check_qt()
#==============================================================================
# Windows platforms only: support for hiding the attached console window
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
if os.name == 'nt':
from spyderlib.utils.windows import (set_attached_console_visible,
is_attached_console_visible)
#==============================================================================
# Workaround: importing rope.base.project here, otherwise this module can't
# be imported if Spyder was executed from a folder other than spyderlib
#==============================================================================
try:
import rope.base.project # analysis:ignore
except ImportError:
pass
#==============================================================================
# Qt imports
#==============================================================================
from spyderlib.qt.QtGui import (QApplication, QMainWindow, QSplashScreen,
QPixmap, QMessageBox, QMenu, QColor, QShortcut,
QKeySequence, QDockWidget, QAction,
QDesktopServices)
from spyderlib.qt.QtCore import SIGNAL, QPoint, Qt, QSize, QByteArray, QUrl
from spyderlib.qt.compat import (from_qvariant, getopenfilename,
getsavefilename)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
# when PySide is selected by the QT_API environment variable and when PyQt4
# is also installed (or any other Qt-based application prepending a directory
# containing incompatible Qt DLLs versions in PATH):
from spyderlib.qt import QtSvg # analysis:ignore
#==============================================================================
# Initial splash screen to reduce perceived startup time.
# It blends with MainWindow's own splash screen (i.e. self.splash) and is
# hidden just before that one.
#==============================================================================
from spyderlib.baseconfig import _, get_image_path
SPLASH_APP = QApplication([''])
SPLASH = QSplashScreen(QPixmap(get_image_path('Tellurium_splash.png'), 'png'))
SPLASH_FONT = SPLASH.font()
SPLASH_FONT.setPixelSize(10)
SPLASH.setFont(SPLASH_FONT)
SPLASH.show()
SPLASH.showMessage(_("Initializing..."), Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.white))
QApplication.processEvents()
#==============================================================================
# Local utility imports
#==============================================================================
from spyderlib import __version__, __project_url__, __forum_url__, get_versions
from spyderlib.baseconfig import (get_conf_path, get_module_data_path,
get_module_source_path, STDERR, DEBUG, DEV,
debug_print, TEST, SUBFOLDER)
from spyderlib.config import CONF, EDIT_EXT, IMPORT_EXT, OPEN_FILES_PORT
from spyderlib.cli_options import get_options
from spyderlib.userconfig import NoDefault
from spyderlib.utils import encoding, programs
from spyderlib.utils.iofuncs import load_session, save_session, reset_session
from spyderlib.utils.programs import is_module_installed
from spyderlib.utils.introspection import module_completion
from spyderlib.utils.misc import select_port
from spyderlib.py3compat import (PY3, to_text_string, is_text_string, getcwd,
u, qbytearray_to_str, configparser as cp)
#==============================================================================
# Local gui imports
#==============================================================================
# NOTE: Move (if possible) import's of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
try:
from spyderlib.utils.environ import WinUserEnvDialog
except ImportError:
WinUserEnvDialog = None # analysis:ignore
from spyderlib.utils.qthelpers import (create_action, add_actions, get_icon,
get_std_icon, add_shortcut_to_tooltip,
create_module_bookmark_actions,
create_bookmark_action,
create_program_action, DialogManager,
keybinding, qapplication,
create_python_script_action, file_uri)
from spyderlib.guiconfig import get_shortcut, remove_deprecated_shortcuts
from spyderlib.otherplugins import get_spyderplugins_mods
#==============================================================================
# To save and load temp sessions
#==============================================================================
TEMP_SESSION_PATH = get_conf_path('temp.session.tar')
#==============================================================================
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
#==============================================================================
CWD = getcwd()
#==============================================================================
# Spyder's main window widgets utilities
#==============================================================================
def get_python_doc_path():
"""
Return Python documentation path
(Windows: return the PythonXX.chm path if available)
"""
if os.name == 'nt':
doc_path = osp.join(sys.prefix, "Doc")
if not osp.isdir(doc_path):
return
python_chm = [path for path in os.listdir(doc_path)
if re.match(r"(?i)Python[0-9]{3}.chm", path)]
if python_chm:
return file_uri(osp.join(doc_path, python_chm[0]))
else:
vinf = sys.version_info
doc_path = '/usr/share/doc/python%d.%d/html' % (vinf[0], vinf[1])
python_doc = osp.join(doc_path, "index.html")
if osp.isfile(python_doc):
return file_uri(python_doc)
def get_focus_python_shell():
    """Extract and return the Python shell from the currently focused widget
    Return None if the focus widget is not a Python shell (e.g. IPython kernel)"""
widget = QApplication.focusWidget()
from spyderlib.widgets.shell import PythonShellWidget
from spyderlib.widgets.externalshell.pythonshell import ExternalPythonShell
if isinstance(widget, PythonShellWidget):
return widget
elif isinstance(widget, ExternalPythonShell):
return widget.shell
def get_focus_widget_properties():
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
widget = QApplication.focusWidget()
from spyderlib.widgets.shell import ShellBaseWidget
from spyderlib.widgets.editor import TextEditBaseWidget
textedit_properties = None
if isinstance(widget, (ShellBaseWidget, TextEditBaseWidget)):
console = isinstance(widget, ShellBaseWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
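# Illustrative unpacking of the tuple returned above (added for clarity; not in
# the original source); the local names below are hypothetical:
#
#   >>> widget, props = get_focus_widget_properties()
#   >>> if props is not None:
#   ...     is_console, not_readonly, readwrite_editor = props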
#==============================================================================
# Qt Stylesheet for MainWindow
#==============================================================================
#TODO: Improve the stylesheet below for separator handles to be visible
# (in Qt, these handles are by default not visible on Windows!)
STYLESHEET="""
QSplitter::handle {
margin-left: 4px;
margin-right: 4px;
}
QSplitter::handle:horizontal {
width: 1px;
border-width: 0px;
background-color: lightgray;
}
QSplitter::handle:vertical {
border-top: 2px ridge lightgray;
border-bottom: 2px;
}
QMainWindow::separator:vertical {
margin-left: 1px;
margin-top: 25px;
margin-bottom: 25px;
border-left: 2px groove lightgray;
border-right: 1px;
}
QMainWindow::separator:horizontal {
margin-top: 1px;
margin-left: 5px;
margin-right: 5px;
border-top: 2px groove lightgray;
border-bottom: 2px;
}
"""
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = QMainWindow.AllowTabbedDocks|QMainWindow.AllowNestedDocks
SPYDER_PATH = get_conf_path('path')
BOOKMARKS = (
('numpy', "http://docs.scipy.org/doc/",
_("Numpy and Scipy documentation")),
('matplotlib', "http://matplotlib.sourceforge.net/contents.html",
_("Matplotlib documentation")),
('PyQt4',
"http://pyqt.sourceforge.net/Docs/PyQt4/",
_("PyQt4 Reference Guide")),
('PyQt4',
"http://pyqt.sourceforge.net/Docs/PyQt4/classes.html",
_("PyQt4 API Reference")),
('xy', "http://code.google.com/p/pythonxy/",
_("Python(x,y)")),
('winpython', "http://code.google.com/p/winpython/",
_("WinPython"))
)
def __init__(self, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
self.default_style = str(qapp.style().objectName())
self.dialog_manager = DialogManager()
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.light = options.light
self.new_instance = options.new_instance
self.debug_print("Start of MainWindow constructor")
# self.setStyleSheet(STYLESHEET)
# Shortcut management data
self.shortcut_data = []
# Loading Spyder path
self.path = []
self.project_path = []
if osp.isfile(self.SPYDER_PATH):
self.path, _x = encoding.readlines(self.SPYDER_PATH)
self.path = [name for name in self.path if osp.isdir(name)]
self.remove_path_from_sys_path()
self.add_path_to_sys_path()
self.load_temp_session_action = create_action(self,
_("Reload last session"),
triggered=lambda:
self.load_session(TEMP_SESSION_PATH))
self.load_session_action = create_action(self,
_("Load session..."),
None, 'fileopen.png',
triggered=self.load_session,
tip=_("Load Spyder session"))
self.save_session_action = create_action(self,
_("Save session and quit..."),
None, 'filesaveas.png',
triggered=self.save_session,
tip=_("Save current session "
"and quit application"))
# Plugins
self.console = None
self.workingdirectory = None
self.editor = None
self.explorer = None
self.inspector = None
self.onlinehelp = None
self.projectexplorer = None
self.outlineexplorer = None
self.historylog = None
self.extconsole = None
self.ipyconsole = None
self.variableexplorer = None
self.findinfiles = None
self.thirdparty_plugins = []
# Preferences
from spyderlib.plugins.configdialog import (MainConfigPage,
ColorSchemeConfigPage)
from spyderlib.plugins.shortcuts import ShortcutsConfigPage
from spyderlib.plugins.runconfig import RunConfigPage
self.general_prefs = [MainConfigPage, ShortcutsConfigPage,
ColorSchemeConfigPage, RunConfigPage]
self.prefs_index = None
self.prefs_dialog_size = None
# Actions
self.close_dockwidget_action = None
self.find_action = None
self.find_next_action = None
self.find_previous_action = None
self.replace_action = None
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.delete_action = None
self.selectall_action = None
self.maximize_action = None
self.fullscreen_action = None
# Menu bars
self.file_menu = None
self.file_menu_actions = []
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
self.consoles_menu = None
self.consoles_menu_actions = []
self.tools_menu = None
self.tools_menu_actions = []
self.external_tools_menu = None # We must keep a reference to this,
# otherwise the external tools menu is lost after leaving setup method
self.external_tools_menu_actions = []
self.view_menu = None
self.plugins_menu = None
self.toolbars_menu = None
self.help_menu = None
self.help_menu_actions = []
# Status bar widgets
self.mem_status = None
self.cpu_status = None
# Toolbars
self.toolbarslist = []
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.edit_toolbar = None
self.edit_toolbar_actions = []
self.search_toolbar = None
self.search_toolbar_actions = []
self.source_toolbar = None
self.source_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
# Set Window title and icon
if DEV is not None:
title = "Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
else:
title = "Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if DEBUG:
title += " [DEBUG MODE %d]" % DEBUG
self.setWindowTitle(title)
icon_name = 'spyder_light.svg' if self.light else 'spyder.svg'
# Resampling SVG icon only on non-Windows platforms (see Issue 1314):
self.setWindowIcon(get_icon(icon_name, resample=os.name != 'nt'))
# Showing splash screen
self.splash = SPLASH
if not self.light:
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
self.state_before_maximizing = None
self.current_quick_layout = None
self.previous_layout_settings = None
self.last_plugin = None
self.fullscreen_flag = None # isFullscreen does not work as expected
        # The following flag remembers the maximized state even when
# the window is in fullscreen mode:
self.maximized_flag = None
# Session manager
self.next_session_name = None
self.save_session_name = None
# Track which console plugin type had last focus
# True: Console plugin
# False: IPython console plugin
self.last_console_plugin_focus_was_python = True
# To keep track of the last focused widget
self.last_focused_widget = None
# Server to open external files on a single instance
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
self.apply_settings()
self.debug_print("End of MainWindow constructor")
def debug_print(self, message):
"""Debug prints"""
debug_print(message)
#---- Window setup
def create_toolbar(self, title, object_name, iconsize=24):
"""Create and return toolbar with *title* and *object_name*"""
toolbar = self.addToolBar(title)
toolbar.setObjectName(object_name)
toolbar.setIconSize( QSize(iconsize, iconsize) )
self.toolbarslist.append(toolbar)
return toolbar
def setup(self):
"""Setup main window"""
self.debug_print("*** Start of MainWindow setup ***")
if not self.light:
self.debug_print(" ..core actions")
self.close_dockwidget_action = create_action(self,
_("Close current pane"),
triggered=self.close_current_dockwidget,
context=Qt.ApplicationShortcut)
self.register_shortcut(self.close_dockwidget_action, "_",
"Close pane")
_text = _("&Find text")
self.find_action = create_action(self, _text, icon='find.png',
tip=_text, triggered=self.find,
context=Qt.WidgetShortcut)
self.register_shortcut(self.find_action, "Editor", "Find text")
self.find_next_action = create_action(self, _("Find &next"),
icon='findnext.png', triggered=self.find_next,
context=Qt.WidgetShortcut)
self.register_shortcut(self.find_next_action, "Editor",
"Find next")
self.find_previous_action = create_action(self,
_("Find &previous"),
icon='findprevious.png', triggered=self.find_previous,
context=Qt.WidgetShortcut)
self.register_shortcut(self.find_previous_action, "Editor",
"Find previous")
_text = _("&Replace text")
self.replace_action = create_action(self, _text, icon='replace.png',
tip=_text, triggered=self.replace,
context=Qt.WidgetShortcut)
self.register_shortcut(self.replace_action, "Editor",
"Replace text")
def create_edit_action(text, tr_text, icon_name):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
return create_action(self, tr_text,
shortcut=keybinding(text.replace(' ', '')),
icon=get_icon(icon_name),
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.undo_action = create_edit_action("Undo", _("Undo"),
'undo.png')
self.redo_action = create_edit_action("Redo", _("Redo"), 'redo.png')
self.copy_action = create_edit_action("Copy", _("Copy"),
'editcopy.png')
self.cut_action = create_edit_action("Cut", _("Cut"), 'editcut.png')
self.paste_action = create_edit_action("Paste", _("Paste"),
'editpaste.png')
self.delete_action = create_edit_action("Delete", _("Delete"),
'editdelete.png')
self.selectall_action = create_edit_action("Select All",
_("Select All"),
'selectall.png')
self.edit_menu_actions = [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.delete_action,
None, self.selectall_action]
self.search_menu_actions = [self.find_action, self.find_next_action,
self.find_previous_action,
self.replace_action]
self.search_toolbar_actions = [self.find_action,
self.find_next_action,
self.replace_action]
namespace = None
if not self.light:
self.debug_print(" ..toolbars")
# File menu/toolbar
self.file_menu = self.menuBar().addMenu(_("&File"))
self.connect(self.file_menu, SIGNAL("aboutToShow()"),
self.update_file_menu)
self.file_toolbar = self.create_toolbar(_("File toolbar"),
"file_toolbar")
# Edit menu/toolbar
self.edit_menu = self.menuBar().addMenu(_("&Edit"))
self.edit_toolbar = self.create_toolbar(_("Edit toolbar"),
"edit_toolbar")
# Search menu/toolbar
self.search_menu = self.menuBar().addMenu(_("&Search"))
self.search_toolbar = self.create_toolbar(_("Search toolbar"),
"search_toolbar")
# Source menu/toolbar
self.source_menu = self.menuBar().addMenu(_("Sour&ce"))
self.source_toolbar = self.create_toolbar(_("Source toolbar"),
"source_toolbar")
# Run menu/toolbar
self.run_menu = self.menuBar().addMenu(_("&Run"))
self.run_toolbar = self.create_toolbar(_("Run toolbar"),
"run_toolbar")
# Debug menu/toolbar
self.debug_menu = self.menuBar().addMenu(_("&Debug"))
self.debug_toolbar = self.create_toolbar(_("Debug toolbar"),
"debug_toolbar")
# Consoles menu/toolbar
self.consoles_menu = self.menuBar().addMenu(_("C&onsoles"))
# Tools menu
self.tools_menu = self.menuBar().addMenu(_("&Tools"))
# View menu
self.view_menu = self.menuBar().addMenu(_("&View"))
# Help menu
self.help_menu = self.menuBar().addMenu(_("&Help"))
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
self.debug_print(" ..tools")
# Tools + External Tools
prefs_action = create_action(self, _("Pre&ferences"),
icon='configure.png',
triggered=self.edit_preferences)
self.register_shortcut(prefs_action, "_", "Preferences")
add_shortcut_to_tooltip(prefs_action, context="_",
name="Preferences")
spyder_path_action = create_action(self,
_("PYTHONPATH manager"),
None, 'pythonpath_mgr.png',
triggered=self.path_manager_callback,
tip=_("Python Path Manager"),
menurole=QAction.ApplicationSpecificRole)
update_modules_action = create_action(self,
_("Update module names list"),
triggered=module_completion.reset,
tip=_("Refresh list of module names "
"available in PYTHONPATH"))
self.tools_menu_actions = [prefs_action, spyder_path_action]
if WinUserEnvDialog is not None:
winenv_action = create_action(self,
_("Current user environment variables..."),
icon='win_env.png',
tip=_("Show and edit current user environment "
"variables in Windows registry "
"(i.e. for all sessions)"),
triggered=self.win_env)
self.tools_menu_actions.append(winenv_action)
self.tools_menu_actions += [None, update_modules_action]
# External Tools submenu
self.external_tools_menu = QMenu(_("External Tools"))
self.external_tools_menu_actions = []
# Python(x,y) launcher
self.xy_action = create_action(self,
_("Python(x,y) launcher"),
icon=get_icon('pythonxy.png'),
triggered=lambda:
programs.run_python_script('xy', 'xyhome'))
if os.name == 'nt' and is_module_installed('xy'):
self.external_tools_menu_actions.append(self.xy_action)
# WinPython control panel
self.wp_action = create_action(self, _("WinPython control panel"),
icon=get_icon('winpython.svg'),
triggered=lambda:
programs.run_python_script('winpython', 'controlpanel'))
if os.name == 'nt' and is_module_installed('winpython'):
self.external_tools_menu_actions.append(self.wp_action)
# Qt-related tools
additact = []
for name in ("designer-qt4", "designer"):
qtdact = create_program_action(self, _("Qt Designer"),
name, 'qtdesigner.png')
if qtdact:
break
for name in ("linguist-qt4", "linguist"):
                qtlact = create_program_action(self, _("Qt Linguist"),
                                               name, 'qtlinguist.png')
if qtlact:
break
args = ['-no-opengl'] if os.name == 'nt' else []
qteact = create_python_script_action(self,
_("Qt examples"), 'qt.png', "PyQt4",
osp.join("examples", "demos",
"qtdemo", "qtdemo"), args)
for act in (qtdact, qtlact, qteact):
if act:
additact.append(act)
if additact and (is_module_installed('winpython') or \
is_module_installed('xy')):
self.external_tools_menu_actions += [None] + additact
# Guidata and Sift
self.debug_print(" ..sift?")
gdgq_act = []
if is_module_installed('guidata'):
from guidata import configtools
from guidata import config # (loading icons) analysis:ignore
guidata_icon = configtools.get_icon('guidata.svg')
guidata_act = create_python_script_action(self,
_("guidata examples"), guidata_icon, "guidata",
osp.join("tests", "__init__"))
if guidata_act:
gdgq_act += [guidata_act]
if is_module_installed('guiqwt'):
from guiqwt import config # analysis:ignore
guiqwt_icon = configtools.get_icon('guiqwt.svg')
guiqwt_act = create_python_script_action(self,
_("guiqwt examples"), guiqwt_icon, "guiqwt",
osp.join("tests", "__init__"))
if guiqwt_act:
gdgq_act += [guiqwt_act]
sift_icon = configtools.get_icon('sift.svg')
sift_act = create_python_script_action(self, _("Sift"),
sift_icon, "guiqwt", osp.join("tests", "sift"))
if sift_act:
gdgq_act += [sift_act]
if gdgq_act:
self.external_tools_menu_actions += [None] + gdgq_act
# ViTables
vitables_act = create_program_action(self, _("ViTables"),
"vitables", 'vitables.png')
if vitables_act:
self.external_tools_menu_actions += [None, vitables_act]
# Maximize current plugin
self.maximize_action = create_action(self, '',
triggered=self.maximize_dockwidget)
self.register_shortcut(self.maximize_action, "_",
"Maximize pane")
self.__update_maximize_action()
# Fullscreen mode
self.fullscreen_action = create_action(self,
_("Fullscreen mode"),
triggered=self.toggle_fullscreen)
self.register_shortcut(self.fullscreen_action, "_",
"Fullscreen mode")
add_shortcut_to_tooltip(self.fullscreen_action, context="_",
name="Fullscreen mode")
# Main toolbar
self.main_toolbar_actions = [self.maximize_action,
self.fullscreen_action, None,
prefs_action, spyder_path_action]
self.main_toolbar = self.create_toolbar(_("Main toolbar"),
"main_toolbar")
# Internal console plugin
self.debug_print(" ..plugin: internal console")
from spyderlib.plugins.console import Console
self.console = Console(self, namespace, exitfunc=self.closing,
profile=self.profile,
multithreaded=self.multithreaded,
message="DON'T USE THIS CONSOLE TO RUN CODE!\n\n"
"It's used to report application errors\n"
"and to inspect Spyder internals with\n"
"the following commands:\n"
" spy.app, spy.window, dir(spy)")
self.console.register_plugin()
# Working directory plugin
self.debug_print(" ..plugin: working directory")
from spyderlib.plugins.workingdirectory import WorkingDirectory
self.workingdirectory = WorkingDirectory(self, self.init_workdir)
self.workingdirectory.register_plugin()
self.toolbarslist.append(self.workingdirectory)
# Object inspector plugin
if CONF.get('inspector', 'enable'):
self.set_splash(_("Loading object inspector..."))
from spyderlib.plugins.inspector import ObjectInspector
self.inspector = ObjectInspector(self)
self.inspector.register_plugin()
# Outline explorer widget
if CONF.get('outline_explorer', 'enable'):
self.set_splash(_("Loading outline explorer..."))
from spyderlib.plugins.outlineexplorer import OutlineExplorer
fullpath_sorting = CONF.get('editor', 'fullpath_sorting', True)
self.outlineexplorer = OutlineExplorer(self,
fullpath_sorting=fullpath_sorting)
self.outlineexplorer.register_plugin()
# Editor plugin
self.set_splash(_("Loading editor..."))
from spyderlib.plugins.editor import Editor
self.editor = Editor(self)
self.editor.register_plugin()
# Populating file menu entries
quit_action = create_action(self, _("&Quit"),
icon='exit.png', tip=_("Quit"),
triggered=self.console.quit)
self.register_shortcut(quit_action, "_", "Quit")
self.file_menu_actions += [self.load_temp_session_action,
self.load_session_action,
self.save_session_action,
None, quit_action]
self.set_splash("")
self.debug_print(" ..widgets")
# Find in files
if CONF.get('find_in_files', 'enable'):
from spyderlib.plugins.findinfiles import FindInFiles
self.findinfiles = FindInFiles(self)
self.findinfiles.register_plugin()
# Explorer
if CONF.get('explorer', 'enable'):
self.set_splash(_("Loading file explorer..."))
from spyderlib.plugins.explorer import Explorer
self.explorer = Explorer(self)
self.explorer.register_plugin()
# History log widget
if CONF.get('historylog', 'enable'):
self.set_splash(_("Loading history plugin..."))
from spyderlib.plugins.history import HistoryLog
self.historylog = HistoryLog(self)
self.historylog.register_plugin()
# Online help widget
try: # Qt >= v4.4
from spyderlib.plugins.onlinehelp import OnlineHelp
except ImportError: # Qt < v4.4
OnlineHelp = None # analysis:ignore
if CONF.get('onlinehelp', 'enable') and OnlineHelp is not None:
self.set_splash(_("Loading online help..."))
self.onlinehelp = OnlineHelp(self)
self.onlinehelp.register_plugin()
# Project explorer widget
if CONF.get('project_explorer', 'enable'):
self.set_splash(_("Loading project explorer..."))
from spyderlib.plugins.projectexplorer import ProjectExplorer
self.projectexplorer = ProjectExplorer(self)
self.projectexplorer.register_plugin()
# External console
if self.light:
# This is necessary to support the --working-directory option:
if self.init_workdir is not None:
os.chdir(self.init_workdir)
else:
self.set_splash(_("Loading external console..."))
from spyderlib.plugins.externalconsole import ExternalConsole
self.extconsole = ExternalConsole(self, light_mode=self.light)
self.extconsole.register_plugin()
# Namespace browser
if not self.light:
# In light mode, namespace browser is opened inside external console
# Here, it is opened as an independent plugin, in its own dockwidget
self.set_splash(_("Loading namespace browser..."))
from spyderlib.plugins.variableexplorer import VariableExplorer
self.variableexplorer = VariableExplorer(self)
self.variableexplorer.register_plugin()
# IPython console
if IPYTHON_QT_INSTALLED and not self.light:
self.set_splash(_("Loading IPython console..."))
from spyderlib.plugins.ipythonconsole import IPythonConsole
self.ipyconsole = IPythonConsole(self)
self.ipyconsole.register_plugin()
if not self.light:
nsb = self.variableexplorer.add_shellwidget(self.console.shell)
self.connect(self.console.shell, SIGNAL('refresh()'),
nsb.refresh_table)
nsb.auto_refresh_button.setEnabled(False)
self.set_splash(_("Setting up main window..."))
# Help menu
dep_action = create_action(self, _("Optional dependencies..."),
triggered=self.show_dependencies,
icon='advanced.png')
report_action = create_action(self,
_("Report issue..."),
icon=get_icon('bug.png'),
triggered=self.report_issue)
support_action = create_action(self,
_("Spyder support..."),
triggered=self.google_group)
# Spyder documentation
doc_path = get_module_data_path('spyderlib', relpath="doc",
attr_name='DOCPATH')
# * Trying to find the chm doc
spyder_doc = osp.join(doc_path, "Spyderdoc.chm")
if not osp.isfile(spyder_doc):
spyder_doc = osp.join(doc_path, os.pardir, "Spyderdoc.chm")
# * Trying to find the html doc
if not osp.isfile(spyder_doc):
spyder_doc = osp.join(doc_path, "index.html")
# * Trying to find the development-version html doc
if not osp.isfile(spyder_doc):
spyder_doc = osp.join(get_module_source_path('spyderlib'),
os.pardir, 'build', 'lib', 'spyderlib',
'doc', "index.html")
# * If we totally fail, point to our web build
if not osp.isfile(spyder_doc):
spyder_doc = 'http://pythonhosted.org/spyder'
else:
spyder_doc = file_uri(spyder_doc)
doc_action = create_bookmark_action(self, spyder_doc,
_("Spyder documentation"), shortcut="F1",
icon=get_std_icon('DialogHelpButton'))
self.help_menu_actions = [doc_action, report_action,
dep_action, support_action, None]
# Python documentation
if get_python_doc_path() is not None:
pydoc_act = create_action(self, _("Python documentation"),
triggered=lambda:
programs.start_file(get_python_doc_path()))
self.help_menu_actions.append(pydoc_act)
# IPython documentation
if self.ipyconsole is not None:
ipython_menu = QMenu(_("IPython documentation"), self)
intro_action = create_action(self, _("Intro to IPython"),
triggered=self.ipyconsole.show_intro)
quickref_action = create_action(self, _("Quick reference"),
triggered=self.ipyconsole.show_quickref)
guiref_action = create_action(self, _("Console help"),
triggered=self.ipyconsole.show_guiref)
add_actions(ipython_menu, (intro_action, guiref_action,
quickref_action))
self.help_menu_actions.append(ipython_menu)
# Windows-only: documentation located in sys.prefix/Doc
ipm_actions = []
def add_ipm_action(text, path):
"""Add installed Python module doc action to help submenu"""
path = file_uri(path)
action = create_action(self, text,
icon='%s.png' % osp.splitext(path)[1][1:],
triggered=lambda path=path: programs.start_file(path))
ipm_actions.append(action)
sysdocpth = osp.join(sys.prefix, 'Doc')
if osp.isdir(sysdocpth): # exists on Windows, except frozen dist.
for docfn in os.listdir(sysdocpth):
pt = r'([a-zA-Z\_]*)(doc)?(-dev)?(-ref)?(-user)?.(chm|pdf)'
match = re.match(pt, docfn)
if match is not None:
pname = match.groups()[0]
if pname not in ('Python', ):
add_ipm_action(pname, osp.join(sysdocpth, docfn))
# Documentation provided by Python(x,y), if available
try:
from xy.config import DOC_PATH as xy_doc_path
xydoc = osp.join(xy_doc_path, "Libraries")
def add_xydoc(text, pathlist):
for path in pathlist:
if osp.exists(path):
add_ipm_action(text, path)
break
add_xydoc(_("Python(x,y) documentation folder"),
[xy_doc_path])
add_xydoc(_("IPython documentation"),
[osp.join(xydoc, "IPython", "ipythondoc.chm")])
add_xydoc(_("guidata documentation"),
[osp.join(xydoc, "guidata", "guidatadoc.chm"),
r"D:\Python\guidata\build\doc_chm\guidatadoc.chm"])
add_xydoc(_("guiqwt documentation"),
[osp.join(xydoc, "guiqwt", "guiqwtdoc.chm"),
r"D:\Python\guiqwt\build\doc_chm\guiqwtdoc.chm"])
add_xydoc(_("Matplotlib documentation"),
[osp.join(xydoc, "matplotlib", "Matplotlibdoc.chm"),
osp.join(xydoc, "matplotlib", "Matplotlib.pdf")])
add_xydoc(_("NumPy documentation"),
[osp.join(xydoc, "NumPy", "numpy.chm")])
add_xydoc(_("NumPy reference guide"),
[osp.join(xydoc, "NumPy", "numpy-ref.pdf")])
add_xydoc(_("NumPy user guide"),
[osp.join(xydoc, "NumPy", "numpy-user.pdf")])
add_xydoc(_("SciPy documentation"),
[osp.join(xydoc, "SciPy", "scipy.chm"),
osp.join(xydoc, "SciPy", "scipy-ref.pdf")])
except (ImportError, KeyError, RuntimeError):
pass
# Installed Python modules submenu (Windows only)
if ipm_actions:
pymods_menu = QMenu(_("Installed Python modules"), self)
add_actions(pymods_menu, ipm_actions)
self.help_menu_actions.append(pymods_menu)
# Online documentation
web_resources = QMenu(_("Online documentation"))
webres_actions = create_module_bookmark_actions(self,
self.BOOKMARKS)
webres_actions.insert(2, None)
webres_actions.insert(5, None)
add_actions(web_resources, webres_actions)
self.help_menu_actions.append(web_resources)
# Qt assistant link
qta_act = create_program_action(self, _("Qt documentation"),
"assistant")
if qta_act:
self.help_menu_actions += [qta_act, None]
# About Spyder
about_action = create_action(self,
_("About %s...") % "Spyder",
icon=get_std_icon('MessageBoxInformation'),
triggered=self.about)
self.help_menu_actions += [None, about_action]
# Status bar widgets
from spyderlib.widgets.status import MemoryStatus, CPUStatus
self.mem_status = MemoryStatus(self, status)
self.cpu_status = CPUStatus(self, status)
self.apply_statusbar_settings()
# Third-party plugins
for mod in get_spyderplugins_mods(prefix='p_', extension='.py'):
try:
plugin = mod.PLUGIN_CLASS(self)
self.thirdparty_plugins.append(plugin)
plugin.register_plugin()
except AttributeError as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
# View menu
self.plugins_menu = QMenu(_("Panes"), self)
self.toolbars_menu = QMenu(_("Toolbars"), self)
self.view_menu.addMenu(self.plugins_menu)
self.view_menu.addMenu(self.toolbars_menu)
reset_layout_action = create_action(self, _("Reset window layout"),
triggered=self.reset_window_layout)
quick_layout_menu = QMenu(_("Custom window layouts"), self)
ql_actions = []
for index in range(1, 4):
if index > 0:
ql_actions += [None]
qli_act = create_action(self,
_("Switch to/from layout %d") % index,
triggered=lambda i=index:
self.quick_layout_switch(i))
self.register_shortcut(qli_act, "_",
"Switch to/from layout %d" % index)
qlsi_act = create_action(self, _("Set layout %d") % index,
triggered=lambda i=index:
self.quick_layout_set(i))
self.register_shortcut(qlsi_act, "_", "Set layout %d" % index)
ql_actions += [qli_act, qlsi_act]
add_actions(quick_layout_menu, ql_actions)
if set_attached_console_visible is not None:
cmd_act = create_action(self,
_("Attached console window (debugging)"),
toggled=set_attached_console_visible)
cmd_act.setChecked(is_attached_console_visible())
add_actions(self.view_menu, (None, cmd_act))
add_actions(self.view_menu, (None, self.fullscreen_action,
self.maximize_action,
self.close_dockwidget_action, None,
reset_layout_action,
quick_layout_menu))
# Adding external tools action to "Tools" menu
if self.external_tools_menu_actions:
external_tools_act = create_action(self, _("External Tools"),
icon="ext_tools.png")
external_tools_act.setMenu(self.external_tools_menu)
self.tools_menu_actions += [None, external_tools_act]
self.main_toolbar_actions.append(external_tools_act)
# Filling out menu/toolbar entries:
add_actions(self.file_menu, self.file_menu_actions)
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
add_actions(self.consoles_menu, self.consoles_menu_actions)
add_actions(self.tools_menu, self.tools_menu_actions)
add_actions(self.external_tools_menu,
self.external_tools_menu_actions)
add_actions(self.help_menu, self.help_menu_actions)
add_actions(self.main_toolbar, self.main_toolbar_actions)
add_actions(self.file_toolbar, self.file_toolbar_actions)
add_actions(self.edit_toolbar, self.edit_toolbar_actions)
add_actions(self.search_toolbar, self.search_toolbar_actions)
add_actions(self.source_toolbar, self.source_toolbar_actions)
add_actions(self.debug_toolbar, self.debug_toolbar_actions)
add_actions(self.run_toolbar, self.run_toolbar_actions)
# Apply all defined shortcuts (plugins + 3rd-party plugins)
self.apply_shortcuts()
#self.remove_deprecated_shortcuts()
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.emit(SIGNAL('all_actions_defined()'))
# Window set-up
self.debug_print("Setting up window...")
self.setup_layout(default=False)
self.splash.hide()
# Enabling tear off for all menus except help menu
if CONF.get('main', 'tear_off_menus'):
for child in self.menuBar().children():
if isinstance(child, QMenu) and child != self.help_menu:
child.setTearOffEnabled(True)
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
self.connect(child, SIGNAL("aboutToShow()"),
self.update_edit_menu)
self.debug_print("*** End of MainWindow setup ***")
self.is_starting_up = False
def post_visible_setup(self):
"""Actions to be performed only after the main window's `show` method
was triggered"""
self.emit(SIGNAL('restore_scrollbar_position()'))
if self.projectexplorer is not None:
self.projectexplorer.check_for_io_errors()
# Remove our temporary dir
atexit.register(self.remove_tmpdir)
# Remove settings test directory
if TEST is not None:
import tempfile
conf_dir = osp.join(tempfile.gettempdir(), SUBFOLDER)
atexit.register(shutil.rmtree, conf_dir, ignore_errors=True)
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
        # In MacOS X 10.7 our app is not displayed after initialization (I
        # don't know why, because this doesn't happen when started from the
        # terminal), so we need to resort to this hack to make it appear.
if sys.platform == 'darwin' and 'Spyder.app' in __file__:
import subprocess
idx = __file__.index('Spyder.app')
app_path = __file__[:idx]
subprocess.call(['open', app_path + 'Spyder.app'])
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if CONF.get('main', 'single_instance') and not self.new_instance:
t = threading.Thread(target=self.start_open_files_server)
t.setDaemon(True)
t.start()
            # Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.connect(self, SIGNAL('open_external_file(QString)'),
lambda fname: self.open_external_file(fname))
# Create Plugins and toolbars submenus
if not self.light:
self.create_plugins_menu()
self.create_toolbars_menu()
# Open a Python console for light mode
if self.light:
self.extconsole.open_interpreter()
self.extconsole.setMinimumHeight(0)
if not self.light:
            # Hide the Internal Console so that people don't use it instead of
            # the External or IPython ones
if self.console.dockwidget.isVisible() and DEV is None:
self.console.dockwidget.hide()
# Show the Object Inspector and Consoles by default
plugins_to_show = [self.inspector]
if self.ipyconsole is not None:
if self.ipyconsole.isvisible:
plugins_to_show += [self.extconsole, self.ipyconsole]
else:
plugins_to_show += [self.ipyconsole, self.extconsole]
else:
plugins_to_show += [self.extconsole]
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Show history file if no console is visible
ipy_visible = self.ipyconsole is not None and self.ipyconsole.isvisible
if not self.extconsole.isvisible and not ipy_visible:
self.historylog.add_history(get_conf_path('history.py'))
# Give focus to the Editor
if self.editor.dockwidget.isVisible():
try:
self.editor.get_focus_widget().setFocus()
except AttributeError:
pass
self.is_setting_up = False
def load_window_settings(self, prefix, default=False, section='main'):
"""Load window layout settings from userconfig-based configuration
with *prefix*, under *section*
default: if True, do not restore inner layout"""
get_func = CONF.get_default if default else CONF.get
window_size = get_func(section, prefix+'size')
prefs_dialog_size = get_func(section, prefix+'prefs_dialog_size')
if default:
hexstate = None
else:
hexstate = get_func(section, prefix+'state', None)
pos = get_func(section, prefix+'position')
is_maximized = get_func(section, prefix+'is_maximized')
is_fullscreen = get_func(section, prefix+'is_fullscreen')
return hexstate, window_size, prefs_dialog_size, pos, is_maximized, \
is_fullscreen
def get_window_settings(self):
"""Return current window settings
        Symmetric to the 'set_window_settings' setter"""
window_size = (self.window_size.width(), self.window_size.height())
is_fullscreen = self.isFullScreen()
if is_fullscreen:
is_maximized = self.maximized_flag
else:
is_maximized = self.isMaximized()
pos = (self.window_position.x(), self.window_position.y())
prefs_dialog_size = (self.prefs_dialog_size.width(),
self.prefs_dialog_size.height())
hexstate = qbytearray_to_str(self.saveState())
return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen)
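    # A hedged usage sketch (illustrative only; `win` stands for a MainWindow
    # instance and is an assumption, not a name from this file): the 6-tuple
    # returned above feeds straight back into set_window_settings(), which is
    # what makes saving and restoring layouts symmetric.
    #
    #     settings = win.get_window_settings()
    #     # ... user rearranges dockwidgets, resizes the window, etc. ...
    #     win.set_window_settings(*settings)   # restore the captured layout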
def set_window_settings(self, hexstate, window_size, prefs_dialog_size,
pos, is_maximized, is_fullscreen):
"""Set window settings
        Symmetric to the 'get_window_settings' accessor"""
self.setUpdatesEnabled(False)
self.window_size = QSize(window_size[0], window_size[1]) # width,height
self.prefs_dialog_size = QSize(prefs_dialog_size[0],
prefs_dialog_size[1]) # width,height
self.window_position = QPoint(pos[0], pos[1]) # x,y
self.setWindowState(Qt.WindowNoState)
self.resize(self.window_size)
self.move(self.window_position)
if not self.light:
# Window layout
if hexstate:
self.restoreState( QByteArray().fromHex(str(hexstate)) )
# [Workaround for Issue 880]
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow.
for widget in self.children():
if isinstance(widget, QDockWidget) and widget.isFloating():
self.floating_dockwidgets.append(widget)
widget.setFloating(False)
# Is fullscreen?
if is_fullscreen:
self.setWindowState(Qt.WindowFullScreen)
self.__update_fullscreen_action()
# Is maximized?
if is_fullscreen:
self.maximized_flag = is_maximized
elif is_maximized:
self.setWindowState(Qt.WindowMaximized)
self.setUpdatesEnabled(True)
def save_current_window_settings(self, prefix, section='main'):
"""Save current window settings with *prefix* in
the userconfig-based configuration, under *section*"""
win_size = self.window_size
prefs_size = self.prefs_dialog_size
CONF.set(section, prefix+'size', (win_size.width(), win_size.height()))
CONF.set(section, prefix+'prefs_dialog_size',
(prefs_size.width(), prefs_size.height()))
CONF.set(section, prefix+'is_maximized', self.isMaximized())
CONF.set(section, prefix+'is_fullscreen', self.isFullScreen())
pos = self.window_position
CONF.set(section, prefix+'position', (pos.x(), pos.y()))
if not self.light:
            self.maximize_dockwidget(restore=True)  # Restore non-maximized layout
qba = self.saveState()
CONF.set(section, prefix+'state', qbytearray_to_str(qba))
CONF.set(section, prefix+'statusbar',
not self.statusBar().isHidden())
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets"""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
def setup_layout(self, default=False):
"""Setup window layout"""
prefix = ('lightwindow' if self.light else 'window') + '/'
(hexstate, window_size, prefs_dialog_size, pos, is_maximized,
is_fullscreen) = self.load_window_settings(prefix, default)
if hexstate is None and not self.light:
# First Spyder execution:
# trying to set-up the dockwidget/toolbar positions to the best
# appearance possible
splitting = (
(self.projectexplorer, self.editor, Qt.Horizontal),
(self.editor, self.outlineexplorer, Qt.Horizontal),
(self.outlineexplorer, self.inspector, Qt.Horizontal),
(self.inspector, self.console, Qt.Vertical),
)
for first, second, orientation in splitting:
if first is not None and second is not None:
self.splitDockWidget(first.dockwidget, second.dockwidget,
orientation)
for first, second in ((self.console, self.extconsole),
(self.extconsole, self.ipyconsole),
(self.ipyconsole, self.historylog),
(self.inspector, self.variableexplorer),
(self.variableexplorer, self.onlinehelp),
(self.onlinehelp, self.explorer),
(self.explorer, self.findinfiles),
):
if first is not None and second is not None:
self.tabify_plugins(first, second)
for plugin in [self.findinfiles, self.onlinehelp, self.console,
]+self.thirdparty_plugins:
if plugin is not None:
plugin.dockwidget.close()
for plugin in (self.inspector, self.extconsole):
if plugin is not None:
plugin.dockwidget.raise_()
self.extconsole.setMinimumHeight(250)
hidden_toolbars = [self.source_toolbar, self.edit_toolbar,
self.search_toolbar]
for toolbar in hidden_toolbars:
toolbar.close()
for plugin in (self.projectexplorer, self.outlineexplorer):
plugin.dockwidget.close()
self.set_window_settings(hexstate, window_size, prefs_dialog_size, pos,
is_maximized, is_fullscreen)
for plugin in self.widgetlist:
plugin.initialize_plugin_in_mainwindow_layout()
def reset_window_layout(self):
"""Reset window layout to default"""
answer = QMessageBox.warning(self, _("Warning"),
_("Window layout will be reset to default settings: "
"this affects window position, size and dockwidgets.\n"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.setup_layout(default=True)
def quick_layout_switch(self, index):
"""Switch to quick layout number *index*"""
if self.current_quick_layout == index:
self.set_window_settings(*self.previous_layout_settings)
self.current_quick_layout = None
else:
try:
settings = self.load_window_settings('layout_%d/' % index,
section='quick_layouts')
except cp.NoOptionError:
QMessageBox.critical(self, _("Warning"),
_("Quick switch layout #%d has not yet "
"been defined.") % index)
return
self.previous_layout_settings = self.get_window_settings()
self.set_window_settings(*settings)
self.current_quick_layout = index
def quick_layout_set(self, index):
"""Save current window settings as quick layout number *index*"""
self.save_current_window_settings('layout_%d/' % index,
section='quick_layouts')
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
if self.light:
# There is currently no point doing the following in light mode
return
self.update_edit_menu()
self.update_search_menu()
# Now deal with Python shell and IPython plugins
shell = get_focus_python_shell()
if shell is not None:
# A Python shell widget has focus
self.last_console_plugin_focus_was_python = True
if self.inspector is not None:
# The object inspector may be disabled in .spyder.ini
self.inspector.set_shell(shell)
from spyderlib.widgets.externalshell import pythonshell
if isinstance(shell, pythonshell.ExtPythonShellWidget):
shell = shell.parent()
self.variableexplorer.set_shellwidget_from_id(id(shell))
elif self.ipyconsole is not None:
focus_client = self.ipyconsole.get_focus_client()
if focus_client is not None:
self.last_console_plugin_focus_was_python = False
kwid = focus_client.kernel_widget_id
if kwid is not None:
idx = self.extconsole.get_shell_index_from_id(kwid)
if idx is not None:
kw = self.extconsole.shellwidgets[idx]
if self.inspector is not None:
self.inspector.set_shell(kw)
self.variableexplorer.set_shellwidget_from_id(kwid)
# Setting the kernel widget as current widget for the
# external console's tabwidget: this is necessary for
# the editor/console link to be working (otherwise,
# features like "Execute in current interpreter" will
# not work with IPython clients unless the associated
# IPython kernel has been selected in the external
# console... that's not brilliant, but it works for
# now: we shall take action on this later
self.extconsole.tabwidget.setCurrentWidget(kw)
focus_client.get_control().setFocus()
def update_file_menu(self):
"""Update file menu"""
self.load_temp_session_action.setEnabled(osp.isfile(TEMP_SESSION_PATH))
def update_edit_menu(self):
"""Update edit menu"""
if self.menuBar().hasFocus():
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
widget, textedit_properties = get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
#!!! Below this line, widget is expected to be a QPlainTextEdit instance
console, not_readonly, readwrite_editor = textedit_properties
# Editor has focus and there is no file opened in it
if not console and not_readonly and not self.editor.is_file_opened():
return
self.selectall_action.setEnabled(True)
# Undo, redo
self.undo_action.setEnabled( readwrite_editor \
and widget.document().isUndoAvailable() )
self.redo_action.setEnabled( readwrite_editor \
and widget.document().isRedoAvailable() )
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
self.delete_action.setEnabled(has_selection and not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
if self.menuBar().hasFocus():
return
# Disabling all actions to begin with
for child in [self.find_action, self.find_next_action,
self.find_previous_action, self.replace_action]:
child.setEnabled(False)
widget, textedit_properties = get_focus_widget_properties()
for action in self.editor.search_menu_actions:
action.setEnabled(self.editor.isAncestorOf(widget))
if textedit_properties is None: # widget is not an editor/console
return
#!!! Below this line, widget is expected to be a QPlainTextEdit instance
_x, _y, readwrite_editor = textedit_properties
for action in [self.find_action, self.find_next_action,
self.find_previous_action]:
action.setEnabled(True)
        self.replace_action.setEnabled(readwrite_editor)
def create_plugins_menu(self):
order = ['editor', 'console', 'ipython_console', 'variable_explorer',
'inspector', None, 'explorer', 'outline_explorer',
'project_explorer', 'find_in_files', None, 'historylog',
'profiler', 'breakpoints', 'pylint', None,
'onlinehelp', 'internal_console']
for plugin in self.widgetlist:
action = plugin.toggle_view_action
action.setChecked(plugin.dockwidget.isVisible())
try:
name = plugin.CONF_SECTION
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
actions = order[:]
for action in order:
if type(action) is str:
actions.remove(action)
add_actions(self.plugins_menu, actions)
def create_toolbars_menu(self):
order = ['file_toolbar', 'run_toolbar', 'debug_toolbar',
'main_toolbar', 'Global working directory', None,
'search_toolbar', 'edit_toolbar', 'source_toolbar']
for toolbar in self.toolbarslist:
action = toolbar.toggleViewAction()
name = toolbar.objectName()
try:
pos = order.index(name)
except ValueError:
pos = None
if pos is not None:
order[pos] = action
else:
order.append(action)
add_actions(self.toolbars_menu, order)
def createPopupMenu(self):
if self.light:
            # Fall back to Qt's default dock/toolbar popup menu in light mode
            # (calling self.createPopupMenu() here would recurse forever)
            menu = QMainWindow.createPopupMenu(self)
else:
menu = QMenu('', self)
actions = self.help_menu_actions[:3] + \
[None, self.help_menu_actions[-1]]
add_actions(menu, actions)
return menu
def set_splash(self, message):
"""Set splash message"""
if message:
self.debug_print(message)
self.splash.show()
self.splash.showMessage(message, Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute, QColor(Qt.white))
QApplication.processEvents()
def remove_tmpdir(self):
"""Remove Spyder temporary directory"""
shutil.rmtree(programs.TEMPDIR, ignore_errors=True)
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.fullscreen_flag:
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
def hideEvent(self, event):
"""Reimplement Qt method"""
for plugin in self.widgetlist:
if plugin.isAncestorOf(self.last_focused_widget):
plugin.visibility_changed(True)
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
def closing(self, cancelable=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
prefix = ('lightwindow' if self.light else 'window') + '/'
self.save_current_window_settings(prefix)
for widget in self.widgetlist:
if not widget.closing_plugin(cancelable):
return False
self.dialog_manager.close_all()
self.already_closed = True
if CONF.get('main', 'single_instance'):
self.open_files_server.close()
return True
def add_dockwidget(self, child):
"""Add QDockWidget and toggleViewAction"""
dockwidget, location = child.create_dockwidget()
if CONF.get('main', 'vertical_dockwidget_titlebars'):
dockwidget.setFeatures(dockwidget.features()|
QDockWidget.DockWidgetVerticalTitleBar)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(child)
def close_current_dockwidget(self):
widget = QApplication.focusWidget()
for plugin in self.widgetlist:
if plugin.isAncestorOf(widget):
plugin.dockwidget.hide()
break
def __update_maximize_action(self):
if self.state_before_maximizing is None:
text = _("Maximize current pane")
tip = _("Maximize current pane")
icon = "maximize.png"
else:
text = _("Restore current pane")
tip = _("Restore pane to its original size")
icon = "unmaximize.png"
self.maximize_action.setText(text)
self.maximize_action.setIcon(get_icon(icon))
self.maximize_action.setToolTip(tip)
def maximize_dockwidget(self, restore=False):
"""Shortcut: Ctrl+Alt+Shift+M
First call: maximize current dockwidget
Second call (or restore=True): restore original window layout"""
if self.state_before_maximizing is None:
if restore:
return
# No plugin is currently maximized: maximizing focus plugin
self.state_before_maximizing = self.saveState()
focus_widget = QApplication.focusWidget()
for plugin in self.widgetlist:
plugin.dockwidget.hide()
if plugin.isAncestorOf(focus_widget):
self.last_plugin = plugin
self.last_plugin.dockwidget.toggleViewAction().setDisabled(True)
self.setCentralWidget(self.last_plugin)
self.last_plugin.ismaximized = True
# Workaround to solve an issue with editor's outline explorer:
# (otherwise the whole plugin is hidden and so is the outline explorer
# and the latter won't be refreshed if not visible)
self.last_plugin.show()
self.last_plugin.visibility_changed(True)
if self.last_plugin is self.editor:
# Automatically show the outline if the editor was maximized:
self.addDockWidget(Qt.RightDockWidgetArea,
self.outlineexplorer.dockwidget)
self.outlineexplorer.dockwidget.show()
else:
# Restore original layout (before maximizing current dockwidget)
self.last_plugin.dockwidget.setWidget(self.last_plugin)
self.last_plugin.dockwidget.toggleViewAction().setEnabled(True)
self.setCentralWidget(None)
self.last_plugin.ismaximized = False
self.restoreState(self.state_before_maximizing)
self.state_before_maximizing = None
self.last_plugin.get_focus_widget().setFocus()
self.__update_maximize_action()
def __update_fullscreen_action(self):
if self.isFullScreen():
icon = "window_nofullscreen.png"
else:
icon = "window_fullscreen.png"
self.fullscreen_action.setIcon(get_icon(icon))
def toggle_fullscreen(self):
if self.isFullScreen():
self.fullscreen_flag = False
self.showNormal()
if self.maximized_flag:
self.showMaximized()
else:
self.maximized_flag = self.isMaximized()
self.fullscreen_flag = True
self.showFullScreen()
self.__update_fullscreen_action()
def add_to_toolbar(self, toolbar, widget):
"""Add widget actions to toolbar"""
actions = widget.toolbar_actions
if actions is not None:
add_actions(toolbar, actions)
def about(self):
"""About Spyder"""
versions = get_versions()
# Show Mercurial revision for development version
revlink = ''
if versions['revision']:
revlink = " (<a href='http://code.google.com/p/spyderlib/source/"\
"detail?r=%s'>%s</a>)" % (
versions['revision'].split(':')[0].strip('+'),
versions['revision'])
QMessageBox.about(self,
_("About %s") % "Spyder",
"""<b>Spyder %s</b> %s
<br>The Scientific PYthon Development EnviRonment
<p>Copyright © 2009-2012 Pierre Raybaut
<br>Licensed under the terms of the MIT License
<p>Created by Pierre Raybaut
<br>Developed and maintained by the
<a href="%s/people/list">Spyder Development Team</a>
<br>Many thanks to all the Spyder beta-testers and regular users.
<p>Most of the icons come from the Crystal Project
(© 2006-2007 Everaldo Coelho). Other icons by
<a href="http://p.yusukekamiyamane.com/"> Yusuke Kamiyamane</a>
(All rights reserved) and by
<a href="http://www.oxygen-icons.org/">
The Oxygen icon theme</a>.
<p>Spyder's community:
<ul><li>Bug reports and feature requests:
<a href="%s">Google Code</a>
</li><li>Discussions around the project:
<a href="%s">Google Group</a>
</li></ul>
<p>This project is part of a larger effort to promote and
facilitate the use of Python for scientific and engineering
software development. The popular Python distributions
<a href="http://code.google.com/p/pythonxy/">Python(x,y)</a> and
<a href="http://winpython.sourceforge.net/">WinPython</a>
also contribute to this plan.
<p>Python %s %dbits, Qt %s, %s %s on %s"""
% (versions['spyder'], revlink, __project_url__,
__project_url__, __forum_url__, versions['python'],
versions['bitness'], versions['qt'], versions['qt_api'],
versions['qt_api_ver'], versions['system']))
def show_dependencies(self):
"""Show Spyder's Optional Dependencies dialog box"""
from spyderlib.widgets.dependencies import DependenciesDialog
dlg = DependenciesDialog(None)
dlg.set_data(dependencies.DEPENDENCIES)
dlg.show()
dlg.exec_()
def report_issue(self):
if PY3:
from urllib.parse import quote
else:
from urllib import quote # analysis:ignore
versions = get_versions()
# Get Mercurial revision for development version
revlink = ''
if versions['revision']:
full, short = versions['revision'].split(':')
full = full.strip('+')
if full:
revlink = " (%s:r%s)" % (short, full)
issue_template = """\
Spyder Version: %s%s
Python Version: %s
Qt Version : %s, %s %s on %s
%s
What steps will reproduce the problem?
1.
2.
3.
What is the expected output? What do you see instead?
Please provide any additional information below.
""" % (versions['spyder'],
revlink,
versions['python'],
versions['qt'],
versions['qt_api'],
versions['qt_api_ver'],
versions['system'],
dependencies.status())
url = QUrl("http://code.google.com/p/spyderlib/issues/entry")
url.addEncodedQueryItem("comment", quote(issue_template))
QDesktopServices.openUrl(url)
def google_group(self):
url = QUrl("http://groups.google.com/group/spyderlib")
QDesktopServices.openUrl(url)
#---- Global callbacks (called from plugins)
def get_current_editor_plugin(self):
"""Return editor plugin which has focus:
console, extconsole, editor, inspector or historylog"""
if self.light:
return self.extconsole
widget = QApplication.focusWidget()
from spyderlib.widgets.editor import TextEditBaseWidget
from spyderlib.widgets.shell import ShellBaseWidget
if not isinstance(widget, (TextEditBaseWidget, ShellBaseWidget)):
return
for plugin in self.widgetlist:
if plugin.isAncestorOf(widget):
return plugin
else:
# External Editor window
plugin = widget
from spyderlib.widgets.editor import EditorWidget
while not isinstance(plugin, EditorWidget):
plugin = plugin.parent()
return plugin
def find(self):
"""Global find callback"""
plugin = self.get_current_editor_plugin()
if plugin is not None:
plugin.find_widget.show()
plugin.find_widget.search_text.setFocus()
return plugin
def find_next(self):
"""Global find next callback"""
plugin = self.get_current_editor_plugin()
if plugin is not None:
plugin.find_widget.find_next()
def find_previous(self):
"""Global find previous callback"""
plugin = self.get_current_editor_plugin()
if plugin is not None:
plugin.find_widget.find_previous()
def replace(self):
"""Global replace callback"""
plugin = self.find()
if plugin is not None:
plugin.find_widget.show_replace()
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyderlib.widgets.editor import TextEditBaseWidget
if isinstance(widget, TextEditBaseWidget):
getattr(widget, callback)()
def redirect_internalshell_stdio(self, state):
if state:
self.console.shell.interpreter.redirect_stds()
else:
self.console.shell.interpreter.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
programs.run_python_script_in_terminal(fname, wdir, args,
interact, debug, python_args)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
else:
self.extconsole.visibility_changed(True)
self.extconsole.raise_()
self.extconsole.start(
fname=to_text_string(fname), wdir=to_text_string(wdir),
args=to_text_string(args), interact=interact,
debug=debug, python=python,
python_args=to_text_string(python_args) )
def execute_in_external_console(self, lines, focus_to_editor):
"""Execute lines in external or IPython console
        and optionally set focus back to the editor"""
console = self.extconsole
if self.ipyconsole is None\
or self.last_console_plugin_focus_was_python:
console = self.extconsole
else:
console = self.ipyconsole
console.visibility_changed(True)
console.raise_()
console.execute_python_code(lines)
if focus_to_editor:
self.editor.visibility_changed(True)
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
if ext in EDIT_EXT:
self.editor.load(fname)
elif self.variableexplorer is not None and ext in IMPORT_EXT\
and ext in ('.spydata', '.mat', '.npy', '.h5'):
self.variableexplorer.import_data(fname)
elif not external:
fname = file_uri(fname)
programs.start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
fname = encoding.to_unicode_from_fs(fname)
if osp.isfile(fname):
self.open_file(fname, external=True)
elif osp.isfile(osp.join(CWD, fname)):
self.open_file(osp.join(CWD, fname), external=True)
#---- PYTHONPATH management, etc.
def get_spyder_pythonpath(self):
"""Return Spyder PYTHONPATH"""
return self.path+self.project_path
def add_path_to_sys_path(self):
"""Add Spyder path to sys.path"""
for path in reversed(self.get_spyder_pythonpath()):
sys.path.insert(1, path)
def remove_path_from_sys_path(self):
"""Remove Spyder path from sys.path"""
sys_path = sys.path
while sys_path[1] in self.get_spyder_pythonpath():
sys_path.pop(1)
def path_manager_callback(self):
"""Spyder path manager"""
from spyderlib.widgets.pathmanager import PathManager
self.remove_path_from_sys_path()
project_pathlist = self.projectexplorer.get_pythonpath()
dialog = PathManager(self, self.path, project_pathlist, sync=True)
self.connect(dialog, SIGNAL('redirect_stdio(bool)'),
self.redirect_internalshell_stdio)
dialog.exec_()
self.add_path_to_sys_path()
encoding.writelines(self.path, self.SPYDER_PATH) # Saving path
self.emit(SIGNAL("pythonpath_changed()"))
def pythonpath_changed(self):
"""Project Explorer PYTHONPATH contribution has changed"""
self.remove_path_from_sys_path()
self.project_path = self.projectexplorer.get_pythonpath()
self.add_path_to_sys_path()
self.emit(SIGNAL("pythonpath_changed()"))
def win_env(self):
"""Show Windows current user environment variables"""
self.dialog_manager.show(WinUserEnvDialog(self))
#---- Preferences
def apply_settings(self):
"""Apply settings changed in 'Preferences' dialog box"""
qapp = QApplication.instance()
qapp.setStyle(CONF.get('main', 'windows_style', self.default_style))
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
if CONF.get('main', 'animated_docks'):
default = default|QMainWindow.AnimatedDocks
self.setDockOptions(default)
for child in self.widgetlist:
features = child.FEATURES
if CONF.get('main', 'vertical_dockwidget_titlebars'):
features = features|QDockWidget.DockWidgetVerticalTitleBar
child.dockwidget.setFeatures(features)
child.update_margins()
self.apply_statusbar_settings()
def apply_statusbar_settings(self):
"""Update status bar widgets settings"""
for widget, name in ((self.mem_status, 'memory_usage'),
(self.cpu_status, 'cpu_usage')):
if widget is not None:
widget.setVisible(CONF.get('main', '%s/enable' % name))
widget.set_interval(CONF.get('main', '%s/timeout' % name))
def edit_preferences(self):
"""Edit Spyder preferences"""
from spyderlib.plugins.configdialog import ConfigDialog
dlg = ConfigDialog(self)
self.connect(dlg, SIGNAL("size_change(QSize)"),
lambda s: self.set_prefs_size(s))
if self.prefs_dialog_size is not None:
dlg.resize(self.prefs_dialog_size)
for PrefPageClass in self.general_prefs:
widget = PrefPageClass(dlg, main=self)
widget.initialize()
dlg.add_page(widget)
for plugin in [self.workingdirectory, self.editor,
self.projectexplorer, self.extconsole, self.ipyconsole,
self.historylog, self.inspector, self.variableexplorer,
self.onlinehelp, self.explorer, self.findinfiles
]+self.thirdparty_plugins:
if plugin is not None:
widget = plugin.create_configwidget(dlg)
if widget is not None:
dlg.add_page(widget)
if self.prefs_index is not None:
dlg.set_current_index(self.prefs_index)
dlg.show()
dlg.check_all_settings()
self.connect(dlg.pages_widget, SIGNAL("currentChanged(int)"),
self.__preference_page_changed)
dlg.exec_()
def __preference_page_changed(self, index):
"""Preference page index has changed"""
self.prefs_index = index
def set_prefs_size(self, size):
"""Save preferences dialog size"""
self.prefs_dialog_size = size
#---- Shortcuts
def register_shortcut(self, qaction_or_qshortcut, context, name,
default=NoDefault):
"""
Register QAction or QShortcut to Spyder main application,
with shortcut (context, name, default)
"""
self.shortcut_data.append( (qaction_or_qshortcut,
context, name, default) )
self.apply_shortcuts()
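    # A hedged sketch of how a plugin would use the registry above
    # (illustrative only; `win`, `run_action` and the "Editor"/"run"/"F5"
    # values are assumptions, not taken from this file):
    #
    #     win.register_shortcut(run_action, context="Editor", name="run",
    #                           default="F5")
    #     # Editing the shortcut in Preferences and calling
    #     # win.apply_shortcuts() re-applies the new key sequence to run_action.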
def remove_deprecated_shortcuts(self):
"""Remove deprecated shortcuts"""
data = [(context, name) for (qobject, context, name,
default) in self.shortcut_data]
remove_deprecated_shortcuts(data)
def apply_shortcuts(self):
"""Apply shortcuts settings to all widgets/plugins"""
toberemoved = []
for index, (qobject, context, name,
default) in enumerate(self.shortcut_data):
keyseq = QKeySequence( get_shortcut(context, name, default) )
try:
if isinstance(qobject, QAction):
qobject.setShortcut(keyseq)
elif isinstance(qobject, QShortcut):
qobject.setKey(keyseq)
except RuntimeError:
# Object has been deleted
toberemoved.append(index)
for index in sorted(toberemoved, reverse=True):
self.shortcut_data.pop(index)
#---- Sessions
def load_session(self, filename=None):
"""Load session"""
if filename is None:
self.redirect_internalshell_stdio(False)
filename, _selfilter = getopenfilename(self, _("Open session"),
getcwd(), _("Spyder sessions")+" (*.session.tar)")
self.redirect_internalshell_stdio(True)
if not filename:
return
if self.close():
self.next_session_name = filename
def save_session(self):
"""Save session and quit application"""
self.redirect_internalshell_stdio(False)
filename, _selfilter = getsavefilename(self, _("Save session"),
getcwd(), _("Spyder sessions")+" (*.session.tar)")
self.redirect_internalshell_stdio(True)
if filename:
if self.close():
self.save_session_name = filename
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
while 1: # 1 is faster than True
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See Issue 1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
if e.args[0] == eintr:
continue
raise
fname = req.recv(1024)
if not self.light:
fname = fname.decode('utf-8')
self.emit(SIGNAL('open_external_file(QString)'), fname)
req.sendall(b' ')
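# A hedged client-side sketch of the single-instance protocol served above
# (illustrative only; `port` is an assumption and would be read back from
# CONF.get('main', 'open_files_port')):
#
#     import socket
#     client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     client.connect(('127.0.0.1', port))
#     client.sendall('foo.py'.encode('utf-8'))   # file name for the editor
#     client.recv(1024)                          # server acknowledges with b' '
#     client.close()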
#==============================================================================
# Utilities to create the 'main' function
#==============================================================================
def initialize():
"""Initialize Qt, patching sys.exit and eventually setting up ETS"""
app = qapplication()
#----Monkey patching PyQt4.QtGui.QApplication
class FakeQApplication(QApplication):
"""Spyder's fake QApplication"""
def __init__(self, args):
self = app # analysis:ignore
@staticmethod
def exec_():
"""Do nothing because the Qt mainloop is already running"""
pass
from spyderlib.qt import QtGui
QtGui.QApplication = FakeQApplication
#----Monkey patching rope
try:
from spyderlib import rope_patch
rope_patch.apply()
except ImportError:
# rope 0.9.2/0.9.3 is not installed
pass
#----Monkey patching sys.exit
def fake_sys_exit(arg=[]):
pass
sys.exit = fake_sys_exit
# Removing arguments from sys.argv as in standard Python interpreter
sys.argv = ['']
# Selecting Qt4 backend for Enthought Tool Suite (if installed)
try:
from enthought.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
except ImportError:
pass
#----Monkey patching rope (if installed)
# Compatibility with new Mercurial API (>= 1.3).
# New versions of rope (> 0.9.2) already handle this issue
try:
import rope
if rope.VERSION == '0.9.2':
import rope.base.fscommands
class MercurialCommands(rope.base.fscommands.MercurialCommands):
def __init__(self, root):
self.hg = self._import_mercurial()
self.normal_actions = rope.base.fscommands.FileSystemCommands()
try:
self.ui = self.hg.ui.ui(
verbose=False, debug=False, quiet=True,
interactive=False, traceback=False,
report_untrusted=False)
except:
self.ui = self.hg.ui.ui()
self.ui.setconfig('ui', 'interactive', 'no')
self.ui.setconfig('ui', 'debug', 'no')
self.ui.setconfig('ui', 'traceback', 'no')
self.ui.setconfig('ui', 'verbose', 'no')
self.ui.setconfig('ui', 'report_untrusted', 'no')
self.ui.setconfig('ui', 'quiet', 'yes')
self.repo = self.hg.hg.repository(self.ui, root)
rope.base.fscommands.MercurialCommands = MercurialCommands
except ImportError:
pass
return app
class Spy(object):
"""Inspect Spyder internals"""
def __init__(self, app, window):
self.app = app
self.window = window
def __dir__(self):
return list(self.__dict__.keys()) +\
[x for x in dir(self.__class__) if x[0] != '_']
def versions(self):
return get_versions()
def run_spyder(app, options, args):
"""
Create and show Spyder's main window
Patch matplotlib for figure integration
Start QApplication event loop
"""
#TODO: insert here
# Main window
main = MainWindow(options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.shell.exit_interpreter()
except BaseException:
pass
raise
main.show()
main.post_visible_setup()
if main.console:
main.console.shell.interpreter.namespace['spy'] = \
Spy(app=app, window=main)
# Open external files passed as args
if args:
for a in args:
main.open_external_file(a)
# Open external files with our Mac app
if sys.platform == "darwin" and 'Spyder.app' in __file__:
main.connect(app, SIGNAL('open_external_file(QString)'),
lambda fname: main.open_external_file(fname))
# To give focus again to the last focused widget after restoring
# the window
main.connect(app, SIGNAL('focusChanged(QWidget*, QWidget*)'),
main.change_last_focused_widget)
app.exec_()
return main
def __remove_temp_session():
if osp.isfile(TEMP_SESSION_PATH):
os.remove(TEMP_SESSION_PATH)
#==============================================================================
# Main
#==============================================================================
def main():
"""Session manager"""
__remove_temp_session()
# **** Collect command line options ****
# Note regarding Options:
# It's important to collect options before monkey patching sys.exit,
    # otherwise optparse won't be able to exit if the --help option is passed
options, args = get_options()
if set_attached_console_visible is not None:
set_attached_console_visible(DEBUG or options.show_console\
or options.reset_session\
or options.reset_to_defaults\
or options.optimize)
app = initialize()
if options.reset_session:
# <!> Remove all configuration files!
reset_session()
# CONF.reset_to_defaults(save=True)
return
elif options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults(save=True)
return
elif options.optimize:
# Optimize the whole Spyder's source code directory
import spyderlib
programs.run_python_script(module="compileall",
args=[spyderlib.__path__[0]], p_args=['-O'])
return
if CONF.get('main', 'crash', False):
CONF.set('main', 'crash', False)
SPLASH.hide()
QMessageBox.information(None, "Spyder",
"Spyder crashed during last session.<br><br>"
"If Spyder does not start at all and <u>before submitting a "
"bug report</u>, please try to reset settings to defaults by "
"running Spyder with the command line option '--reset':<br>"
"<span style=\'color: #555555\'><b>python spyder --reset"
"</b></span><br><br>"
"<span style=\'color: #ff5555\'><b>Warning:</b></span> "
"this command will remove all your Spyder configuration files "
"located in '%s').<br><br>"
"If restoring the default settings does not help, please take "
"the time to search for <a href=\"%s\">known bugs</a> or "
"<a href=\"%s\">discussions</a> matching your situation before "
"eventually creating a new issue <a href=\"%s\">here</a>. "
"Your feedback will always be greatly appreciated."
"" % (get_conf_path(), __project_url__,
__forum_url__, __project_url__))
next_session_name = options.startup_session
while is_text_string(next_session_name):
if next_session_name:
error_message = load_session(next_session_name)
if next_session_name == TEMP_SESSION_PATH:
__remove_temp_session()
if error_message is None:
CONF.load_from_ini()
else:
print(error_message)
QMessageBox.critical(None, "Load session",
u("<b>Unable to load '%s'</b><br><br>Error message:<br>%s")
% (osp.basename(next_session_name), error_message))
mainwindow = None
try:
mainwindow = run_spyder(app, options, args)
except BaseException:
CONF.set('main', 'crash', True)
import traceback
traceback.print_exc(file=STDERR)
traceback.print_exc(file=open('spyder_crash.log', 'w'))
if mainwindow is None:
        # An exception occurred
SPLASH.hide()
return
next_session_name = mainwindow.next_session_name
save_session_name = mainwindow.save_session_name
if next_session_name is not None:
#-- Loading session
# Saving current session in a temporary file
# but only if we are not currently trying to reopen it!
if next_session_name != TEMP_SESSION_PATH:
save_session_name = TEMP_SESSION_PATH
if save_session_name:
#-- Saving session
error_message = save_session(save_session_name)
if error_message is not None:
QMessageBox.critical(None, "Save session",
u("<b>Unable to save '%s'</b><br><br>Error message:<br>%s")
% (osp.basename(save_session_name), error_message))
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
| apache-2.0 | harisbal/pandas | pandas/io/formats/printing.py | 6 | 13133 |
"""
printing tools
"""
import sys
from pandas.core.dtypes.inference import is_sequence
from pandas import compat
from pandas.compat import u
from pandas.core.config import get_option
def adjoin(space, *lists, **kwargs):
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
    Parameters
    ----------
space : int
number of spaces for padding
lists : str
        list of str to be joined
strlen : callable
function used to calculate the length of each str. Needed for unicode
handling.
justfunc : callable
function used to justify str. Needed for unicode handling.
"""
strlen = kwargs.pop('strlen', len)
justfunc = kwargs.pop('justfunc', justify)
out_lines = []
newLists = []
lengths = [max(map(strlen, x)) + space for x in lists[:-1]]
# not the last one
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = justfunc(lst, lengths[i], mode='left')
nl.extend([' ' * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
out_lines.append(_join_unicode(lines))
return _join_unicode(out_lines, sep='\n')
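# A hedged usage sketch for adjoin (illustrative only): every column except the
# last is left-justified and padded with `space` extra characters.
#
#     >>> adjoin(2, ['a', 'bb'], ['ccc', 'ddd']).split('\n')
#     ['a   ccc', 'bb  ddd']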
def justify(texts, max_len, mode='right'):
"""
Perform ljust, center, rjust against string or list-like
"""
if mode == 'left':
return [x.ljust(max_len) for x in texts]
elif mode == 'center':
return [x.center(max_len) for x in texts]
else:
return [x.rjust(max_len) for x in texts]
def _join_unicode(lines, sep=''):
try:
return sep.join(lines)
except UnicodeDecodeError:
sep = compat.text_type(sep)
return sep.join([x.decode('utf-8') if isinstance(x, str) else x
for x in lines])
# Unicode consolidation
# ---------------------
#
# pprinting utility functions for generating Unicode text or
# bytes(3.x)/str(2.x) representations of objects.
# Try to use these as much as possible rather than rolling your own.
#
# When to use
# -----------
#
# 1) If you're writing code internal to pandas (no I/O directly involved),
# use pprint_thing().
#
# It will always return unicode text which can be handled by other
# parts of the package without breakage.
#
# 2) if you need to write something out to file, use
# pprint_thing_encoded(encoding).
#
# If no encoding is specified, it defaults to utf-8. Since encoding pure
# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're
# working with straight ascii.
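# A short, hedged illustration of the two entry points described above
# (outputs shown as they would appear on Python 3):
#
#     >>> pprint_thing({'a': [1, 2]})
#     "{'a': [1, 2]}"
#     >>> pprint_thing_encoded(u'\u03b1')   # Greek alpha as utf-8 bytes
#     b'\xce\xb1'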
def _pprint_seq(seq, _nest_lvl=0, max_seq_items=None, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
    rather than calling this directly.
bounds length of printed sequence, depending on options
"""
if isinstance(seq, set):
fmt = u("{{{body}}}")
else:
fmt = u("[{body}]") if hasattr(seq, '__setitem__') else u("({body})")
if max_seq_items is False:
nitems = len(seq)
else:
nitems = max_seq_items or get_option("max_seq_items") or len(seq)
s = iter(seq)
r = []
for i in range(min(nitems, len(seq))): # handle sets, no slicing
r.append(pprint_thing(
next(s), _nest_lvl + 1, max_seq_items=max_seq_items, **kwds))
body = ", ".join(r)
if nitems < len(seq):
body += ", ..."
elif isinstance(seq, tuple) and len(seq) == 1:
body += ','
return fmt.format(body=body)
def _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
    rather than calling this directly.
"""
fmt = u("{{{things}}}")
pairs = []
pfmt = u("{key}: {val}")
if max_seq_items is False:
nitems = len(seq)
else:
nitems = max_seq_items or get_option("max_seq_items") or len(seq)
for k, v in list(seq.items())[:nitems]:
pairs.append(
pfmt.format(
key=pprint_thing(k, _nest_lvl + 1,
max_seq_items=max_seq_items, **kwds),
val=pprint_thing(v, _nest_lvl + 1,
max_seq_items=max_seq_items, **kwds)))
if nitems < len(seq):
return fmt.format(things=", ".join(pairs) + ", ...")
else:
return fmt.format(things=", ".join(pairs))
def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False,
quote_strings=False, max_seq_items=None):
"""
This function is the sanctioned way of converting objects
to a unicode representation.
properly handles nested sequences containing unicode strings
(unicode(object) does not)
Parameters
----------
thing : anything to be formatted
_nest_lvl : internal use only. pprint_thing() is mutually-recursive
with pprint_sequence, this argument is used to keep track of the
current nesting level, and limit it.
escape_chars : list or dict, optional
Characters to escape. If a dict is passed the values are the
replacements
default_escapes : bool, default False
        Whether the input escape characters replace or add to the defaults
max_seq_items : False, int, default None
Pass thru to other pretty printers to limit sequence printing
Returns
-------
result - unicode object on py2, str on py3. Always Unicode.
"""
def as_escaped_unicode(thing, escape_chars=escape_chars):
# Unicode is fine, else we try to decode using utf-8 and 'replace'
# if that's not it either, we have no way of knowing and the user
# should deal with it himself.
try:
result = compat.text_type(thing) # we should try this first
except UnicodeDecodeError:
# either utf-8 or we replace errors
result = str(thing).decode('utf-8', "replace")
translate = {'\t': r'\t', '\n': r'\n', '\r': r'\r', }
if isinstance(escape_chars, dict):
if default_escapes:
translate.update(escape_chars)
else:
translate = escape_chars
escape_chars = list(escape_chars.keys())
else:
escape_chars = escape_chars or tuple()
for c in escape_chars:
result = result.replace(c, translate[c])
return compat.text_type(result)
if (compat.PY3 and hasattr(thing, '__next__')) or hasattr(thing, 'next'):
return compat.text_type(thing)
elif (isinstance(thing, dict) and
_nest_lvl < get_option("display.pprint_nest_depth")):
result = _pprint_dict(thing, _nest_lvl, quote_strings=True,
max_seq_items=max_seq_items)
elif (is_sequence(thing) and
_nest_lvl < get_option("display.pprint_nest_depth")):
result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars,
quote_strings=quote_strings,
max_seq_items=max_seq_items)
elif isinstance(thing, compat.string_types) and quote_strings:
if compat.PY3:
fmt = u("'{thing}'")
else:
fmt = u("u'{thing}'")
result = fmt.format(thing=as_escaped_unicode(thing))
else:
result = as_escaped_unicode(thing)
return compat.text_type(result) # always unicode
def pprint_thing_encoded(object, encoding='utf-8', errors='replace', **kwds):
value = pprint_thing(object) # get unicode representation of object
return value.encode(encoding, errors, **kwds)
def _enable_data_resource_formatter(enable):
if 'IPython' not in sys.modules:
# definitely not in IPython
return
from IPython import get_ipython
ip = get_ipython()
if ip is None:
# still not in IPython
return
formatters = ip.display_formatter.formatters
mimetype = "application/vnd.dataresource+json"
if enable:
if mimetype not in formatters:
# define tableschema formatter
from IPython.core.formatters import BaseFormatter
class TableSchemaFormatter(BaseFormatter):
print_method = '_repr_data_resource_'
_return_type = (dict,)
# register it:
formatters[mimetype] = TableSchemaFormatter()
# enable it if it's been disabled:
formatters[mimetype].enabled = True
else:
# unregister tableschema mime-type
if mimetype in formatters:
formatters[mimetype].enabled = False
default_pprint = lambda x, max_seq_items=None: \
pprint_thing(x, escape_chars=('\t', '\r', '\n'), quote_strings=True,
max_seq_items=max_seq_items)
def format_object_summary(obj, formatter, is_justify=True, name=None):
"""
Return the formatted obj as a unicode string
Parameters
----------
obj : object
must be iterable and support __getitem__
formatter : callable
string formatter for an element
is_justify : boolean
should justify the display
    name : name, optional
defaults to the class name of the obj
Returns
-------
summary string
"""
from pandas.io.formats.console import get_console_size
from pandas.io.formats.format import _get_adjustment
display_width, _ = get_console_size()
if display_width is None:
display_width = get_option('display.width') or 80
if name is None:
name = obj.__class__.__name__
space1 = "\n%s" % (' ' * (len(name) + 1))
space2 = "\n%s" % (' ' * (len(name) + 2))
n = len(obj)
sep = ','
max_seq_items = get_option('display.max_seq_items') or n
# are we a truncated display
is_truncated = n > max_seq_items
# adj can optionally handle unicode eastern asian width
adj = _get_adjustment()
def _extend_line(s, line, value, display_width, next_line_prefix):
if (adj.len(line.rstrip()) + adj.len(value.rstrip()) >=
display_width):
s += line.rstrip()
line = next_line_prefix
line += value
return s, line
def best_len(values):
if values:
return max(adj.len(x) for x in values)
else:
return 0
if n == 0:
summary = '[], '
elif n == 1:
first = formatter(obj[0])
summary = '[%s], ' % first
elif n == 2:
first = formatter(obj[0])
last = formatter(obj[-1])
summary = '[%s, %s], ' % (first, last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in obj[:n]]
tail = [formatter(x) for x in obj[-n:]]
else:
head = []
tail = [formatter(x) for x in obj]
# adjust all values to max length if needed
if is_justify:
# however, if we are not truncated and we are only a single
# line, then don't justify
if (is_truncated or
not (len(', '.join(head)) < display_width and
len(', '.join(tail)) < display_width)):
max_len = max(best_len(head), best_len(tail))
head = [x.rjust(max_len) for x in head]
tail = [x.rjust(max_len) for x in tail]
summary = ""
line = space2
for i in range(len(head)):
word = head[i] + sep + ' '
summary, line = _extend_line(summary, line, word,
display_width, space2)
if is_truncated:
# remove trailing space of last line
summary += line.rstrip() + space2 + '...'
line = space2
for i in range(len(tail) - 1):
word = tail[i] + sep + ' '
summary, line = _extend_line(summary, line, word,
display_width, space2)
# last value: no sep added + 1 space of width used for trailing ','
summary, line = _extend_line(summary, line, tail[-1],
display_width - 2, space2)
summary += line
summary += '],'
if len(summary) > (display_width):
summary += space1
else: # one row
summary += ' '
# remove initial space
summary = '[' + summary[len(space2):]
return summary
def format_object_attrs(obj):
"""
Return a list of tuples of the (attr, formatted_value)
for common attrs, including dtype, name, length
Parameters
----------
obj : object
must be iterable
Returns
-------
list
"""
attrs = []
if hasattr(obj, 'dtype'):
attrs.append(('dtype', "'{}'".format(obj.dtype)))
if getattr(obj, 'name', None) is not None:
attrs.append(('name', default_pprint(obj.name)))
max_seq_items = get_option('display.max_seq_items') or len(obj)
if len(obj) > max_seq_items:
attrs.append(('length', len(obj)))
return attrs
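# A hedged example of the helper above (illustrative only; assumes pandas
# itself is importable, which holds inside this package):
#
#     >>> import pandas as pd
#     >>> format_object_attrs(pd.Index([1, 2, 3], name='x'))
#     [('dtype', "'int64'"), ('name', "'x'")]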
| bsd-3-clause | derekjchow/models | research/lfads/synth_data/synthetic_data_utils.py | 6 | 11355 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
from __future__ import print_function
import h5py
import numpy as np
import os
from utils import write_datasets
import matplotlib
import matplotlib.pyplot as plt
import scipy.signal
def generate_rnn(rng, N, g, tau, dt, max_firing_rate):
"""Create a (vanilla) RNN with a bunch of hyper parameters for generating
chaotic data.
Args:
rng: numpy random number generator
N: number of hidden units
g: scaling of recurrent weight matrix in g W, with W ~ N(0,1/N)
tau: time scale of individual unit dynamics
dt: time step for equation updates
    max_firing_rate: how to rescale the -1,1 firing rates
Returns:
the dictionary of these parameters, plus some others.
"""
rnn = {}
rnn['N'] = N
rnn['W'] = rng.randn(N,N)/np.sqrt(N)
rnn['Bin'] = rng.randn(N)/np.sqrt(1.0)
rnn['Bin2'] = rng.randn(N)/np.sqrt(1.0)
rnn['b'] = np.zeros(N)
rnn['g'] = g
rnn['tau'] = tau
rnn['dt'] = dt
rnn['max_firing_rate'] = max_firing_rate
mfr = rnn['max_firing_rate'] # spikes / sec
nbins_per_sec = 1.0/rnn['dt'] # bins / sec
# Used for plotting in LFADS
rnn['conversion_factor'] = mfr / nbins_per_sec # spikes / bin
return rnn
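# A hedged usage sketch (illustrative only; the particular hyperparameter
# values below are assumptions, not taken from this file):
#
#     rng = np.random.RandomState(0)
#     rnn = generate_rnn(rng, N=50, g=1.5, tau=0.025, dt=0.01,
#                        max_firing_rate=30)
#     # rnn['conversion_factor'] == 30 * 0.01 == 0.3 spikes per bin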
def generate_data(rnn, T, E, x0s=None, P_sxn=None, input_magnitude=0.0,
input_times=None):
""" Generates data from an randomly initialized RNN.
Args:
rnn: the rnn
    T: Time in seconds to run (divided by rnn['dt'] to get steps, rounded down).
E: total number of examples
S: number of samples (subsampling N)
Returns:
A list of length E of NxT tensors of the network being run.
"""
N = rnn['N']
def run_rnn(rnn, x0, ntime_steps, input_time=None):
rs = np.zeros([N,ntime_steps])
x_tm1 = x0
r_tm1 = np.tanh(x0)
tau = rnn['tau']
dt = rnn['dt']
alpha = (1.0-dt/tau)
W = dt/tau*rnn['W']*rnn['g']
Bin = dt/tau*rnn['Bin']
Bin2 = dt/tau*rnn['Bin2']
b = dt/tau*rnn['b']
us = np.zeros([1, ntime_steps])
for t in range(ntime_steps):
x_t = alpha*x_tm1 + np.dot(W,r_tm1) + b
if input_time is not None and t == input_time:
us[0,t] = input_magnitude
x_t += Bin * us[0,t] # DCS is this what was used?
r_t = np.tanh(x_t)
x_tm1 = x_t
r_tm1 = r_t
rs[:,t] = r_t
return rs, us
if P_sxn is None:
P_sxn = np.eye(N)
ntime_steps = int(T / rnn['dt'])
data_e = []
inputs_e = []
for e in range(E):
input_time = input_times[e] if input_times is not None else None
r_nxt, u_uxt = run_rnn(rnn, x0s[:,e], ntime_steps, input_time)
r_sxt = np.dot(P_sxn, r_nxt)
inputs_e.append(u_uxt)
data_e.append(r_sxt)
S = P_sxn.shape[0]
data_e = normalize_rates(data_e, E, S)
return data_e, x0s, inputs_e
def normalize_rates(data_e, E, S):
# Normalization, made more complex because of the P matrices.
# Normalize by min and max in each channel. This normalization will
# cause offset differences between identical rnn runs, but different
# t hits.
for e in range(E):
r_sxt = data_e[e]
for i in range(S):
rmin = np.min(r_sxt[i,:])
rmax = np.max(r_sxt[i,:])
assert rmax - rmin != 0, 'Something wrong'
r_sxt[i,:] = (r_sxt[i,:] - rmin)/(rmax-rmin)
data_e[e] = r_sxt
return data_e
def spikify_data(data_e, rng, dt=1.0, max_firing_rate=100):
""" Apply spikes to a continuous dataset whose values are between 0.0 and 1.0
Args:
data_e: nexamples length list of NxT trials
dt: how often the data are sampled
max_firing_rate: the firing rate that is associated with a value of 1.0
Returns:
spikified_e: a list of length b of the data represented as spikes,
sampled from the underlying poisson process.
"""
E = len(data_e)
spikes_e = []
for e in range(E):
data = data_e[e]
N,T = data.shape
data_s = np.zeros([N,T]).astype(np.int)
for n in range(N):
f = data[n,:]
s = rng.poisson(f*max_firing_rate*dt, size=T)
data_s[n,:] = s
spikes_e.append(data_s)
return spikes_e
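# A hedged sketch of spikifying one normalized rate trial (illustrative only;
# the toy rates below are assumptions):
#
#     rng = np.random.RandomState(0)
#     rates = [np.linspace(0.0, 1.0, 100)[np.newaxis, :]]   # one 1 x 100 trial
#     spikes = spikify_data(rates, rng, dt=0.01, max_firing_rate=100)
#     # spikes[0][0, t] is drawn from Poisson(rates[0][0, t] * 100 * 0.01)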
def gaussify_data(data_e, rng, dt=1.0, max_firing_rate=100):
""" Apply gaussian noise to a continuous dataset whose values are between
0.0 and 1.0
Args:
data_e: nexamples length list of NxT trials
dt: how often the data are sampled
max_firing_rate: the firing rate that is associated with a value of 1.0
Returns:
gauss_e: a list of length b of the data with noise.
"""
E = len(data_e)
mfr = max_firing_rate
gauss_e = []
for e in range(E):
data = data_e[e]
N,T = data.shape
noisy_data = data * mfr + np.random.randn(N,T) * (5.0*mfr) * np.sqrt(dt)
gauss_e.append(noisy_data)
return gauss_e
def get_train_n_valid_inds(num_trials, train_fraction, nreplications):
"""Split the numbers between 0 and num_trials-1 into two portions for
training and validation, based on the train fraction.
Args:
num_trials: the number of trials
train_fraction: (e.g. .80)
nreplications: the number of spiking trials per initial condition
Returns:
a 2-tuple of two lists: the training indices and validation indices
"""
train_inds = []
valid_inds = []
for i in range(num_trials):
# This line divides up the trials so that within one initial condition,
# the randomness of spikifying the condition is shared among both
# training and validation data splits.
if (i % nreplications)+1 > train_fraction * nreplications:
valid_inds.append(i)
else:
train_inds.append(i)
return train_inds, valid_inds
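# A hedged worked example of the split rule above (illustrative only): with
# num_trials=10, train_fraction=0.8 and nreplications=5, the last trial of
# every replication block goes to validation.
#
#     >>> get_train_n_valid_inds(10, 0.8, 5)
#     ([0, 1, 2, 3, 5, 6, 7, 8], [4, 9])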
def split_list_by_inds(data, inds1, inds2):
"""Take the data, a list, and split it up based on the indices in inds1 and
inds2.
Args:
data: the list of data to split
    inds1: the first list of indices
    inds2: the second list of indices
Returns: a 2-tuple of two lists.
"""
if data is None or len(data) == 0:
return [], []
else:
dout1 = [data[i] for i in inds1]
dout2 = [data[i] for i in inds2]
return dout1, dout2
def nparray_and_transpose(data_a_b_c):
"""Convert the list of items in data to a numpy array, and transpose it
Args:
data: data_asbsc: a nested, nested list of length a, with sublist length
b, with sublist length c.
Returns:
a numpy 3-tensor with dimensions a x c x b
"""
data_axbxc = np.array([datum_b_c for datum_b_c in data_a_b_c])
data_axcxb = np.transpose(data_axbxc, axes=[0,2,1])
return data_axcxb
def add_alignment_projections(datasets, npcs, ntime=None, nsamples=None):
"""Create a matrix that aligns the datasets a bit, under
the assumption that each dataset is observing the same underlying dynamical
system.
Args:
datasets: The dictionary of dataset structures.
npcs: The number of pcs for each, basically like lfads factors.
nsamples (optional): Number of samples to take for each dataset.
ntime (optional): Number of time steps to take in each sample.
Returns:
The dataset structures, with the field alignment_matrix_cxf added.
This is # channels x npcs dimension
"""
nchannels_all = 0
channel_idxs = {}
conditions_all = {}
nconditions_all = 0
for name, dataset in datasets.items():
cidxs = np.where(dataset['P_sxn'])[1] # non-zero entries in columns
channel_idxs[name] = [cidxs[0], cidxs[-1]+1]
nchannels_all += cidxs[-1]+1 - cidxs[0]
conditions_all[name] = np.unique(dataset['condition_labels_train'])
all_conditions_list = \
np.unique(np.ndarray.flatten(np.array(conditions_all.values())))
nconditions_all = all_conditions_list.shape[0]
if ntime is None:
ntime = dataset['train_data'].shape[1]
if nsamples is None:
nsamples = dataset['train_data'].shape[0]
# In the data workup in the paper, Chethan did intra condition
# averaging, so let's do that here.
avg_data_all = {}
for name, conditions in conditions_all.items():
dataset = datasets[name]
avg_data_all[name] = {}
for cname in conditions:
td_idxs = np.argwhere(np.array(dataset['condition_labels_train'])==cname)
data = np.squeeze(dataset['train_data'][td_idxs,:,:], axis=1)
avg_data = np.mean(data, axis=0)
avg_data_all[name][cname] = avg_data
# Visualize this in the morning.
all_data_nxtc = np.zeros([nchannels_all, ntime * nconditions_all])
for name, dataset in datasets.items():
cidx_s = channel_idxs[name][0]
cidx_f = channel_idxs[name][1]
for cname in conditions_all[name]:
cidxs = np.argwhere(all_conditions_list == cname)
if cidxs.shape[0] > 0:
cidx = cidxs[0][0]
all_tidxs = np.arange(0, ntime+1) + cidx*ntime
all_data_nxtc[cidx_s:cidx_f, all_tidxs[0]:all_tidxs[-1]] = \
avg_data_all[name][cname].T
# A bit of filtering. We don't care about spectral properties, or
# filtering artifacts, simply correlate time steps a bit.
filt_len = 6
bc_filt = np.ones([filt_len])/float(filt_len)
for c in range(nchannels_all):
all_data_nxtc[c,:] = scipy.signal.filtfilt(bc_filt, [1.0], all_data_nxtc[c,:])
# Compute the PCs.
all_data_mean_nx1 = np.mean(all_data_nxtc, axis=1, keepdims=True)
all_data_zm_nxtc = all_data_nxtc - all_data_mean_nx1
corr_mat_nxn = np.dot(all_data_zm_nxtc, all_data_zm_nxtc.T)
evals_n, evecs_nxn = np.linalg.eigh(corr_mat_nxn)
sidxs = np.flipud(np.argsort(evals_n)) # sort such that 0th is highest
evals_n = evals_n[sidxs]
evecs_nxn = evecs_nxn[:,sidxs]
# Project all the channels data onto the low-D PCA basis, where
# low-d is the npcs parameter.
all_data_pca_pxtc = np.dot(evecs_nxn[:, 0:npcs].T, all_data_zm_nxtc)
# Now for each dataset, we regress the channel data onto the top
# pcs, and this will be our alignment matrix for that dataset.
# |B - A*W|^2
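  # Concretely, A = all_data_zm_chxtc.T is (time*conditions) x channels,
  # B = all_data_pca_pxtc.T is (time*conditions) x npcs, and lstsq returns
  # W_chxp with shape channels x npcs minimizing |B - A*W|^2.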
for name, dataset in datasets.items():
cidx_s = channel_idxs[name][0]
cidx_f = channel_idxs[name][1]
all_data_zm_chxtc = all_data_zm_nxtc[cidx_s:cidx_f,:] # ch for channel
W_chxp, _, _, _ = \
np.linalg.lstsq(all_data_zm_chxtc.T, all_data_pca_pxtc.T)
dataset['alignment_matrix_cxf'] = W_chxp
alignment_bias_cx1 = all_data_mean_nx1[cidx_s:cidx_f]
dataset['alignment_bias_c'] = np.squeeze(alignment_bias_cx1, axis=1)
do_debug_plot = False
if do_debug_plot:
pc_vecs = evecs_nxn[:,0:npcs]
ntoplot = 400
plt.figure()
plt.plot(np.log10(evals_n), '-x')
plt.figure()
plt.subplot(311)
plt.imshow(all_data_pca_pxtc)
plt.colorbar()
plt.subplot(312)
plt.imshow(np.dot(W_chxp.T, all_data_zm_chxtc))
plt.colorbar()
plt.subplot(313)
plt.imshow(np.dot(all_data_zm_chxtc.T, W_chxp).T - all_data_pca_pxtc)
plt.colorbar()
import pdb
pdb.set_trace()
return datasets
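# A minimal usage sketch (illustrative only; the required dataset fields are
# inferred from the function body above rather than from any documentation):
# each dataset entry needs 'train_data' (trials x time x channels), 'P_sxn'
# (whose non-zero columns mark this dataset's channel range), and
# 'condition_labels_train' (one label per trial).
def _example_add_alignment_projections(datasets):
  datasets = add_alignment_projections(datasets, npcs=10)
  for name, d in datasets.items():
    print(name, d['alignment_matrix_cxf'].shape, d['alignment_bias_c'].shape)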
|
apache-2.0
|
arbuz001/sms-tools
|
workspace/A6/A6Part2.py
|
2
|
12166
|
import os
import sys
import numpy as np
import math
from scipy.signal import get_window
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../software/models/'))
import utilFunctions as UF
import harmonicModel as HM
import stft
eps = np.finfo(float).eps
#sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../A2/'))
#import A2Part1
"""
A6Part2 - Segmentation of stable note regions in an audio signal
Complete the function segmentStableNotesRegions() to identify the stable regions of notes in a specific
monophonic audio signal. The function returns an array of segments where each segment contains the
starting and the ending frame index of a stable note.
The input arguments to the function are the wav file name including the path (inputFile), the threshold to
be used for deciding stable notes (stdThsld) in cents, minimum allowed duration of a stable note (minNoteDur),
number of samples to be considered for computing standard deviation (winStable), analysis window (window),
window size (M), FFT size (N), hop size (H), error threshold used in the f0 detection (f0et), magnitude
threshold for spectral peak picking (t), minimum allowed f0 (minf0) and maximum allowed f0 (maxf0).
The function returns a numpy array of shape (k,2), where k is the total number of detected segments.
The two columns in each row contain the starting and the ending frame indexes of a stable note segment.
The segments must be returned in the increasing order of their start times.
In order to facilitate the assignment we have configured the input parameters to work with a particular
sound, '../../sounds/sax-phrase-short.wav'. The code and parameters to estimate the fundamental frequency
are already provided. Thus, you start from an f0 curve obtained using the f0Detection() function and use
that to obtain the note segments.
All the steps to be implemented in order to solve this question are indicated in segmentStableNotesRegions()
as comments. These are the steps:
1. In order to make the processing musically relevant, the f0 values should be converted first from
Hertz to Cents, which is a logarithmic scale.
2. At each time frame (for each f0 value) you should compute the standard deviation of the past winStable
number of f0 samples (including the f0 sample at the current audio frame).
3. You should then apply a deviation threshold, stdThsld, to determine if the current frame belongs
to a stable note region or not. Since we are interested in the stable note regions, the standard
deviation of the previous winStable number of f0 samples (including the current sample) should be less
than stdThsld i.e. use the current sample and winStable-1 previous samples. Ignore the first winStable-1
samples in this computation.
4. All the consecutive frames belonging to the stable note regions should be grouped together into
segments. For example, if the indexes of the frames corresponding to the stable note regions are
3,4,5,6,12,13,14, we get two segments, first 3-6 and second 12-14.
5. After grouping frame indexes into segments filter/remove the segments which are smaller in duration
than minNoteDur. Return the segment indexes in the increasing order of their start frame index.
Test case 1: Using inputFile='../../sounds/cello-phrase.wav', stdThsld=10, minNoteDur=0.1,
winStable = 3, window='hamming', M=1025, N=2048, H=256, f0et=5.0, t=-100, minf0=310, maxf0=650,
the function segmentStableNotesRegions() should return 9 segments. Please use loadTestcases.load()
to check the expected segment indexes in the output.
Test case 2: Using inputFile='../../sounds/cello-phrase.wav', stdThsld=20, minNoteDur=0.5,
winStable = 3, window='hamming', M=1025, N=2048, H=256, f0et=5.0, t=-100, minf0=310, maxf0=650,
the function segmentStableNotesRegions() should return 6 segments. Please use loadTestcases.load()
to check the expected segment indexes in the output.
Test case 3: Using inputFile='../../sounds/sax-phrase-short.wav', stdThsld=5, minNoteDur=0.6,
winStable = 3, window='hamming', M=1025, N=2048, H=256, f0et=5.0, t=-100, minf0=310, maxf0=650,
the function segmentStableNotesRegions() should return just one segment. Please use loadTestcases.load()
to check the expected segment indexes in the output.
We also provide the function plotSpectogramF0Segments() to plot the f0 contour and the detected
segments on top of the spectrogram of the audio signal in order to visually analyse the outcome
of your function. Depending on the analysis parameters and the capabilities of the hardware you
use, the function might take a while to run (even half a minute in some cases).
"""
def segmentStableNotesRegions(inputFile = '../../sounds/sax-phrase-short.wav', stdThsld=10, minNoteDur=0.1,
winStable = 3, window='hamming', M=1024, N=2048, H=256, f0et=5.0, t=-100,
minf0=310, maxf0=650):
"""
Function to segment the stable note regions in an audio signal
Input:
inputFile (string): wav file including the path
stdThsld (float): threshold for detecting stable regions in the f0 contour (in cents)
minNoteDur (float): minimum allowed segment length (note duration)
winStable (integer): number of samples used for computing standard deviation
window (string): analysis window
M (integer): window size used for computing f0 contour
N (integer): FFT size used for computing f0 contour
H (integer): Hop size used for computing f0 contour
f0et (float): error threshold used for the f0 computation
t (float): magnitude threshold in dB used in spectral peak picking
minf0 (float): minimum fundamental frequency in Hz
maxf0 (float): maximum fundamental frequency in Hz
Output:
segments (np.ndarray): Numpy array containing starting and ending frame indexes of every segment.
"""
fs, x = UF.wavread(inputFile) # reading inputFile
w = get_window(window, M) # obtaining analysis window
f0 = HM.f0Detection(x, fs, w, N, H, t, minf0, maxf0, f0et) # estimating F0
# 1. convert f0 values from Hz to Cents (as described in pdf document)
f0_in_cents = 1200.0*np.log2(f0/55.0 + eps)
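    # 55.0 Hz (the note A1) is the reference frequency for the cents scale here;
    # any fixed reference works because only the spread of the cents values is
    # thresholded, and eps avoids log2(0) on unvoiced (f0 = 0) frames.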
#2. create an array containing standard deviation of last winStable samples
std_F0 = (stdThsld + eps)*np.ones(winStable - 1,dtype = float)
for i in range(winStable - 1, len(f0_in_cents)):
std_F0 = np.append(std_F0,np.std(f0_in_cents[(i - winStable + 1):(i + 1)]))
# print 'step = ' + str(i)
# print 'nBinLow = ' + str(i - winStable + 1)
# print 'nBinHigh = ' + str(i + 1)
# print 'Values = ' + str(f0_in_cents[(i - winStable + 1):(i + 1)])
# print '****'
#3. apply threshold on standard deviation values to find indexes of the stable points in melody
idx = np.where(std_F0 < stdThsld)[0]
# idx = np.array([3, 4, 5, 6, 12, 13, 17, 18, 19])
#4. create segments of continuous stable points such that consecutive stable points belong to same segment
idx_Start = np.array([],dtype=np.int64)
idx_End = np.array([],dtype=np.int64)
pointer_Start = 0
pointer_End = 0
for i in range(0, len(idx)-1):
# print 'pointer_Start = ' + str(pointer_Start)
# print 'pointer_End = ' + str(pointer_End)
# print '****'
if((idx[i+1] - idx[i]) != 1):
idx_Start = np.append(idx_Start,pointer_Start)
idx_End = np.append(idx_End,pointer_End)
pointer_End += 1
pointer_Start = (i+1)
else:
pointer_End += 1
idx_Start = np.append(idx_Start,pointer_Start)
idx_End = np.append(idx_End,pointer_End)
    #5. apply segment filtering, i.e. remove segments which are < minNoteDur in length
idx_segments = np.where((idx_End - idx_Start + 1)*H/float(fs) >= minNoteDur)[0]
segments_Start = idx[idx_Start[idx_segments]]
segments_End = idx[idx_End[idx_segments]]
segments = np.array([segments_Start,segments_End])
segments = np.transpose(segments)
#plotSpectogramF0Segments(x, fs, w, N, H, f0, segments)
return segments
def plotSpectogramF0Segments(x, fs, w, N, H, f0, segments):
"""
Code for plotting the f0 contour on top of the spectrogram
"""
# frequency range to plot
maxplotfreq = 1000.0
fontSize = 16
fig = plt.figure()
ax = fig.add_subplot(111)
mX, pX = stft.stftAnal(x, fs, w, N, H) #using same params as used for analysis
mX = np.transpose(mX[:,:int(N*(maxplotfreq/fs))+1])
timeStamps = np.arange(mX.shape[1])*H/float(fs)
binFreqs = np.arange(mX.shape[0])*fs/float(N)
plt.pcolormesh(timeStamps, binFreqs, mX)
plt.plot(timeStamps, f0, color = 'k', linewidth=5)
for ii in range(segments.shape[0]):
plt.plot(timeStamps[segments[ii,0]:segments[ii,1]], f0[segments[ii,0]:segments[ii,1]], color = '#A9E2F3', linewidth=1.5)
plt.autoscale(tight=True)
plt.ylabel('Frequency (Hz)', fontsize = fontSize)
plt.xlabel('Time (s)', fontsize = fontSize)
plt.legend(('f0','segments'))
xLim = ax.get_xlim()
yLim = ax.get_ylim()
ax.set_aspect((xLim[1]-xLim[0])/(2.0*(yLim[1]-yLim[0])))
plt.autoscale(tight=True)
plt.show()
#inputFile = '../../sounds/sax-phrase-short.wav'
#stdThsld=5
#minNoteDur=0.6
#winStable = 3
#window='hamming'
#M=1025
#N=2048
#H=256
#f0et=5.0
#t=-100
#minf0=310
#maxf0=650
#inputFile = '../../sounds/cello-phrase.wav'
#stdThsld=20
#minNoteDur=0.5
#winStable = 3
#window='hamming'
#M=1025
#N=2048
#H=256
#f0et=5.0
#t=-100
#minf0=310
#maxf0=650
##z = segmentStableNotesRegions(inputFile, stdThsld, minNoteDur, winStable, window, M, N, H, f0et, t, minf0 , maxf0)
#fs, x = UF.wavread(inputFile) # reading inputFile
#w = get_window(window, M) # obtaining analysis window
#f0 = HM.f0Detection(x, fs, w, N, H, t, minf0, maxf0, f0et) # estimating F0
## 1. convert f0 values from Hz to Cents (as described in pdf document)
#f0_in_cents = 1200.0*np.log2(f0/55.0 + eps)
##2. create an array containing standard deviation of last winStable samples
#std_F0 = (stdThsld + eps)*np.ones(winStable - 1,dtype = float)
#for i in range(winStable - 1, len(f0_in_cents)):
# std_F0 = np.append(std_F0,np.std(f0_in_cents[(i - winStable + 1):(i + 1)]))
## print 'step = ' + str(i)
## print 'nBinLow = ' + str(i - winStable + 1)
## print 'nBinHigh = ' + str(i + 1)
## print 'Values = ' + str(f0_in_cents[(i - winStable + 1):(i + 1)])
## print '****'
##3. apply threshold on standard deviation values to find indexes of the stable points in melody
#idx = np.where(std_F0 < stdThsld)[0]
## idx = np.array([3, 4, 5, 6, 12, 13, 17, 18, 19])
##4. create segments of continuous stable points such that consecutive stable points belong to same segment
#idx_Start = np.array([],dtype=np.int64)
#idx_End = np.array([],dtype=np.int64)
#pointer_Start = 0
#pointer_End = 0
#for i in range(0, len(idx)-1):
## print 'pointer_Start = ' + str(pointer_Start)
## print 'pointer_End = ' + str(pointer_End)
## print '****'
#
# if((idx[i+1] - idx[i]) != 1):
# idx_Start = np.append(idx_Start,pointer_Start)
# idx_End = np.append(idx_End,pointer_End)
# pointer_End += 1
# pointer_Start = (i+1)
# else:
# pointer_End += 1
#
#idx_Start = np.append(idx_Start,pointer_Start)
#idx_End = np.append(idx_End,pointer_End)
##5. apply segment filtering, i.e. remove segments with are < minNoteDur in length
#idx_segments = np.where((idx_End - idx_Start + 1)*H/float(fs) >= minNoteDur)[0]
#segments_Start = idx[idx_Start[idx_segments]]
#segments_End = idx[idx_End[idx_segments]]
#segments = np.array([segments_Start,segments_End])
#segments = np.transpose(segments)
#plotSpectogramF0Segments(x, fs, w, N, H, f0, segments)
|
agpl-3.0
|
SANDAG/defm
|
pydefm/compute.py
|
2
|
9617
|
import numpy as np
import pandas as pd
# non-migrating population = base population - out migration
# in migrating population = in migrating rates applied to base population
# dead population = death rates applied to non-migrating population
# non-migrating survived population = non-migrating population - deaths
# newborns = birth rates applied to non-migrating population
# aged pop = non-migrating survived population is aged
# new pop = newborns + in migrating population
# final pop = new pop + aged pop
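# Mapping of the steps above to the functions in this module (inferred from the
# function bodies below; the driver that chains them together lives elsewhere):
#   in migrating population   -> in_migrating_population()
#   out migration             -> out_migrating_population()
#   non-migrating population  -> non_migrating_population()
#   dead population           -> dead_population()
#   survived population       -> non_migrating_survived_pop()
#   newborns                  -> births_all() / births_sum()
#   aged pop                  -> aged_pop()
#   new pop                   -> new_population()
#   final pop                 -> final_population()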
def in_migrating_population(pop):
pop = pop[(pop['type'] == 'HHP') & (pop['mildep'] == 'N')]
pop = pop.fillna(0)
pop['mig_Din'] = (pop['persons'] * pop['DIN']).round()
pop['mig_Fin'] = (pop['persons'] * pop['FIN']).round()
pop = pop[['mig_Din', 'mig_Fin']]
return pop
def out_migrating_population(pop):
pop = pop[(pop['type'] == 'HHP') & (pop['mildep'] == 'N')]
pop = pop.fillna(0)
pop['mig_Dout'] = (pop['persons'] * pop['DOUT']).round()
pop['mig_Fout'] = (pop['persons'] * pop['FOUT']).round()
pop = pop[['mig_Dout', 'mig_Fout']]
return pop
def non_migrating_population(pop, out_pop):
pop = pop.join(out_pop, how='left')
pop.loc[pop['type'].isin(['COL', 'INS', 'MIL', 'OTH']),
['mig_Dout', 'mig_Fout']] = 0
pop.loc[pop['mildep'].isin(['Y']), ['mig_Dout', 'mig_Fout']] = 0
pop['non_mig_pop'] = (pop['persons'] - pop['mig_Dout'] -
pop['mig_Fout']).round()
return pop
def dead_population(pop):
# pop = pop[(pop['type'] == 'HHP') & (pop['mildep'] == 'N')]
pop = pop.fillna(1) # for age 101, no rates
pop['deaths'] = (pop['non_mig_pop'] * pop['death_rate']).round()
pop = pop.reset_index(drop=False)
pop['mild_type'] = pop[['type', 'mildep']].apply(lambda x: '_'.join(x), axis=1)
pop = pd.pivot_table(pop, values='deaths',
index=['age', 'race_ethn', 'sex'],
columns=['mild_type'])
pop.rename(columns={'HHP_N': 'deaths_hhp_non_mil'}, inplace=True)
return pop
'''
def new_born_population(pop):
pop['new_born'] = (pop['non_mig_pop'] * pop['death_rate']).round()
return pop[['deaths']]
'''
def non_migrating_survived_pop(pop, deaths):
pop = pop.join(deaths, how='left')
pop.loc[pop['type'].isin(['COL', 'INS', 'MIL', 'OTH']), ['deaths_hhp_non_mil']] = 0
pop.loc[pop['mildep'].isin(['Y']), ['deaths_hhp_non_mil']] = 0
pop['non_mig_survived_pop'] = (pop['non_mig_pop'] - pop['deaths_hhp_non_mil']).round()
return pop
def aged_pop(non_mig_survived_pop):
non_mig_survived_pop['increment'] = 1
# sum newborn population across cohorts
non_mig_survived_pop = non_mig_survived_pop.\
reset_index(level=['age', 'race_ethn', 'sex'])
non_mig_survived_pop.loc[non_mig_survived_pop['type'].isin(['COL',
'INS',
'MIL',
'OTH']),
['increment']] = 0
non_mig_survived_pop.loc[non_mig_survived_pop['mildep'].isin(['Y']),
['increment']] = 0
temp = non_mig_survived_pop[(non_mig_survived_pop['increment'] == 1) &
(non_mig_survived_pop['age'] == 0)]
temp = temp.assign(non_mig_survived_pop=0)
non_mig_survived_pop['age'] = non_mig_survived_pop['age'] + \
non_mig_survived_pop['increment']
non_mig_survived_pop = non_mig_survived_pop.append(temp)
temp_2 = non_mig_survived_pop[(non_mig_survived_pop['age'] >= 101)]
temp_2_p = pd.DataFrame(temp_2['non_mig_survived_pop'].groupby(
[temp_2['race_ethn'], temp_2['sex'],
temp_2['type'], temp_2['mildep']]).sum())
temp_2_h = pd.DataFrame(
temp_2['households'].groupby([temp_2['race_ethn'],
temp_2['sex'],
temp_2['type'],
temp_2['mildep']]).sum())
temp_2_p = temp_2_p.join(temp_2_h)
temp_2_p = temp_2_p.assign(age=101)
temp_2_p = temp_2_p.reset_index(drop=False)
non_mig_survived_pop = non_mig_survived_pop[
['age', 'race_ethn', 'sex', 'type',
'mildep', 'non_mig_survived_pop', 'households']]
non_mig_survived_pop = non_mig_survived_pop[non_mig_survived_pop.age < 101]
non_mig_survived_pop = non_mig_survived_pop.append(temp_2_p)
    non_mig_survived_pop = non_mig_survived_pop.fillna(0)
non_mig_survived_pop = non_mig_survived_pop.\
set_index(['age', 'race_ethn', 'sex'])
return non_mig_survived_pop
def new_population(new_pop):
new_pop['new_pop'] = new_pop['mig_Din'] +\
new_pop['mig_Fin'] + new_pop['new_born_survived']
return new_pop[['new_pop']]
def final_population(pop):
pop['persons1'] = pop['non_mig_survived_pop'] + pop['new_pop']
pop2 = pop[(pop['type'] == 'HHP')]
pop2 = pop2.reset_index(drop=False)
pop2 = pd.DataFrame(pop2['persons1'].groupby(
[pop2['age'], pop2['race_ethn'], pop2['sex']]).sum())
pop2.rename(columns={'persons1': 'persons_sum1'}, inplace=True)
pop = pop.join(pop2)
pop['persons'] = np.where(pop['type'].
isin(['INS', 'OTH']),
(pop['persons_sum1'] * pop['rates']).round(),
pop['persons1'])
return pop[['type', 'mildep', 'persons', 'households']]
def births_all(b_df, rand_df=None, pop_col='persons'):
"""
Calculate births for given year based on rates.
Predict male births as 51% of all births & female births as 49%.
Result is nearest integer (floor) after +0 or +0.5 (randomly generated)
Parameters
----------
b_df : pandas.DataFrame
with population for current yr and birth rates
rand_df : pandas.DataFrame
with random numbers
pop_col : string
column name from which births to be calculated
Returns
-------
b_df : pandas DataFrame
male and female births by cohort (race_ethn and age)
"""
# total births = population * birth rate (fill blanks w zero)
b_df['births_rounded'] = (b_df[pop_col] *
b_df['birth_rate']).round()
# b_df = b_df.round({'births_rounded': 0})
# male births 51%
b_df['births_m_float'] = b_df['births_rounded'] * 0.51
b_df = b_df.join(rand_df)
# Add random 0 or 0.5
# Convert to int which truncates float (floor)
b_df['births_m'] = b_df['births_m_float'] + b_df['random_number']
b_df['births_m'] = b_df['births_m'].astype('int64')
# female births
b_df['births_f'] = (b_df['births_rounded'] - b_df['births_m']).round()
return b_df
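# A small illustrative sketch (not part of the model run): with 1000 persons
# and a birth rate of 0.06, births_rounded = 60, births_m_float = 30.6, and the
# random +0 or +0.5 added before the integer floor yields 30 or 31 male births,
# with female births making up the remainder.
def _births_all_sketch():
    b_df = pd.DataFrame({'persons': [1000.0], 'birth_rate': [0.06]})
    rand_df = pd.DataFrame({'random_number': [0.5]})
    out = births_all(b_df, rand_df)
    # births_rounded = 60.0, births_m = int(30.6 + 0.5) = 31, births_f = 29.0
    return out[['births_rounded', 'births_m', 'births_f']]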
def births_sum(df, sim_year):
"""
Sum births over all the ages in a given cohort
Set birth age to zero and reset DataFrame index
Parameters
----------
df : pandas DataFrame
male and female births for each cohort and non-migrating population
sim_year : int
year being simulated
Returns
-------
births_age0 : pandas DataFrame
births summed across age for each cohort
"""
df = df.reset_index(drop=False)
df = df[['yr', 'race_ethn', 'mildep', 'type', 'births_m', 'births_f']]
births_grouped = df.groupby(['yr', 'race_ethn', 'mildep', 'type'],
as_index=False).sum()
male_births = births_grouped.copy()
male_births.rename(columns={'births_m': 'persons'}, inplace=True)
male_births['sex'] = 'M'
male_births['age'] = 0
male_births = male_births.set_index(['age', 'race_ethn', 'sex'])
male_births = male_births.drop('births_f', 1)
female_births = births_grouped.copy()
female_births.rename(columns={'births_f': 'persons'}, inplace=True)
female_births['sex'] = 'F'
female_births['age'] = 0
female_births = female_births.set_index(['age', 'race_ethn', 'sex'])
female_births = female_births.drop('births_m', 1)
births_mf = pd.concat([male_births, female_births], axis=0)
births_mf['households'] = 0 # temp ignore households
# no births for this special case
births_mf = births_mf[-births_mf['type'].isin(['COL', 'MIL', 'INS',
'OTH'])]
newborns = births_mf[births_mf.persons != 0].copy()
newborns.rename(columns={'persons': 'newborns'}, inplace=True)
newborns = newborns.drop('households', 1)
births_mf = births_mf.drop('yr', 1)
# SPECIAL CASE:
# Births are estimated & reported out, but are not carried over into the
# next year ( base_population.type="HP" and base_population.mildep="Y")
# keep rows in which either type != 'HP' OR mildep != 'Y'
# which results in dropping rows where type = 'HP' AND mildep = 'Y'
births_mf = births_mf[((births_mf.type != 'HP') |
(births_mf.mildep != 'Y'))]
births_mf.rename(columns={'persons': 'new_born'}, inplace=True)
return births_mf
def compute_ins_oth_rate(pop):
pop2 = pop[(pop['type'] == 'HHP')]
pop2 = pop2.reset_index(drop=False)
pop2 = pd.DataFrame(pop2['persons'].groupby([pop2['age'], pop2['race_ethn'], pop2['sex']]).sum())
pop2.rename(columns={'persons': 'persons_sum'}, inplace=True)
pop2 = pop.join(pop2)
pop2['rates'] = np.where(pop2['type'].isin(['INS', 'OTH']), (pop2['persons'] / pop2['persons_sum']), 0)
return pop2[['mildep', 'type', 'rates']]
|
agpl-3.0
|
antgonza/qiita
|
qiita_db/metadata_template/test/test_sample_template.py
|
1
|
113647
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import TestCase, main
from tempfile import mkstemp
from os import close, remove
from collections import Iterable
from warnings import catch_warnings
from time import time
import numpy.testing as npt
import pandas as pd
from pandas.util.testing import assert_frame_equal
from qiita_core.util import qiita_test_checker
from qiita_core.exceptions import IncompetentQiitaDeveloperError
import qiita_db as qdb
STC = qdb.metadata_template.constants.SAMPLE_TEMPLATE_COLUMNS
@qiita_test_checker()
class TestSample(TestCase):
def setUp(self):
self.sample_template = \
qdb.metadata_template.sample_template.SampleTemplate(1)
self.sample_id = '1.SKB8.640193'
self.tester = qdb.metadata_template.sample_template.Sample(
self.sample_id, self.sample_template)
self.exp_categories = {'physical_specimen_location',
'physical_specimen_remaining',
'dna_extracted', 'sample_type', 'env_package',
'collection_timestamp', 'host_subject_id',
'description', 'season_environment',
'assigned_from_geo', 'texture', 'taxon_id',
'depth', 'host_taxid', 'common_name',
'water_content_soil', 'elevation', 'temp',
'tot_nitro', 'samp_salinity', 'altitude',
'env_biome', 'country', 'ph', 'anonymized_name',
'tot_org_carb', 'description_duplicate',
'env_feature', 'latitude', 'longitude',
'scientific_name'}
def test_init_unknown_error(self):
"""Init raises an error if the sample id is not found in the template
"""
with self.assertRaises(qdb.exceptions.QiitaDBUnknownIDError):
qdb.metadata_template.sample_template.Sample(
'Not_a_Sample', self.sample_template)
def test_init_wrong_template(self):
"""Raises an error if using a PrepTemplate instead of SampleTemplate"""
with self.assertRaises(IncompetentQiitaDeveloperError):
qdb.metadata_template.sample_template.Sample(
'SKB8.640193',
qdb.metadata_template.prep_template.PrepTemplate(1))
def test_init(self):
"""Init correctly initializes the sample object"""
sample = qdb.metadata_template.sample_template.Sample(
self.sample_id, self.sample_template)
        # Check that the internal id has been correctly set
self.assertEqual(sample._id, '1.SKB8.640193')
        # Check that the internal template has been correctly set
self.assertEqual(sample._md_template, self.sample_template)
        # Check that the internal dynamic table name has been correctly set
self.assertEqual(sample._dynamic_table, "sample_1")
def test_eq_true(self):
"""Equality correctly returns true"""
other = qdb.metadata_template.sample_template.Sample(
self.sample_id, self.sample_template)
self.assertTrue(self.tester == other)
def test_eq_false_type(self):
"""Equality returns false if types are not equal"""
other = qdb.metadata_template.prep_template.PrepSample(
self.sample_id,
qdb.metadata_template.prep_template.PrepTemplate(1))
self.assertFalse(self.tester == other)
def test_eq_false_id(self):
"""Equality returns false if ids are different"""
other = qdb.metadata_template.sample_template.Sample(
'1.SKD8.640184', self.sample_template)
self.assertFalse(self.tester == other)
def test_exists_true(self):
"""Exists returns true if the sample exists"""
self.assertTrue(qdb.metadata_template.sample_template.Sample.exists(
self.sample_id, self.sample_template))
def test_exists_false(self):
"""Exists returns false if the sample does not exists"""
self.assertFalse(qdb.metadata_template.sample_template.Sample.exists(
'Not_a_Sample', self.sample_template))
def test_get_categories(self):
"""Correctly returns the set of category headers"""
obs = self.tester._get_categories()
self.assertEqual(obs, self.exp_categories)
def test_len(self):
"""Len returns the correct number of categories"""
self.assertEqual(len(self.tester), 31)
def test_getitem_required(self):
"""Get item returns the correct metadata value from the required table
"""
self.assertEqual(self.tester['physical_specimen_location'], 'ANL')
self.assertEqual(self.tester['collection_timestamp'],
'2011-11-11 13:00:00')
self.assertTrue(self.tester['dna_extracted'])
def test_getitem_dynamic(self):
"""Get item returns the correct metadata value from the dynamic table
"""
self.assertEqual(self.tester['SEASON_ENVIRONMENT'], 'winter')
self.assertEqual(self.tester['depth'], '0.15')
def test_getitem_error(self):
"""Get item raises an error if category does not exists"""
with self.assertRaises(KeyError):
self.tester['Not_a_Category']
def test_iter(self):
"""iter returns an iterator over the category headers"""
obs = self.tester.__iter__()
self.assertTrue(isinstance(obs, Iterable))
self.assertEqual(set(obs), self.exp_categories)
def test_contains_true(self):
"""contains returns true if the category header exists"""
self.assertTrue('DEPTH' in self.tester)
self.assertTrue('depth' in self.tester)
def test_contains_false(self):
"""contains returns false if the category header does not exists"""
self.assertFalse('Not_a_Category' in self.tester)
def test_keys(self):
"""keys returns an iterator over the metadata headers"""
obs = self.tester.keys()
self.assertTrue(isinstance(obs, Iterable))
self.assertEqual(set(obs), self.exp_categories)
def test_values(self):
"""values returns an iterator over the values"""
obs = self.tester.values()
self.assertTrue(isinstance(obs, Iterable))
exp = {'ANL', 'true', 'true', 'ENVO:soil', '2011-11-11 13:00:00',
'1001:M7', 'Cannabis Soil Microbiome', 'winter', 'n',
'64.6 sand, 17.6 silt, 17.8 clay', '1118232', '0.15', '3483',
'root metagenome', '0.164', '114', '15', '1.41', '7.15', '0',
'ENVO:Temperate grasslands, savannas, and shrubland biome',
'GAZ:United States of America', '6.94', 'SKB8', '5',
'Burmese root', 'ENVO:plant-associated habitat',
'74.0894932572', '65.3283470202', '1118232', 'soil'}
self.assertCountEqual(set(obs), exp)
def test_items(self):
"""items returns an iterator over the (key, value) tuples"""
obs = self.tester.items()
self.assertTrue(isinstance(obs, Iterable))
exp = {('physical_specimen_location', 'ANL'),
('physical_specimen_remaining', 'true'),
('dna_extracted', 'true'),
('sample_type', 'ENVO:soil'),
('collection_timestamp', '2011-11-11 13:00:00'),
('host_subject_id', '1001:M7'),
('description', 'Cannabis Soil Microbiome'),
('env_package', 'soil'),
('season_environment', 'winter'), ('assigned_from_geo', 'n'),
('texture', '64.6 sand, 17.6 silt, 17.8 clay'),
('taxon_id', '1118232'), ('depth', '0.15'),
('host_taxid', '3483'), ('common_name', 'root metagenome'),
('water_content_soil', '0.164'), ('elevation', '114'),
('temp', '15'), ('tot_nitro', '1.41'),
('samp_salinity', '7.15'), ('altitude', '0'),
('env_biome',
'ENVO:Temperate grasslands, savannas, and shrubland biome'),
('country', 'GAZ:United States of America'), ('ph', '6.94'),
('anonymized_name', 'SKB8'), ('tot_org_carb', '5'),
('description_duplicate', 'Burmese root'),
('env_feature', 'ENVO:plant-associated habitat'),
('latitude', '74.0894932572'),
('longitude', '65.3283470202'),
('scientific_name', '1118232')}
self.assertEqual(set(obs), exp)
def test_get(self):
"""get returns the correct sample object"""
self.assertEqual(self.tester.get('SEASON_ENVIRONMENT'), 'winter')
self.assertEqual(self.tester.get('depth'), '0.15')
def test_get_none(self):
"""get returns none if the sample id is not present"""
self.assertTrue(self.tester.get('Not_a_Category') is None)
def test_columns_restrictions(self):
"""that it returns STC"""
self.assertEqual(
self.sample_template.columns_restrictions,
STC)
def test_can_be_updated(self):
"""test if the template can be updated"""
self.assertTrue(self.sample_template.can_be_updated())
def test_can_be_extended(self):
"""test if the template can be extended"""
obs_bool, obs_msg = self.sample_template.can_be_extended([], [])
self.assertTrue(obs_bool)
self.assertEqual(obs_msg, "")
def test_setitem(self):
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
self.tester['column that does not exist'] = 0.30
tester = qdb.metadata_template.sample_template.Sample(
'1.SKB1.640202', self.sample_template)
self.assertEqual(tester['tot_nitro'], '1.41')
tester['tot_nitro'] = '1234.5'
self.assertEqual(tester['tot_nitro'], '1234.5')
def test_delitem(self):
"""delitem raises an error (currently not allowed)"""
with self.assertRaises(qdb.exceptions.QiitaDBNotImplementedError):
del self.tester['DEPTH']
@qiita_test_checker()
class TestSampleTemplate(TestCase):
def setUp(self):
info = {
"timeseries_type_id": '1',
"metadata_complete": 'true',
"mixs_compliant": 'true',
"study_alias": "FCM",
"study_description": "Microbiome of people who eat nothing but "
"fried chicken",
"study_abstract": "Exploring how a high fat diet changes the "
"gut microbiome",
"principal_investigator_id": qdb.study.StudyPerson(3),
"lab_person_id": qdb.study.StudyPerson(1)
}
self.new_study = qdb.study.Study.create(
qdb.user.User('[email protected]'),
"Fried Chicken Microbiome %s" % time(), info)
self.metadata_dict = {
'Sample1': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 1',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
'Sample2': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 2',
'latitude': '4.2',
'longitude': '1.1',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
'Sample3': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 3',
'latitude': '4.8',
'longitude': '4.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
}
self.metadata = pd.DataFrame.from_dict(self.metadata_dict,
orient='index', dtype=str)
metadata_str_prefix_dict = {
'foo.Sample1': self.metadata_dict['Sample1'],
'bar.Sample2': self.metadata_dict['Sample2'],
'foo.Sample3': self.metadata_dict['Sample3'],
}
self.metadata_str_prefix = pd.DataFrame.from_dict(
metadata_str_prefix_dict, orient='index', dtype=str)
metadata_int_prefix_dict = {
'12.Sample1': self.metadata_dict['Sample1'],
'12.Sample2': self.metadata_dict['Sample2'],
'12.Sample3': self.metadata_dict['Sample3']
}
self.metadata_int_pref = pd.DataFrame.from_dict(
metadata_int_prefix_dict, orient='index', dtype=str)
metadata_prefixed_dict = {
'%d.Sample1' % self.new_study.id: self.metadata_dict['Sample1'],
'%d.Sample2' % self.new_study.id: self.metadata_dict['Sample2'],
'%d.Sample3' % self.new_study.id: self.metadata_dict['Sample3']
}
self.metadata_prefixed = pd.DataFrame.from_dict(
metadata_prefixed_dict, orient='index', dtype=str)
self.test_study = qdb.study.Study(1)
self.tester = qdb.metadata_template.sample_template.SampleTemplate(1)
self.exp_sample_ids = {
'1.SKB1.640202', '1.SKB2.640194', '1.SKB3.640195', '1.SKB4.640189',
'1.SKB5.640181', '1.SKB6.640176', '1.SKB7.640196', '1.SKB8.640193',
'1.SKB9.640200', '1.SKD1.640179', '1.SKD2.640178', '1.SKD3.640198',
'1.SKD4.640185', '1.SKD5.640186', '1.SKD6.640190', '1.SKD7.640191',
'1.SKD8.640184', '1.SKD9.640182', '1.SKM1.640183', '1.SKM2.640199',
'1.SKM3.640197', '1.SKM4.640180', '1.SKM5.640177', '1.SKM6.640187',
'1.SKM7.640188', '1.SKM8.640201', '1.SKM9.640192'}
self._clean_up_files = []
self.metadata_dict_updated_dict = {
'Sample1': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': '6',
'collection_timestamp':
'2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 1',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
'Sample2': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': '5',
'collection_timestamp':
'2014-05-29 12:24:15',
'host_subject_id': 'the only one',
'Description': 'Test Sample 2',
'latitude': '4.2',
'longitude': '1.1',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
'Sample3': {'physical_specimen_location': 'new location',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': '10',
'collection_timestamp':
'2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 3',
'latitude': '4.8',
'longitude': '4.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
}
self.metadata_dict_updated = pd.DataFrame.from_dict(
self.metadata_dict_updated_dict, orient='index', dtype=str)
metadata_dict_updated_sample_error = {
'Sample1': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': '6',
'collection_timestamp':
'2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 1',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
'Sample2': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': '5',
'collection_timestamp':
'2014-05-29 12:24:15',
'host_subject_id': 'the only one',
'Description': 'Test Sample 2',
'latitude': '4.2',
'longitude': '1.1',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
'Sample3': {'physical_specimen_location': 'new location',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': '10',
'collection_timestamp':
'2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 3',
'latitude': '4.8',
'longitude': '4.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
'Sample4': {'physical_specimen_location': 'new location',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': '10',
'collection_timestamp':
'2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 4',
'latitude': '4.8',
'longitude': '4.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'}
}
self.metadata_dict_updated_sample_error = pd.DataFrame.from_dict(
metadata_dict_updated_sample_error, orient='index', dtype=str)
metadata_dict_updated_column_error = {
'Sample1': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': '6',
'collection_timestamp':
'2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 1',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens',
'extra_col': True},
'Sample2': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': '5',
'collection_timestamp':
'2014-05-29 12:24:15',
'host_subject_id': 'the only one',
'Description': 'Test Sample 2',
'latitude': '4.2',
'longitude': '1.1',
'taxon_id': '9606',
'scientific_name': 'homo sapiens',
'extra_col': True},
'Sample3': {'physical_specimen_location': 'new location',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': '10',
'collection_timestamp':
'2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 3',
'latitude': '4.8',
'longitude': '4.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens',
'extra_col': True},
}
self.metadata_dict_updated_column_error = pd.DataFrame.from_dict(
metadata_dict_updated_column_error, orient='index', dtype=str)
def tearDown(self):
for f in self._clean_up_files:
remove(f)
def test_metadata_headers(self):
ST = qdb.metadata_template.sample_template.SampleTemplate
obs = ST.metadata_headers()
exp = ['altitude', 'anonymized_name', 'assigned_from_geo',
'collection_timestamp', 'common_name', 'country', 'depth',
'description', 'description_duplicate', 'dna_extracted',
'elevation', 'env_biome', 'env_feature', 'host_subject_id',
'host_taxid', 'latitude', 'longitude', 'ph', 'env_package',
'physical_specimen_location', 'physical_specimen_remaining',
'samp_salinity', 'sample_type', 'scientific_name',
'season_environment', 'taxon_id', 'temp', 'texture',
'tot_nitro', 'tot_org_carb', 'water_content_soil']
self.assertCountEqual(obs, exp)
def test_study_id(self):
"""Ensure that the correct study ID is returned"""
self.assertEqual(self.tester.study_id, 1)
def test_init_unknown_error(self):
"""Init raises an error if the id is not known"""
with self.assertRaises(qdb.exceptions.QiitaDBUnknownIDError):
qdb.metadata_template.sample_template.SampleTemplate(200000)
def test_init(self):
"""Init successfully instantiates the object"""
st = qdb.metadata_template.sample_template.SampleTemplate(1)
        self.assertEqual(st.id, 1)
def test_table_name(self):
"""Table name return the correct string"""
obs = qdb.metadata_template.sample_template.SampleTemplate._table_name(
self.test_study.id)
self.assertEqual(obs, "sample_1")
def test_exists_true(self):
"""Exists returns true when the SampleTemplate already exists"""
self.assertTrue(
qdb.metadata_template.sample_template.SampleTemplate.exists(
self.test_study.id))
def test_get_sample_ids(self):
"""get_sample_ids returns the correct set of sample ids"""
obs = self.tester._get_sample_ids()
self.assertEqual(obs, self.exp_sample_ids)
def test_len(self):
"""Len returns the correct number of sample ids"""
self.assertEqual(len(self.tester), 27)
def test_getitem(self):
"""Get item returns the correct sample object"""
obs = self.tester['1.SKM7.640188']
exp = qdb.metadata_template.sample_template.Sample(
'1.SKM7.640188', self.tester)
self.assertEqual(obs, exp)
def test_getitem_error(self):
"""Get item raises an error if key does not exists"""
with self.assertRaises(KeyError):
self.tester['Not_a_Sample']
def test_categories(self):
exp = {'season_environment', 'assigned_from_geo',
'texture', 'taxon_id', 'depth', 'host_taxid',
'common_name', 'water_content_soil', 'elevation',
'temp', 'tot_nitro', 'samp_salinity', 'altitude',
'env_biome', 'country', 'ph', 'anonymized_name',
'tot_org_carb', 'description_duplicate', 'env_feature',
'physical_specimen_location', 'env_package',
'physical_specimen_remaining', 'dna_extracted',
'sample_type', 'collection_timestamp', 'host_subject_id',
'description', 'latitude', 'longitude', 'scientific_name'}
obs = set(self.tester.categories)
self.assertCountEqual(obs, exp)
def test_iter(self):
"""iter returns an iterator over the sample ids"""
obs = self.tester.__iter__()
self.assertTrue(isinstance(obs, Iterable))
self.assertEqual(set(obs), self.exp_sample_ids)
def test_contains_true(self):
"""contains returns true if the sample id exists"""
self.assertTrue('1.SKM7.640188' in self.tester)
def test_contains_false(self):
"""contains returns false if the sample id does not exists"""
self.assertFalse('Not_a_Sample' in self.tester)
def test_keys(self):
"""keys returns an iterator over the sample ids"""
obs = self.tester.keys()
self.assertTrue(isinstance(obs, Iterable))
self.assertEqual(set(obs), self.exp_sample_ids)
def test_values(self):
"""values returns an iterator over the values"""
obs = self.tester.values()
self.assertTrue(isinstance(obs, Iterable))
exp = {qdb.metadata_template.sample_template.Sample('1.SKB1.640202',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKB2.640194',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKB3.640195',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKB4.640189',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKB5.640181',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKB6.640176',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKB7.640196',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKB8.640193',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKB9.640200',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKD1.640179',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKD2.640178',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKD3.640198',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKD4.640185',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKD5.640186',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKD6.640190',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKD7.640191',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKD8.640184',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKD9.640182',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKM1.640183',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKM2.640199',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKM3.640197',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKM4.640180',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKM5.640177',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKM6.640187',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKM7.640188',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKM8.640201',
self.tester),
qdb.metadata_template.sample_template.Sample('1.SKM9.640192',
self.tester)}
# Creating a list and looping over it since unittest does not call
# the __eq__ function on the objects
for o, e in zip(sorted(list(obs), key=lambda x: x.id),
sorted(exp, key=lambda x: x.id)):
self.assertEqual(o, e)
def test_items(self):
"""items returns an iterator over the (key, value) tuples"""
obs = self.tester.items()
self.assertTrue(isinstance(obs, Iterable))
exp = [
('1.SKB1.640202', qdb.metadata_template.sample_template.Sample(
'1.SKB1.640202', self.tester)),
('1.SKB2.640194', qdb.metadata_template.sample_template.Sample(
'1.SKB2.640194', self.tester)),
('1.SKB3.640195', qdb.metadata_template.sample_template.Sample(
'1.SKB3.640195', self.tester)),
('1.SKB4.640189', qdb.metadata_template.sample_template.Sample(
'1.SKB4.640189', self.tester)),
('1.SKB5.640181', qdb.metadata_template.sample_template.Sample(
'1.SKB5.640181', self.tester)),
('1.SKB6.640176', qdb.metadata_template.sample_template.Sample(
'1.SKB6.640176', self.tester)),
('1.SKB7.640196', qdb.metadata_template.sample_template.Sample(
'1.SKB7.640196', self.tester)),
('1.SKB8.640193', qdb.metadata_template.sample_template.Sample(
'1.SKB8.640193', self.tester)),
('1.SKB9.640200', qdb.metadata_template.sample_template.Sample(
'1.SKB9.640200', self.tester)),
('1.SKD1.640179', qdb.metadata_template.sample_template.Sample(
'1.SKD1.640179', self.tester)),
('1.SKD2.640178', qdb.metadata_template.sample_template.Sample(
'1.SKD2.640178', self.tester)),
('1.SKD3.640198', qdb.metadata_template.sample_template.Sample(
'1.SKD3.640198', self.tester)),
('1.SKD4.640185', qdb.metadata_template.sample_template.Sample(
'1.SKD4.640185', self.tester)),
('1.SKD5.640186', qdb.metadata_template.sample_template.Sample(
'1.SKD5.640186', self.tester)),
('1.SKD6.640190', qdb.metadata_template.sample_template.Sample(
'1.SKD6.640190', self.tester)),
('1.SKD7.640191', qdb.metadata_template.sample_template.Sample(
'1.SKD7.640191', self.tester)),
('1.SKD8.640184', qdb.metadata_template.sample_template.Sample(
'1.SKD8.640184', self.tester)),
('1.SKD9.640182', qdb.metadata_template.sample_template.Sample(
'1.SKD9.640182', self.tester)),
('1.SKM1.640183', qdb.metadata_template.sample_template.Sample(
'1.SKM1.640183', self.tester)),
('1.SKM2.640199', qdb.metadata_template.sample_template.Sample(
'1.SKM2.640199', self.tester)),
('1.SKM3.640197', qdb.metadata_template.sample_template.Sample(
'1.SKM3.640197', self.tester)),
('1.SKM4.640180', qdb.metadata_template.sample_template.Sample(
'1.SKM4.640180', self.tester)),
('1.SKM5.640177', qdb.metadata_template.sample_template.Sample(
'1.SKM5.640177', self.tester)),
('1.SKM6.640187', qdb.metadata_template.sample_template.Sample(
'1.SKM6.640187', self.tester)),
('1.SKM7.640188', qdb.metadata_template.sample_template.Sample(
'1.SKM7.640188', self.tester)),
('1.SKM8.640201', qdb.metadata_template.sample_template.Sample(
'1.SKM8.640201', self.tester)),
('1.SKM9.640192', qdb.metadata_template.sample_template.Sample(
'1.SKM9.640192', self.tester))]
# Creating a list and looping over it since unittest does not call
# the __eq__ function on the objects
for o, e in zip(sorted(list(obs)), sorted(exp)):
self.assertEqual(o, e)
def test_get(self):
"""get returns the correct sample object"""
obs = self.tester.get('1.SKM7.640188')
exp = qdb.metadata_template.sample_template.Sample(
'1.SKM7.640188', self.tester)
self.assertEqual(obs, exp)
def test_get_none(self):
"""get returns none if the sample id is not present"""
self.assertTrue(self.tester.get('Not_a_Sample') is None)
def test_clean_validate_template_error_bad_chars(self):
"""Raises an error if there are invalid characters in the sample names
"""
self.metadata.index = ['o()xxxx[{::::::::>', 'sample.1', 'sample.3']
ST = qdb.metadata_template.sample_template.SampleTemplate
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
ST._clean_validate_template(self.metadata, 2)
def test_clean_validate_template_error_duplicate_cols(self):
"""Raises an error if there are duplicated columns in the template"""
self.metadata['SAMPLE_TYPE'] = pd.Series(['foo', 'bar', 'foobar'],
index=self.metadata.index)
ST = qdb.metadata_template.sample_template.SampleTemplate
with self.assertRaises(qdb.exceptions.QiitaDBDuplicateHeaderError):
ST._clean_validate_template(self.metadata, 2)
def test_clean_validate_template_error_duplicate_samples(self):
"""Raises an error if there are duplicated samples in the template"""
self.metadata.index = ['sample.1', 'sample.1', 'sample.3']
ST = qdb.metadata_template.sample_template.SampleTemplate
with self.assertRaises(qdb.exceptions.QiitaDBDuplicateSamplesError):
ST._clean_validate_template(self.metadata, 2)
def test_clean_validate_template_columns(self):
metadata_dict = {
'Sample1': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 1',
'latitude': '42.42',
'longitude': '41.41'}
}
metadata = pd.DataFrame.from_dict(metadata_dict, orient='index',
dtype=str)
ST = qdb.metadata_template.sample_template.SampleTemplate
obs = ST._clean_validate_template(
metadata,
2,
current_columns=STC)
metadata_dict = {
'2.Sample1': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'host_subject_id': 'NotIdentified',
'description': 'Test Sample 1',
'latitude': '42.42',
'longitude': '41.41'}
}
exp = pd.DataFrame.from_dict(metadata_dict, orient='index', dtype=str)
obs.sort_index(axis=0, inplace=True)
obs.sort_index(axis=1, inplace=True)
exp.sort_index(axis=0, inplace=True)
exp.sort_index(axis=1, inplace=True)
assert_frame_equal(obs, exp, check_like=True)
def test_clean_validate_template(self):
ST = qdb.metadata_template.sample_template.SampleTemplate
obs = ST._clean_validate_template(
self.metadata,
2,
current_columns=STC)
metadata_dict = {
'2.Sample1': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp':
'2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'description': 'Test Sample 1',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
'2.Sample2': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp':
'2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'description': 'Test Sample 2',
'latitude': '4.2',
'longitude': '1.1',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
'2.Sample3': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp':
'2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'description': 'Test Sample 3',
'latitude': '4.8',
'longitude': '4.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
}
exp = pd.DataFrame.from_dict(metadata_dict, orient='index', dtype=str)
obs.sort_index(axis=0, inplace=True)
obs.sort_index(axis=1, inplace=True)
exp.sort_index(axis=0, inplace=True)
exp.sort_index(axis=1, inplace=True)
assert_frame_equal(obs, exp, check_like=True)
def test_clean_validate_template_no_pgsql_reserved_words(self):
ST = qdb.metadata_template.sample_template.SampleTemplate
self.metadata.rename(columns={'taxon_id': 'select'}, inplace=True)
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
ST._clean_validate_template(self.metadata, 2)
def test_clean_validate_template_no_qiime2_reserved_words(self):
ST = qdb.metadata_template.sample_template.SampleTemplate
self.metadata.rename(columns={'taxon_id': 'featureid'}, inplace=True)
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
ST._clean_validate_template(self.metadata, 2)
def test_clean_validate_template_no_invalid_chars(self):
ST = qdb.metadata_template.sample_template.SampleTemplate
self.metadata.rename(columns={'taxon_id': 'taxon id'}, inplace=True)
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
ST._clean_validate_template(self.metadata, 2)
def test_clean_validate_template_no_invalid_chars2(self):
ST = qdb.metadata_template.sample_template.SampleTemplate
self.metadata.rename(columns={'taxon_id': 'bla.'}, inplace=True)
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
ST._clean_validate_template(self.metadata, 2)
def test_clean_validate_template_no_invalid_chars3(self):
ST = qdb.metadata_template.sample_template.SampleTemplate
self.metadata.rename(columns={'taxon_id': 'this&is'}, inplace=True)
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
ST._clean_validate_template(self.metadata, 2)
def test_clean_validate_template_no_forbidden_words(self):
ST = qdb.metadata_template.sample_template.SampleTemplate
self.metadata.rename(columns={'taxon_id': 'sampleid'}, inplace=True)
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
ST._clean_validate_template(self.metadata, 2)
def test_clean_validate_template_no_forbidden_words2(self):
ST = qdb.metadata_template.sample_template.SampleTemplate
# A word forbidden only in SampleTemplate
self.metadata.rename(columns={'taxon_id': 'linkerprimersequence'},
inplace=True)
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
ST._clean_validate_template(self.metadata, 2)
def test_clean_validate_template_no_forbidden_words3(self):
ST = qdb.metadata_template.sample_template.SampleTemplate
# A word forbidden only in SampleTemplate
self.metadata.rename(columns={'taxon_id': 'barcode'}, inplace=True)
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
ST._clean_validate_template(self.metadata, 2)
# this test migrated to SampleTemplate, from MetadataTemplate, to test
# _identify_forbidden_words_in_column_names() with a usable list of
# forbidden words.
def test_identify_forbidden_words_in_column_names(self):
ST = qdb.metadata_template.sample_template.SampleTemplate
# tests filtering for sample_id, when it is not the first element
# verifies all forbidden elements for base class are returned
# verifies a forbidden word in sub-class will not be returned
# verifies normal column names are not returned
results = ST._identify_forbidden_words_in_column_names([
'just_fine3',
'sampleid',
'alice',
'linkerprimersequence',
'bob',
'qiita_study_id',
'qiita_prep_id',
'eve'])
self.assertEqual(set(results),
{'qiita_prep_id',
'qiita_study_id',
'linkerprimersequence',
'sampleid'})
def test_silent_drop(self):
ST = qdb.metadata_template.sample_template.SampleTemplate
self.assertNotIn('qiitq_prep_id',
(ST._clean_validate_template(self.metadata,
2)).columns.tolist())
def test_get_category(self):
pt = qdb.metadata_template.sample_template.SampleTemplate(1)
obs = pt.get_category('latitude')
exp = {'1.SKB2.640194': '35.2374368957',
'1.SKM4.640180': 'Not applicable',
'1.SKB3.640195': '95.2060749748',
'1.SKB6.640176': '78.3634273709',
'1.SKD6.640190': '29.1499460692',
'1.SKM6.640187': '0.291867635913',
'1.SKD9.640182': '23.1218032799',
'1.SKM8.640201': '3.21190859967',
'1.SKM2.640199': '82.8302905615',
'1.SKD2.640178': '53.5050692395',
'1.SKB7.640196': '13.089194595',
'1.SKD4.640185': '40.8623799474',
'1.SKB8.640193': '74.0894932572',
'1.SKM3.640197': 'Not applicable',
'1.SKD5.640186': '85.4121476399',
'1.SKB1.640202': '4.59216095574',
'1.SKM1.640183': '38.2627021402',
'1.SKD1.640179': '68.0991287718',
'1.SKD3.640198': '84.0030227585',
'1.SKB5.640181': '10.6655599093',
'1.SKB4.640189': '43.9614715197',
'1.SKB9.640200': '12.6245524972',
'1.SKM9.640192': '12.7065957714',
'1.SKD8.640184': '57.571893782',
'1.SKM5.640177': '44.9725384282',
'1.SKM7.640188': '60.1102854322',
'1.SKD7.640191': '68.51099627'}
self.assertEqual(obs, exp)
def test_get_category_no_exists(self):
pt = qdb.metadata_template.sample_template.SampleTemplate(1)
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
pt.get_category('DOESNOTEXIST')
def test_create_duplicate(self):
"""Create raises an error when creating a duplicated SampleTemplate"""
with self.assertRaises(qdb.exceptions.QiitaDBDuplicateError):
qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.test_study)
def test_create_duplicate_header(self):
"""Create raises an error when duplicate headers are present"""
self.metadata['SAMPLE_TYPE'] = pd.Series(['', '', ''],
index=self.metadata.index)
with self.assertRaises(qdb.exceptions.QiitaDBDuplicateHeaderError):
qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
def test_create_bad_sample_names(self):
"""Create raises an error when duplicate headers are present"""
# set a horrible list of sample names
self.metadata.index = ['o()xxxx[{::::::::>', 'sample.1', 'sample.3']
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
def test_create(self):
"""Creates a new SampleTemplate"""
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
new_id = self.new_study.id
# The returned object has the correct id
self.assertEqual(st.id, new_id)
self.assertEqual(st.study_id, self.new_study.id)
self.assertTrue(
qdb.metadata_template.sample_template.SampleTemplate.exists(
self.new_study.id))
exp_sample_ids = {"%s.Sample1" % new_id, "%s.Sample2" % new_id,
"%s.Sample3" % new_id}
self.assertEqual(st._get_sample_ids(), exp_sample_ids)
self.assertEqual(len(st), 3)
exp_categories = {'collection_timestamp', 'description',
'dna_extracted', 'host_subject_id', 'latitude',
'longitude', 'physical_specimen_location',
'physical_specimen_remaining', 'sample_type',
'scientific_name', 'taxon_id'}
self.assertCountEqual(st.categories, exp_categories)
exp_dict = {
"%s.Sample1" % new_id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 1",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '42.42',
'longitude': '41.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
"%s.Sample2" % new_id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 2",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.2',
'longitude': '1.1',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
"%s.Sample3" % new_id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 3",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.8',
'longitude': '4.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'}}
for s_id in exp_sample_ids:
self.assertDictEqual(st[s_id]._to_dict(), exp_dict[s_id])
exp = {"%s.Sample1" % new_id: None,
"%s.Sample2" % new_id: None,
"%s.Sample3" % new_id: None}
self.assertEqual(st.ebi_sample_accessions, exp)
self.assertEqual(st.biosample_accessions, exp)
def test_create_int_prefix(self):
"""Creates a new SampleTemplate with sample names int prefixed"""
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata_int_pref, self.new_study)
new_id = self.new_study.id
# The returned object has the correct id
self.assertEqual(st.id, new_id)
self.assertEqual(st.study_id, self.new_study.id)
self.assertTrue(
qdb.metadata_template.sample_template.SampleTemplate.exists(
self.new_study.id))
exp_sample_ids = {"%s.12.Sample1" % new_id, "%s.12.Sample2" % new_id,
"%s.12.Sample3" % new_id}
self.assertEqual(st._get_sample_ids(), exp_sample_ids)
self.assertEqual(len(st), 3)
exp_categories = {'collection_timestamp', 'description',
'dna_extracted', 'host_subject_id', 'latitude',
'longitude', 'physical_specimen_location',
'physical_specimen_remaining', 'sample_type',
'scientific_name', 'taxon_id'}
self.assertCountEqual(st.categories, exp_categories)
exp_dict = {
"%s.12.Sample1" % new_id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 1",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '42.42',
'longitude': '41.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
"%s.12.Sample2" % new_id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 2",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.2',
'longitude': '1.1',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
"%s.12.Sample3" % new_id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 3",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.8',
'longitude': '4.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'}}
for s_id in exp_sample_ids:
self.assertEqual(st[s_id]._to_dict(), exp_dict[s_id])
exp = {"%s.12.Sample1" % new_id: None,
"%s.12.Sample2" % new_id: None,
"%s.12.Sample3" % new_id: None}
self.assertEqual(st.ebi_sample_accessions, exp)
self.assertEqual(st.biosample_accessions, exp)
def test_create_str_prefixes(self):
"""Creates a new SampleTemplate with sample names string prefixed"""
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata_str_prefix, self.new_study)
new_id = self.new_study.id
# The returned object has the correct id
self.assertEqual(st.id, new_id)
self.assertEqual(st.study_id, self.new_study.id)
self.assertTrue(
qdb.metadata_template.sample_template.SampleTemplate.exists(
self.new_study.id))
exp_sample_ids = {"%s.foo.Sample1" % new_id, "%s.bar.Sample2" % new_id,
"%s.foo.Sample3" % new_id}
self.assertEqual(st._get_sample_ids(), exp_sample_ids)
self.assertEqual(len(st), 3)
exp_categories = {'collection_timestamp', 'description',
'dna_extracted', 'host_subject_id', 'latitude',
'longitude', 'physical_specimen_location',
'physical_specimen_remaining', 'sample_type',
'scientific_name', 'taxon_id'}
self.assertCountEqual(st.categories, exp_categories)
exp_dict = {
"%s.foo.Sample1" % new_id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 1",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '42.42',
'longitude': '41.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
"%s.bar.Sample2" % new_id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 2",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.2',
'longitude': '1.1',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
"%s.foo.Sample3" % new_id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 3",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.8',
'longitude': '4.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'}}
for s_id in exp_sample_ids:
self.assertEqual(st[s_id]._to_dict(), exp_dict[s_id])
exp = {"%s.foo.Sample1" % new_id: None,
"%s.bar.Sample2" % new_id: None,
"%s.foo.Sample3" % new_id: None}
self.assertEqual(st.ebi_sample_accessions, exp)
self.assertEqual(st.biosample_accessions, exp)
def test_create_already_prefixed_samples(self):
"""Creates a new SampleTemplate with the samples already prefixed"""
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata_prefixed, self.new_study)
new_id = self.new_study.id
# The returned object has the correct id
self.assertEqual(st.id, new_id)
self.assertEqual(st.study_id, self.new_study.id)
self.assertTrue(
qdb.metadata_template.sample_template.SampleTemplate.exists(
self.new_study.id))
exp_sample_ids = {"%s.Sample1" % new_id, "%s.Sample2" % new_id,
"%s.Sample3" % new_id}
self.assertEqual(st._get_sample_ids(), exp_sample_ids)
self.assertEqual(len(st), 3)
exp_categories = {'collection_timestamp', 'description',
'dna_extracted', 'host_subject_id', 'latitude',
'longitude', 'physical_specimen_location',
'physical_specimen_remaining', 'sample_type',
'scientific_name', 'taxon_id'}
self.assertCountEqual(st.categories, exp_categories)
exp_dict = {
"%s.Sample1" % new_id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 1",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '42.42',
'longitude': '41.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
"%s.Sample2" % new_id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 2",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.2',
'longitude': '1.1',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
"%s.Sample3" % new_id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 3",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.8',
'longitude': '4.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'}}
for s_id in exp_sample_ids:
self.assertEqual(st[s_id]._to_dict(), exp_dict[s_id])
exp = {"%s.Sample1" % new_id: None,
"%s.Sample2" % new_id: None,
"%s.Sample3" % new_id: None}
self.assertEqual(st.ebi_sample_accessions, exp)
self.assertEqual(st.biosample_accessions, exp)
def test_delete(self):
"""Deletes Sample template 1"""
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
qdb.metadata_template.sample_template.SampleTemplate.delete(st.id)
exp = []
with qdb.sql_connection.TRN:
sql = """SELECT * FROM qiita.study_sample WHERE study_id = %s"""
qdb.sql_connection.TRN.add(sql, [st.id])
obs = qdb.sql_connection.TRN.execute_fetchindex()
self.assertEqual(obs, exp)
with self.assertRaises(ValueError):
with qdb.sql_connection.TRN:
sql = """SELECT *
FROM qiita.sample_%d""" % st.id
qdb.sql_connection.TRN.add(sql)
with self.assertRaises(qdb.exceptions.QiitaDBError):
qdb.metadata_template.sample_template.SampleTemplate.delete(1)
def test_delete_unknown_id_error(self):
"""Try to delete a non-existent sample template"""
with self.assertRaises(qdb.exceptions.QiitaDBUnknownIDError):
qdb.metadata_template.sample_template.SampleTemplate.delete(5)
def test_exists_false(self):
"""Exists returns false when the SampleTemplate does not exists"""
self.assertFalse(
qdb.metadata_template.sample_template.SampleTemplate.exists(
self.new_study.id))
def test_update_category(self):
with self.assertRaises(qdb.exceptions.QiitaDBUnknownIDError):
self.tester.update_category('country', {"foo": "bar"})
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
self.tester.update_category('missing column',
{'1.SKM7.640188': 'stuff'})
negtest = self.tester['1.SKM7.640188']['country']
mapping = {'1.SKB1.640202': "1",
'1.SKB5.640181': "2",
'1.SKD6.640190': "3"}
self.tester.update_category('country', mapping)
self.assertEqual(self.tester['1.SKB1.640202']['country'], "1")
self.assertEqual(self.tester['1.SKB5.640181']['country'], "2")
self.assertEqual(self.tester['1.SKD6.640190']['country'], "3")
self.assertEqual(self.tester['1.SKM7.640188']['country'], negtest)
def test_update_equal(self):
"""It doesn't fail with the exact same template"""
# Create a new sample template
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
exp = {s_id: st[s_id]._to_dict() for s_id in st}
# Try to update the sample template with the same values
npt.assert_warns(
qdb.exceptions.QiitaDBWarning, st.update, self.metadata)
# Check that no values have been changed
obs = {s_id: st[s_id]._to_dict() for s_id in st}
self.assertEqual(obs, exp)
def test_update(self):
"""Updates values in existing mapping file"""
# creating a new sample template
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
# updating the sample template
st.update(self.metadata_dict_updated)
# validating values
exp = self.metadata_dict_updated_dict['Sample1'].values()
obs = st.get('%s.Sample1' % self.new_study.id).values()
self.assertCountEqual(obs, exp)
exp = self.metadata_dict_updated_dict['Sample2'].values()
obs = st.get('%s.Sample2' % self.new_study.id).values()
self.assertCountEqual(obs, exp)
exp = self.metadata_dict_updated_dict['Sample3'].values()
obs = st.get('%s.Sample3' % self.new_study.id).values()
self.assertCountEqual(obs, exp)
# checking errors
with self.assertRaises(qdb.exceptions.QiitaDBError):
st.update(self.metadata_dict_updated_sample_error)
with self.assertRaises(qdb.exceptions.QiitaDBError):
st.update(self.metadata_dict_updated_column_error)
def test_update_fewer_samples(self):
"""Updates using a dataframe with less samples that in the DB"""
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
new_metadata = pd.DataFrame.from_dict(
{'Sample1': {'physical_specimen_location': 'CHANGE'}},
orient='index', dtype=str)
exp = {s_id: st[s_id]._to_dict() for s_id in st}
s_id = '%d.Sample1' % self.new_study.id
exp[s_id]['physical_specimen_location'] = 'CHANGE'
st.update(new_metadata)
obs = {s_id: st[s_id]._to_dict() for s_id in st}
self.assertEqual(obs, exp)
def test_update_numpy(self):
"""Update values in existing mapping file with numpy values"""
ST = qdb.metadata_template.sample_template.SampleTemplate
metadata_dict = {
'Sample1': {'bool_col': 'true',
'date_col': '2015-09-01 00:00:00'},
'Sample2': {'bool_col': 'true',
'date_col': '2015-09-01 00:00:00'}
}
metadata = pd.DataFrame.from_dict(metadata_dict, orient='index',
dtype=str)
st = npt.assert_warns(qdb.exceptions.QiitaDBWarning, ST.create,
metadata, self.new_study)
metadata_dict['Sample2']['date_col'] = '2015-09-01 00:00:00'
metadata_dict['Sample1']['bool_col'] = 'false'
metadata = pd.DataFrame.from_dict(metadata_dict, orient='index',
dtype=str)
npt.assert_warns(qdb.exceptions.QiitaDBWarning, st.update, metadata)
with qdb.sql_connection.TRN:
sql = "SELECT * FROM qiita.sample_{0}".format(st.id)
qdb.sql_connection.TRN.add(sql)
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp = [
['%s.Sample2' % self.new_study.id, {
'bool_col': 'true', 'date_col': '2015-09-01 00:00:00'}],
['%s.Sample1' % self.new_study.id, {
'bool_col': 'false', 'date_col': '2015-09-01 00:00:00'}],
['qiita_sample_column_names', {
'columns': sorted(['bool_col', 'date_col'])}]]
# making sure they are always in the same order
obs[2][1]['columns'] = sorted(obs[2][1]['columns'])
self.assertEqual(sorted(obs), sorted(exp))
def test_generate_files(self):
fp_count = qdb.util.get_count("qiita.filepath")
self.tester.generate_files()
obs = qdb.util.get_count("qiita.filepath")
# We just make sure that the count has been increased by 3, since
# the contents of the files have been tested elsewhere.
self.assertEqual(obs, fp_count + 3)
def test_to_file(self):
"""to file writes a tab delimited file with all the metadata"""
fd, fp = mkstemp()
close(fd)
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
st.to_file(fp)
self._clean_up_files.append(fp)
with open(fp, newline=None) as f:
obs = f.read()
self.assertEqual(obs, EXP_SAMPLE_TEMPLATE.format(self.new_study.id))
fd, fp = mkstemp()
close(fd)
st.to_file(fp, {'%s.Sample1' % self.new_study.id,
'%s.Sample3' % self.new_study.id})
self._clean_up_files.append(fp)
with open(fp, newline=None) as f:
obs = f.read()
self.assertEqual(
obs, EXP_SAMPLE_TEMPLATE_FEWER_SAMPLES.format(self.new_study.id))
def test_get_filepath(self):
# we will check that there is a new id only because the path will
# change based on time and the same functionality is being tested
# in data.py
with qdb.sql_connection.TRN:
sql = "SELECT last_value FROM qiita.filepath_filepath_id_seq"
qdb.sql_connection.TRN.add(sql)
exp_id = qdb.sql_connection.TRN.execute_fetchflatten()[0] + 1
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
self.assertEqual(st.get_filepaths()[0][0], exp_id)
# testing current functionality: to add a new sample template
# you need to erase it first
qdb.metadata_template.sample_template.SampleTemplate.delete(st.id)
exp_id += 1
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
self.assertEqual(st.get_filepaths()[0][0], exp_id)
def test_extend_add_samples(self):
"""extend correctly works adding new samples"""
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
# we just created the sample info file so we should only have one
# filepath
self.assertEqual(len(st.get_filepaths()), 1)
md_dict = {
'Sample4': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 4',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
'Sample5': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 5',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'}}
md_ext = pd.DataFrame.from_dict(md_dict, orient='index', dtype=str)
npt.assert_warns(qdb.exceptions.QiitaDBWarning, st.extend, md_ext)
# we just updated so we should have 2 files:
self.assertEqual(len(st.get_filepaths()), 2)
# let's extend again to test that a new file is not created
st.extend(md_ext)
self.assertEqual(len(st.get_filepaths()), 2)
# Test samples have been added correctly
exp_sample_ids = {"%s.Sample1" % st.id, "%s.Sample2" % st.id,
"%s.Sample3" % st.id, "%s.Sample4" % st.id,
"%s.Sample5" % st.id}
self.assertEqual(st._get_sample_ids(), exp_sample_ids)
self.assertEqual(len(st), 5)
exp_categories = {'collection_timestamp', 'description',
'dna_extracted', 'host_subject_id', 'latitude',
'longitude', 'physical_specimen_location',
'physical_specimen_remaining', 'sample_type',
'scientific_name', 'taxon_id'}
self.assertCountEqual(st.categories, exp_categories)
exp_dict = {
"%s.Sample1" % st.id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 1",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '42.42',
'longitude': '41.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
"%s.Sample2" % st.id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 2",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.2',
'longitude': '1.1',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
"%s.Sample3" % st.id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 3",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.8',
'longitude': '4.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
'%s.Sample4' % st.id: {
'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'description': 'Test Sample 4',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
'%s.Sample5' % st.id: {
'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'description': 'Test Sample 5',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'}}
for s_id in exp_sample_ids:
self.assertEqual(st[s_id]._to_dict(), exp_dict[s_id])
def test_extend_add_duplicate_samples(self):
"""extend correctly works adding new samples and warns for duplicates
"""
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
self.metadata_dict['Sample4'] = {
'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 4',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'}
# Change a couple of values on the existent samples to test that
# they remain unchanged
self.metadata_dict['Sample1']['Description'] = 'Changed'
self.metadata_dict['Sample2']['scientific_name'] = 'Changed dynamic'
md_ext = pd.DataFrame.from_dict(self.metadata_dict, orient='index',
dtype=str)
# Make sure adding duplicate samples raises warning
npt.assert_warns(qdb.exceptions.QiitaDBWarning, st.extend, md_ext)
# Make sure the new sample has been added and the values for the
# existent samples did not change
exp_sample_ids = {"%s.Sample1" % st.id, "%s.Sample2" % st.id,
"%s.Sample3" % st.id, "%s.Sample4" % st.id}
self.assertEqual(st._get_sample_ids(), exp_sample_ids)
self.assertEqual(len(st), 4)
exp_categories = {'collection_timestamp', 'description',
'dna_extracted', 'host_subject_id', 'latitude',
'longitude', 'physical_specimen_location',
'physical_specimen_remaining', 'sample_type',
'scientific_name', 'taxon_id'}
self.assertCountEqual(st.categories, exp_categories)
exp_dict = {
"%s.Sample1" % st.id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 1",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '42.42',
'longitude': '41.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
"%s.Sample2" % st.id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 2",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.2',
'longitude': '1.1',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
"%s.Sample3" % st.id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 3",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.8',
'longitude': '4.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
'%s.Sample4' % st.id: {
'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'description': 'Test Sample 4',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'}}
for s_id in exp_sample_ids:
self.assertEqual(st[s_id]._to_dict(), exp_dict[s_id])
def test_extend_new_columns(self):
"""extend correctly adds a new column"""
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
self.metadata['texture'] = pd.Series(['val1', 'val2', 'val3'],
index=self.metadata.index)
self.metadata['TOT_NITRO'] = pd.Series(['val_1', 'val_2', 'val_3'],
index=self.metadata.index)
# Change some values to make sure that they do not change on extend
self.metadata_dict['Sample1']['Description'] = 'Changed'
self.metadata_dict['Sample2']['scientific_name'] = 'Changed dynamic'
# Make sure it raises a warning indicating that the new columns will
# be added for the existing samples
npt.assert_warns(
qdb.exceptions.QiitaDBWarning, st.extend, self.metadata)
exp_sample_ids = {"%s.Sample1" % st.id, "%s.Sample2" % st.id,
"%s.Sample3" % st.id}
self.assertEqual(st._get_sample_ids(), exp_sample_ids)
self.assertEqual(len(st), 3)
exp_categories = {'collection_timestamp', 'description',
'dna_extracted', 'host_subject_id', 'latitude',
'longitude', 'physical_specimen_location',
'physical_specimen_remaining', 'sample_type',
'scientific_name', 'taxon_id',
'texture', 'tot_nitro'}
self.assertCountEqual(st.categories, exp_categories)
exp_dict = {
"%s.Sample1" % st.id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 1",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '42.42',
'longitude': '41.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens',
'texture': 'val1',
'tot_nitro': 'val_1'},
"%s.Sample2" % st.id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 2",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.2',
'longitude': '1.1',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens',
'texture': 'val2',
'tot_nitro': 'val_2'},
"%s.Sample3" % st.id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 3",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.8',
'longitude': '4.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens',
'texture': 'val3',
'tot_nitro': 'val_3'}}
for s_id in exp_sample_ids:
self.assertEqual(st[s_id]._to_dict(), exp_dict[s_id])
def test_extend_new_samples_and_columns(self):
"""extend correctly adds new samples and columns at the same time"""
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
self.metadata_dict['Sample4'] = {
'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 4',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'}
# Change a couple of values on the existent samples to test that
# they remain unchanged
self.metadata_dict['Sample1']['Description'] = 'Changed'
self.metadata_dict['Sample2']['dna_extracted'] = 'Changed dynamic'
md_ext = pd.DataFrame.from_dict(self.metadata_dict, orient='index',
dtype=str)
md_ext['TOT_NITRO'] = pd.Series(['val1', 'val2', 'val3', 'val4'],
index=md_ext.index)
# Make sure adding duplicate samples raises warning
npt.assert_warns(qdb.exceptions.QiitaDBWarning, st.extend, md_ext)
exp_sample_ids = {"%s.Sample1" % st.id, "%s.Sample2" % st.id,
"%s.Sample3" % st.id, "%s.Sample4" % st.id}
self.assertEqual(st._get_sample_ids(), exp_sample_ids)
self.assertEqual(len(st), 4)
exp_categories = {'collection_timestamp', 'description',
'dna_extracted', 'host_subject_id', 'latitude',
'longitude', 'physical_specimen_location',
'physical_specimen_remaining', 'sample_type',
'scientific_name', 'taxon_id', 'tot_nitro'}
self.assertCountEqual(st.categories, exp_categories)
exp_dict = {
"%s.Sample1" % st.id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 1",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '42.42',
'longitude': '41.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens',
'tot_nitro': 'val1'},
"%s.Sample2" % st.id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 2",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.2',
'longitude': '1.1',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens',
'tot_nitro': 'val2'},
"%s.Sample3" % st.id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 3",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.8',
'longitude': '4.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens',
'tot_nitro': 'val3'},
'%s.Sample4' % st.id: {
'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'description': 'Test Sample 4',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens',
'tot_nitro': 'val4'}}
for s_id in exp_sample_ids:
self.assertEqual(st[s_id]._to_dict(), exp_dict[s_id])
def test_extend_update(self):
"""extend correctly adds new samples and columns at the same time"""
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
# updating with the same data should be a no-op; the rest of the
# test's expectations should not change
st.extend_and_update(self.metadata)
self.metadata_dict['Sample4'] = {
'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 4',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'}
self.metadata_dict['Sample1']['Description'] = 'Changed'
self.metadata_dict['Sample2']['scientific_name'] = 'Changed dynamic'
md_ext = pd.DataFrame.from_dict(self.metadata_dict, orient='index',
dtype=str)
md_ext['TOT_NITRO'] = pd.Series(['val1', 'val2', 'val3', 'val4'],
index=md_ext.index)
npt.assert_warns(qdb.exceptions.QiitaDBWarning, st.extend_and_update,
md_ext)
exp_sample_ids = {"%s.Sample1" % st.id, "%s.Sample2" % st.id,
"%s.Sample3" % st.id, "%s.Sample4" % st.id}
self.assertEqual(st._get_sample_ids(), exp_sample_ids)
self.assertEqual(len(st), 4)
exp_categories = {'collection_timestamp', 'description',
'dna_extracted', 'host_subject_id', 'latitude',
'longitude', 'physical_specimen_location',
'physical_specimen_remaining', 'sample_type',
'scientific_name', 'taxon_id', 'tot_nitro'}
self.assertCountEqual(st.categories, exp_categories)
exp_dict = {
"%s.Sample1" % st.id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Changed",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '42.42',
'longitude': '41.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens',
'tot_nitro': 'val1'},
"%s.Sample2" % st.id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 2",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.2',
'longitude': '1.1',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'Changed dynamic',
'tot_nitro': 'val2'},
"%s.Sample3" % st.id: {
'collection_timestamp': '2014-05-29 12:24:15',
'description': "Test Sample 3",
'dna_extracted': 'true',
'host_subject_id': "NotIdentified",
'latitude': '4.8',
'longitude': '4.41',
'physical_specimen_location': "location1",
'physical_specimen_remaining': 'true',
'sample_type': "type1",
'taxon_id': '9606',
'scientific_name': 'homo sapiens',
'tot_nitro': 'val3'},
'%s.Sample4' % st.id: {
'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'description': 'Test Sample 4',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens',
'tot_nitro': 'val4'}}
for s_id in exp_sample_ids:
self.assertEqual(st[s_id]._to_dict(), exp_dict[s_id])
def test_to_dataframe(self):
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
obs = st.to_dataframe()
new_id = self.new_study.id
exp_dict = {
'%s.Sample1' % new_id: {
'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'description': 'Test Sample 1',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'qiita_study_id': str(new_id),
'scientific_name': 'homo sapiens'},
'%s.Sample2' % new_id: {
'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'description': 'Test Sample 2',
'latitude': '4.2',
'longitude': '1.1',
'taxon_id': '9606',
'qiita_study_id': str(new_id),
'scientific_name': 'homo sapiens'},
'%s.Sample3' % new_id: {
'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'description': 'Test Sample 3',
'latitude': '4.8',
'longitude': '4.41',
'taxon_id': '9606',
'qiita_study_id': str(new_id),
'scientific_name': 'homo sapiens'},
}
exp = pd.DataFrame.from_dict(exp_dict, orient='index', dtype=str)
exp.index.name = 'sample_id'
obs.sort_index(axis=0, inplace=True)
obs.sort_index(axis=1, inplace=True)
exp.sort_index(axis=0, inplace=True)
exp.sort_index(axis=1, inplace=True)
assert_frame_equal(obs, exp, check_column_type=False)
obs = self.tester.to_dataframe()
# We don't test the specific values as that would add ~1000 lines to
# this file; just check that all 27 samples are present
self.assertEqual(len(obs), 27)
exp = {'1.SKB1.640202', '1.SKB2.640194', '1.SKB3.640195',
'1.SKB4.640189', '1.SKB5.640181', '1.SKB6.640176',
'1.SKB7.640196', '1.SKB8.640193', '1.SKB9.640200',
'1.SKD1.640179', '1.SKD2.640178', '1.SKD3.640198',
'1.SKD4.640185', '1.SKD5.640186', '1.SKD6.640190',
'1.SKD7.640191', '1.SKD8.640184', '1.SKD9.640182',
'1.SKM1.640183', '1.SKM2.640199', '1.SKM3.640197',
'1.SKM4.640180', '1.SKM5.640177', '1.SKM6.640187',
'1.SKM7.640188', '1.SKM8.640201', '1.SKM9.640192'}
self.assertEqual(set(obs.index), exp)
exp_columns = {
'physical_specimen_location', 'physical_specimen_remaining',
'dna_extracted', 'sample_type', 'collection_timestamp',
'host_subject_id', 'description', 'latitude', 'longitude',
'season_environment', 'assigned_from_geo', 'texture',
'taxon_id', 'depth', 'host_taxid', 'common_name', 'env_package',
'water_content_soil', 'elevation', 'temp', 'tot_nitro',
'samp_salinity', 'altitude', 'env_biome', 'country', 'ph',
'anonymized_name', 'tot_org_carb', 'description_duplicate',
'env_feature', 'scientific_name', 'qiita_study_id'}
self.assertEqual(set(obs.columns), exp_columns)
# test limiting samples produced
exp_samples = set(['1.SKD4.640185', '1.SKD5.640186'])
obs = self.tester.to_dataframe(samples=exp_samples)
self.assertEqual(len(obs), 2)
self.assertEqual(set(obs.index), exp_samples)
self.assertEqual(set(obs.columns), exp_columns)
# test with add_ebi_accessions as True
obs = self.tester.to_dataframe(True)
self.assertEqual(
self.tester.ebi_sample_accessions,
obs.qiita_ebi_sample_accessions.to_dict())
def test_check_restrictions(self):
obs = self.tester.check_restrictions(
[STC['EBI']])
self.assertEqual(obs, set([]))
def test_ebi_sample_accessions(self):
obs = self.tester.ebi_sample_accessions
exp = {'1.SKB8.640193': 'ERS000000',
'1.SKD8.640184': 'ERS000001',
'1.SKB7.640196': 'ERS000002',
'1.SKM9.640192': 'ERS000003',
'1.SKM4.640180': 'ERS000004',
'1.SKM5.640177': 'ERS000005',
'1.SKB5.640181': 'ERS000006',
'1.SKD6.640190': 'ERS000007',
'1.SKB2.640194': 'ERS000008',
'1.SKD2.640178': 'ERS000009',
'1.SKM7.640188': 'ERS000010',
'1.SKB1.640202': 'ERS000011',
'1.SKD1.640179': 'ERS000012',
'1.SKD3.640198': 'ERS000013',
'1.SKM8.640201': 'ERS000014',
'1.SKM2.640199': 'ERS000015',
'1.SKB9.640200': 'ERS000016',
'1.SKD5.640186': 'ERS000017',
'1.SKM3.640197': 'ERS000018',
'1.SKD9.640182': 'ERS000019',
'1.SKB4.640189': 'ERS000020',
'1.SKD7.640191': 'ERS000021',
'1.SKM6.640187': 'ERS000022',
'1.SKD4.640185': 'ERS000023',
'1.SKB3.640195': 'ERS000024',
'1.SKB6.640176': 'ERS000025',
'1.SKM1.640183': 'ERS000025'}
self.assertEqual(obs, exp)
obs = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study).ebi_sample_accessions
exp = {"%s.Sample1" % self.new_study.id: None,
"%s.Sample2" % self.new_study.id: None,
"%s.Sample3" % self.new_study.id: None}
self.assertEqual(obs, exp)
def test_ebi_sample_accessions_setter(self):
with self.assertRaises(qdb.exceptions.QiitaDBError):
self.tester.ebi_sample_accessions = {'1.SKB8.640193': 'ERS000010',
'1.SKD8.640184': 'ERS000001'}
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
exp_acc = {"%s.Sample1" % self.new_study.id: 'ERS000100',
"%s.Sample2" % self.new_study.id: 'ERS000110'}
st.ebi_sample_accessions = exp_acc
exp_acc["%s.Sample3" % self.new_study.id] = None
self.assertEqual(st.ebi_sample_accessions, exp_acc)
exp_acc["%s.Sample3" % self.new_study.id] = 'ERS0000120'
st.ebi_sample_accessions = exp_acc
self.assertEqual(st.ebi_sample_accessions, exp_acc)
# We need to wrap the assignment in a function so we can use
# npt.assert_warns
def f():
st.ebi_sample_accessions = exp_acc
npt.assert_warns(qdb.exceptions.QiitaDBWarning, f)
def test_biosample_accessions(self):
obs = self.tester.biosample_accessions
exp = {'1.SKB8.640193': 'SAMEA0000000',
'1.SKD8.640184': 'SAMEA0000001',
'1.SKB7.640196': 'SAMEA0000002',
'1.SKM9.640192': 'SAMEA0000003',
'1.SKM4.640180': 'SAMEA0000004',
'1.SKM5.640177': 'SAMEA0000005',
'1.SKB5.640181': 'SAMEA0000006',
'1.SKD6.640190': 'SAMEA0000007',
'1.SKB2.640194': 'SAMEA0000008',
'1.SKD2.640178': 'SAMEA0000009',
'1.SKM7.640188': 'SAMEA0000010',
'1.SKB1.640202': 'SAMEA0000011',
'1.SKD1.640179': 'SAMEA0000012',
'1.SKD3.640198': 'SAMEA0000013',
'1.SKM8.640201': 'SAMEA0000014',
'1.SKM2.640199': 'SAMEA0000015',
'1.SKB9.640200': 'SAMEA0000016',
'1.SKD5.640186': 'SAMEA0000017',
'1.SKM3.640197': 'SAMEA0000018',
'1.SKD9.640182': 'SAMEA0000019',
'1.SKB4.640189': 'SAMEA0000020',
'1.SKD7.640191': 'SAMEA0000021',
'1.SKM6.640187': 'SAMEA0000022',
'1.SKD4.640185': 'SAMEA0000023',
'1.SKB3.640195': 'SAMEA0000024',
'1.SKB6.640176': 'SAMEA0000025',
'1.SKM1.640183': 'SAMEA0000026'}
self.assertEqual(obs, exp)
obs = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study).biosample_accessions
exp = {"%s.Sample1" % self.new_study.id: None,
"%s.Sample2" % self.new_study.id: None,
"%s.Sample3" % self.new_study.id: None}
self.assertEqual(obs, exp)
def test_biosample_accessions_setter(self):
with self.assertRaises(qdb.exceptions.QiitaDBError):
self.tester.biosample_accessions = {'1.SKB8.640193': 'SAMEA110000',
'1.SKD8.640184': 'SAMEA110000'}
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
exp_acc = {"%s.Sample1" % self.new_study.id: 'SAMEA110000',
"%s.Sample2" % self.new_study.id: 'SAMEA120000'}
st.biosample_accessions = exp_acc
exp_acc["%s.Sample3" % self.new_study.id] = None
self.assertEqual(st.biosample_accessions, exp_acc)
exp_acc["%s.Sample3" % self.new_study.id] = 'SAMEA130000'
st.biosample_accessions = exp_acc
self.assertEqual(st.biosample_accessions, exp_acc)
# We need to wrap the assignment in a function so we can use
# npt.assert_warns
def f():
st.biosample_accessions = exp_acc
npt.assert_warns(qdb.exceptions.QiitaDBWarning, f)
def test_validate_template_warning_missing(self):
"""Warns if the template is missing a required column"""
metadata_dict = {
'Sample1': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 1',
'latitude': '42.42',
'longitude': '41.41'}
}
metadata = pd.DataFrame.from_dict(metadata_dict, orient='index',
dtype=str)
ST = qdb.metadata_template.sample_template.SampleTemplate
obs = ST._clean_validate_template(metadata, 2)
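# _clean_validate_template prefixes the sample names with the study id
# ('2.Sample1') and lower-cases the column headers, which is what the
# expected frame built below encodes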
metadata_dict = {
'2.Sample1': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'host_subject_id': 'NotIdentified',
'description': 'Test Sample 1',
'latitude': '42.42',
'longitude': '41.41'}
}
exp = pd.DataFrame.from_dict(metadata_dict, orient='index', dtype=str)
obs.sort_index(axis=0, inplace=True)
obs.sort_index(axis=1, inplace=True)
exp.sort_index(axis=0, inplace=True)
exp.sort_index(axis=1, inplace=True)
assert_frame_equal(obs, exp, check_like=True)
def test_validate_template_warning_missing_restrictions(self):
del self.metadata['collection_timestamp']
st = npt.assert_warns(
qdb.exceptions.QiitaDBWarning,
qdb.metadata_template.sample_template.SampleTemplate.create,
self.metadata, self.new_study)
obs = st.check_restrictions(
[STC['EBI']])
self.assertEqual(obs, {'collection_timestamp'})
def test_validate_errors(self):
self.metadata.at['Sample1', 'collection_timestamp'] = 'wrong date'
self.metadata.at['Sample2', 'latitude'] = 'wrong latitude'
self.metadata.at['Sample3', 'latitude'] = None
with catch_warnings(record=True) as warn:
qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
# it should only return one warning
self.assertEqual(len(warn), 1)
warn = warn[0]
# it should be QiitaDBWarning
self.assertEqual(warn.category, qdb.exceptions.QiitaDBWarning)
# it should contain this text
message = str(warn.message)
exp_error = ('Sample "%s.Sample2", column "latitude", wrong value '
'"wrong latitude"' % self.new_study.id)
self.assertIn(exp_error, message)
exp_error = ('Sample "%s.Sample1", column "collection_timestamp", '
'wrong value "wrong date"' % self.new_study.id)
self.assertIn(exp_error, message)
exp_error = ('Sample "%s.Sample3", column "latitude", '
'wrong value "None"' % self.new_study.id)
self.assertIn(exp_error, message)
def test_validate_errors_timestampA_year4digits(self):
column = 'collection_timestamp'
self.metadata.at['Sample1', column] = '2016-09-20 12:00'
self.metadata.at['Sample2', column] = '2016-09-20 12'
self.metadata.at['Sample3', column] = '2016-09-20'
with catch_warnings(record=True) as warn:
qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
# the warnings should be empty
self.assertEqual(warn, [])
def test_validate_errors_timestampA_year2digits(self):
column = 'collection_timestamp'
self.metadata.at['Sample1', column] = '16-09-20 12:00'
self.metadata.at['Sample2', column] = '9/20/16 12'
self.metadata.at['Sample3', column] = '09-20-16'
with catch_warnings(record=True) as warn:
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
exp_message = (
'Some functionality will be disabled due to missing '
'columns:\n\t'
'Sample "{0}.Sample1", column "collection_timestamp", '
'wrong value "16-09-20 12:00";\n\t'
'Sample "{0}.Sample2", column "collection_timestamp", '
'wrong value "9/20/16 12";\n\t'
'Sample "{0}.Sample3", column "collection_timestamp", '
'wrong value "09-20-16".\n'
'See the Templates tutorial '
'for a description of these fields.'.format(st.id))
# warnings is a list of 1 element
self.assertEqual(len(warn), 1)
# the order might change, so compare element by element
self.assertCountEqual(str(warn[0].message).split('\n'),
exp_message.split('\n'))
def test_validate_errors_timestampB_year4digits(self):
column = 'collection_timestamp'
self.metadata.at['Sample1', column] = '2016-12'
self.metadata.at['Sample2', column] = '2016'
with catch_warnings(record=True) as warn:
qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
# the warnings should be empty
self.assertEqual(warn, [])
def test_validate_errors_timestampB_year2digits(self):
column = 'collection_timestamp'
self.metadata.at['Sample1', column] = '16-12'
self.metadata.at['Sample2', column] = '16'
with catch_warnings(record=True) as warn:
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
exp_message = (
'Some functionality will be disabled due to missing '
'columns:\n\t'
'Sample "{0}.Sample1", column "collection_timestamp", wrong '
'value "16-12";\n\t'
'Sample "{0}.Sample2", column "collection_timestamp", wrong '
'value "16".\n'
'See the Templates tutorial for a description '
'of these fields.'.format(st.id))
# warnings is a list of 1 element
self.assertEqual(len(warn), 1)
self.assertEqual(str(warn[0].message), exp_message)
def test_delete_column(self):
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
st.delete_column('dna_extracted')
self.assertNotIn('dna_extracted', st.categories)
def test_delete_column_specimen_id(self):
st = qdb.metadata_template.sample_template.SampleTemplate.create(
self.metadata, self.new_study)
self.new_study.specimen_id_column = 'latitude'
with self.assertRaisesRegex(
qdb.exceptions.QiitaDBOperationNotPermittedError,
'"latitude" cannot be deleted, this column is currently '
r'selected as the tube identifier \(specimen_id_column\)'):
st.delete_column('latitude')
self.new_study.specimen_id_column = None
def test_delete_samples(self):
QE = qdb.exceptions
st = qdb.metadata_template.sample_template.SampleTemplate(1)
md_dict = {
'Sample4': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 4',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
'Sample5': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 4',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'},
'Sample6': {'physical_specimen_location': 'location1',
'physical_specimen_remaining': 'true',
'dna_extracted': 'true',
'sample_type': 'type1',
'collection_timestamp': '2014-05-29 12:24:15',
'host_subject_id': 'NotIdentified',
'Description': 'Test Sample 4',
'latitude': '42.42',
'longitude': '41.41',
'taxon_id': '9606',
'scientific_name': 'homo sapiens'}}
md_ext = pd.DataFrame.from_dict(md_dict, orient='index', dtype=str)
npt.assert_warns(QE.QiitaDBWarning, st.extend, md_ext)
st.delete_samples(['1.Sample4'])
self.assertNotIn('1.Sample4', st.keys())
self.assertIn('1.Sample5', st.keys())
self.assertIn('1.Sample6', st.keys())
st.delete_samples(['1.Sample5', '1.Sample6'])
self.assertNotIn('1.Sample5', st.keys())
self.assertNotIn('1.Sample6', st.keys())
# testing errors
with self.assertRaises(QE.QiitaDBUnknownIDError):
st.delete_samples(['not.existing.sample'])
with self.assertRaises(QE.QiitaDBOperationNotPermittedError):
st.delete_samples(['1.SKM5.640177'])
EXP_SAMPLE_TEMPLATE = (
"sample_name\tcollection_timestamp\tdescription\tdna_extracted\t"
"host_subject_id\tlatitude\tlongitude\tphysical_specimen_location\t"
"physical_specimen_remaining\tqiita_study_id\tsample_type\t"
"scientific_name\ttaxon_id\n"
"{0}.Sample1\t2014-05-29 12:24:15\tTest Sample 1\ttrue\tNotIdentified\t"
"42.42\t41.41\tlocation1\ttrue\t{0}\ttype1\thomo sapiens\t9606\n"
"{0}.Sample2\t2014-05-29 12:24:15\tTest Sample 2\ttrue\tNotIdentified\t"
"4.2\t1.1\tlocation1\ttrue\t{0}\ttype1\thomo sapiens\t9606\n"
"{0}.Sample3\t2014-05-29 12:24:15\tTest Sample 3\ttrue\tNotIdentified\t"
"4.8\t4.41\tlocation1\ttrue\t{0}\ttype1\thomo sapiens\t9606\n")
EXP_SAMPLE_TEMPLATE_FEWER_SAMPLES = (
"sample_name\tcollection_timestamp\tdescription\tdna_extracted\t"
"host_subject_id\tlatitude\tlongitude\tphysical_specimen_location\t"
"physical_specimen_remaining\tqiita_study_id\tsample_type\t"
"scientific_name\ttaxon_id\n"
"{0}.Sample1\t2014-05-29 12:24:15\tTest Sample 1\ttrue\tNotIdentified\t"
"42.42\t41.41\tlocation1\ttrue\t{0}\ttype1\thomo sapiens\t9606\n"
"{0}.Sample3\t2014-05-29 12:24:15\tTest Sample 3\ttrue\tNotIdentified\t"
"4.8\t4.41\tlocation1\ttrue\t{0}\ttype1\thomo sapiens\t9606\n")
if __name__ == '__main__':
main()
|
bsd-3-clause
|
peterwilletts24/Python-Scripts
|
era_interim/era_plot_geopotential.py
|
1
|
10741
|
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from matplotlib import cm
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import cm as cm_base
import cPickle as pickle
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.colors as colors
from matplotlib.patches import Polygon
import scipy.interpolate
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
rcParams['font.weight']='normal'
rcParams['text.color']='#262626'
plot_levels = [925, 850, 700, 500]
#plot_levels = [925]
plot_type='mean'
plot_diag='temperature'
min_contour = 0
max_contour = 2
tick_interval=0.2
lon_high_plot = 102
lon_low_plot = 64
lat_high_plot= 30.
lat_low_plot=-10
divisor=10 # for lat/lon rounding
geopotential, latitude_domsingle, longitude_domsingle= pickle.load(open('/nfs/a90/eepdw/Data/Saved_data/era_i/era_i_emb_geopotential_mean.p', 'rb'))
variable, latitude_domsinglet, longitude_domsinglet= pickle.load(open('/nfs/a90/eepdw/Data/Saved_data/era_i/era_i_emb_%s_mean.p' % plot_diag, 'rb'))
u_wind, latitude_domsingleu, longitude_domsingleu= pickle.load(open('/nfs/a90/eepdw/Data/Saved_data/era_i/era_i_emb_u_wind_mean.p', 'rb'))
v_wind, latitude_domsinglev, longitude_domsinglev= pickle.load(open('/nfs/a90/eepdw/Data/Saved_data/era_i/era_i_emb_v_wind_mean.p', 'rb'))
pressure_levels = pickle.load(open('/nfs/a90/eepdw/Data/Saved_data/era_i/era_i_emb_pressure_levels.p', 'rb'))
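# NOTE (assumption): the pickled fields are taken to be arrays shaped
# (pressure level, lat, lon) on the ERA-Interim grid, with matching
# latitude/longitude vectors; the level axis is indexed below via pressure_levels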
# convert geopotential to geopotential height
# Calculate total at each lat,lon position
#mean_dom = np.mean(pcp_dom, axis=0)
#sum_dom = np.sum(pcp_dom, axis=0)
lons= longitude_domsingle[:]
lats = latitude_domsingle[:]
lon_low= np.min(lons)
lon_high = np.max(lons)
lat_low = np.min(lats)
lat_high = np.max(lats)
lons,lats = np.meshgrid(lons, lats)
for p in plot_levels:
# Find index of plot level in datasets
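# np.searchsorted needs an ascending array, so pressure_levels (presumably
# stored from high to low pressure) is reversed; position s in the reversed
# list corresponds to index -(s+1) in the original level ordering, which is
# why the data arrays below are sliced with [-(s+1), :, :]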
s = np.searchsorted(pressure_levels[::-1], p)
# Get plot grids on pressure level
# /9.81 to convert geopotential to geopotential height
plt_h = geopotential[-(s+1),:,:]/9.81
plt_u_wind = u_wind[-(s+1),:,:]
plt_v_wind = v_wind[-(s+1),:,:]
plt_v = variable[-(s+1),:,:]
## Regrid winds onto 2 degree grid for clarity when plotting
# 2 degree lat/lon lists for wind regridding
lat_wind_2deg = np.arange(lat_low, lat_high, 2)
lon_wind_2deg = np.arange(lon_low, lon_high, 2)
lons_wi, lats_wi = np.meshgrid(lon_wind_2deg, lat_wind_2deg)
fl_la_lo = (lats.flatten(),lons.flatten())
u = scipy.interpolate.griddata(fl_la_lo, plt_u_wind.flatten(), (lats_wi, lons_wi), method='linear')
v = scipy.interpolate.griddata(fl_la_lo, plt_v_wind.flatten(), (lats_wi, lons_wi), method='linear')
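# griddata performs scattered linear interpolation of the wind components onto
# the coarser grid so the quiver arrows stay readable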
m_title = 'Height of %s-hPa level (m)' % (p)
# Set pressure height contour min/max
if p == 925:
clev_min = 680.
clev_max = 810.
elif p == 850:
clev_min = 1435.
clev_max = 1530.
elif p == 700:
clev_min = 3090.
clev_max = 3155.
elif p == 500:
clev_min = 5800.
clev_max = 5890.
else:
print 'Contour min/max not set for this pressure level'
# Set potential temperature min/max
if p == 925:
clevpt_min = 298.
clevpt_max = 310.
elif p == 850:
clevpt_min = 302.
clevpt_max = 312.
elif p == 700:
clevpt_min = 312.
clevpt_max = 320.
elif p == 500:
clevpt_min = 325.
clevpt_max = 332.
else:
print 'Potential temperature min/max not set for this pressure level'
# Set specific humidity min/max
if p == 925:
clevsh_min = 0.012
clevsh_max = 0.020
elif p == 850:
clevsh_min = 0.007
clevsh_max = 0.017
elif p == 700:
clevsh_min = 0.002
clevsh_max = 0.010
elif p == 500:
clevsh_min = 0.001
clevsh_max = 0.005
else:
print 'Specific humidity min/max not set for this pressure level'
clevvort_min = -5
clevvort_max = 5
#clevs_col = np.arange(clev_min, clev_max,256)
clevs_lin = np.linspace(clev_min, clev_max, num=20)
m =\
Basemap(llcrnrlon=lon_low_plot,llcrnrlat=lat_low_plot,urcrnrlon=lon_high_plot,urcrnrlat=lat_high_plot,projection='mill', rsphere=6371229)
x, y = m(lons, lats)
x_w, y_w = m(lons_wi, lats_wi)
fig=plt.figure(figsize=(8,8))
ax = fig.add_axes([0.05,0.05,0.9,0.85])
# draw coastlines, state and country boundaries, edge of map.
m.drawcoastlines(linewidth=0.5,color='#262626')
#m.drawstates()
m.drawcountries(linewidth=0.5,color='#262626')
# draw parallels.
parallels = np.arange(0.,90,divisor)
m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10, color='#262626' )
# draw meridians
meridians = np.arange(0.,360., divisor)
m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10, color='#262626')
#ny = mean_dom.shape[0]; nx = mean_dom.shape[1]
#lons, lats = m.makegrid(longitude_dom[1,:], latitude_dom[1,:]) # get lat/lons of ny by nx evenly space grid.
# draw geopotential contour lines
cs_lin = m.contour(x,y, plt_h, clevs_lin,colors='#262626',linewidths=0.5)
if plot_diag=='temperature':
clevspt_nums=clevpt_max-clevpt_min+1
plt_v = np.ma.masked_outside(plt_v, clevpt_max+20, clevpt_min-20)
tick_gap=2
cs_col = m.contourf(x,y, plt_v, np.linspace(clevpt_min, clevpt_max, clevspt_nums), cmap=plt.cm.jet, extend='both')
cbar = m.colorbar(cs_col,location='bottom',pad="5%")
#cbar.ax.tick_params(labelsize=12, colors='#262626')
tick_gap=2
ticks= np.arange(int(clevpt_min),int(clevpt_max)+tick_gap,tick_gap)
cbar.set_ticks(ticks, update_ticks=True)
cbar.set_ticklabels(([r"${%s}$" % x for x in ticks]))
cbar.set_label('Potential Temperature ${\\theta}$(K)', fontsize=12, color='#262626')
plt.suptitle('Height, Potential Temperature and Wind Vectors at %s hPa'% (p), fontsize=16, color='#262626')
elif plot_diag=='sphum':
clevssh_nums=clevpt_max-clevpt_min+1
plt_v = np.ma.masked_outside(plt_v, clevsh_max+20, clevsh_min-20)
cs_col = m.contourf(x,y, plt_v, np.linspace(clevsh_min, clevsh_max, clevssh_nums), cmap=plt.cm.jet_r, extend='both')
cbar = m.colorbar(cs_col,location='bottom',pad="5%", format = '%.3f')
tick_gap=0.002
ticks= np.arange(clevsh_min,clevsh_max+tick_gap,tick_gap)
cbar.set_ticks(ticks)
cbar.set_ticklabels((["${%.3f}$" % x for x in ticks]) )
cbar.set_label('Specific Humidity ${\\phi}$(kg/kg)', fontsize=12, color='#262626')
plt.suptitle('Height, Specific Humidity and Wind Vectors at %s hPa'% (p), fontsize=16, color='#262626')
elif plot_diag=='vort':
clevvort_min = -5
clevvort_max = 5
cs_col = m.contourf(x,y, plt_v*(10**5), np.linspace(clevvort_min, clevvort_max), cmap=plt.cm.RdBu_r, extend='both')
cbar = m.colorbar(cs_col,location='bottom',pad="5%", format = '%i')
tick_gap=1
ticks= np.arange(clevvort_min,clevvort_max+tick_gap,tick_gap)
cbar.set_ticks(ticks)
cbar.set_ticklabels((["${%i}$" % x for x in ticks]) )
cbar.set_label('Relative Vorticity (${10^{-5}\ s^{-1}}$)', fontsize=12, color='#262626')
plt.suptitle('Height, Relative Vorticity and Wind Vectors at %s hPa'% (p), fontsize=16, color='#262626')
elif plot_diag=='ptvort':
clevvort_min = -0.1
clevvort_max = 0.5
cs_col = m.contourf(x,y, plt_v*(10**6), np.linspace(clevvort_min, clevvort_max), cmap=plt.cm.RdBu_r, extend='both')
cbar = m.colorbar(cs_col,location='bottom',pad="5%")
#K m**2 kg**-1 s**-1
tick_gap=0.1
ticks= np.arange(clevvort_min,clevvort_max+tick_gap,tick_gap)
cbar.set_ticks(ticks)
cbar.set_ticklabels((["${%.1f}$" % x for x in ticks]) )
cbar.set_label('Potential Vorticity (${K\ m^{2}\ kg^{-1}\ s^{-1}}$)', fontsize=12, color='#262626')
plt.suptitle('Height, Potential Vorticity and Wind Vectors at %s hPa'% (p), fontsize=16, color='#262626')
elif plot_diag=='div':
clevvort_min = -1.5
clevvort_max = 1.5
cs_col = m.contourf(x,y, plt_v*(10**5), np.linspace(clevvort_min, clevvort_max), cmap=plt.cm.RdBu_r, extend='both')
cbar = m.colorbar(cs_col,location='bottom',pad="5%")
tick_gap=0.3
ticks= np.arange(clevvort_min,clevvort_max+tick_gap,tick_gap)
cbar.set_ticks(ticks)
cbar.set_ticklabels((["${%.1f}$" % x for x in ticks]) )
cbar.set_label('Divergence of Wind (${s^{-1}}$)', fontsize=12, color='#262626')
plt.suptitle('Height, Divergence and Wind Vectors at %s hPa'% (p), fontsize=16, color='#262626')
# Scale 150 for diff plots, scale 400 for mean state plots
# wind = m.quiver(x_w,y_w, u, v, scale=150,color='#262626' )
wind = m.quiver(x_w,y_w, u, v, scale=400, color='#262626' )
qk = plt.quiverkey(wind, 0.14, 0.072, 5, '${5\ ms^{-1}}$', labelpos='W', fontproperties={'weight':'heavy', 'size':'14'}, labelcolor='#262626', color='#262626' )
plt.clabel(cs_lin, fontsize=10, fmt="${%i}$", color='#262626')
# cbar.ax.tick_params(labelsize=10, color='#262626', ')
#plt.show()
plt.savefig('/nfs/a90/eepdw/Figures/ERA_Interim/Era_Interim_mean_EMBRACE_period_%shPa_%s_shorttitle.png' % (p,plot_diag), format='png', bbox_inches='tight')
#plt.title('TRMM Ra for EMBRACE Period ' , fontsize=16, color='#262626')
plt.savefig('/nfs/a90/eepdw/Figures/ERA_Interim/Era_Interim_mean_EMBRACE_period_%shPa_%s.png' % (p,plot_diag), format='png', bbox_inches='tight')
plt.suptitle('', visible=False)
plt.savefig('/nfs/a90/eepdw/Figures/ERA_Interim/Era_Interim_mean_EMBRACE_period_%shPa_%s_notitle.png' % (p,plot_diag), format='png', bbox_inches='tight')
|
mit
|
whitead/electronic-dance-music
|
algorithms/hill_design/hill_design.py
|
1
|
1306
|
import numpy as np
import matplotlib.pyplot as plt
from math import *
from scipy.special import erf
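# Reversed smoothstep: returns 1 for x <= 0, 0 for x >= 1 and a cubic blend in
# between; used below to switch the boundary correction on near the interval edges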
def sigmoid(x):
if(x > 1):
return 0
if(x < 0):
return 1
return 2. * x**3 - 3. * x ** 2 + 1
v_sigmoid = np.vectorize(sigmoid)
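# make_hill appears to build a boundary-corrected Gaussian hill centred at x on
# [min, max]: the erf factor renormalises the Gaussian truncated to the interval,
# and the sigmoid terms blend in the values the hill and its normalisation take
# at the boundaries so the result stays well behaved as x approaches min or max.
# It returns the grid, the corrected hill and the plain erf-normalised Gaussian.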
def make_hill(x, min, max, sigma):
xx = np.arange(min,max,0.01)
expo = np.exp(-(xx - x)**2 / (2*sigma**2))
denom = sqrt(2*pi) * sigma / 2. * (erf((xx - min) / (sqrt(2.) * sigma)) + erf((max - xx) / (sqrt(2.) * sigma)))
func_denom = denom + (0.5*sqrt(2*pi)*sigma - denom) * v_sigmoid((xx - min)/(sqrt(2.)*sigma)) + (0.5*sqrt(2*pi)*sigma - denom) * v_sigmoid((max - xx)/(sqrt(2.)*sigma))
func = expo + (exp(-(x-min)**2 / (2*sigma**2)) - expo) * v_sigmoid((xx-min)/(sqrt(2.)*sigma)) + (exp(-(x-max)**2 / (2.*sigma**2)) - expo) * v_sigmoid((max-xx)/(sqrt(2.)*sigma))
return xx, func / func_denom, expo / denom
x,y1,y2 = make_hill(0., 0., 10., 2.)
plt.plot(x,y1, color="blue")
plt.plot(x,y2, color="blue", linestyle='--')
x,y1,y2 = make_hill(1., 0., 10., 2.)
plt.plot(x,y1, color="green")
plt.plot(x,y2, color="green", linestyle='--')
x,y1,y2 = make_hill(2., 0., 10., 2.)
plt.plot(x,y1, color="red")
plt.plot(x,y2, color="red", linestyle='--')
#x,y = make_hill(9.8, 0, 10, 2)
#plt.plot(x,y)
plt.savefig("hill.png")
|
gpl-3.0
|
cerrno/neurokernel
|
neurokernel/pm.py
|
1
|
16781
|
#!/usr/bin/env python
"""
Port mapper classes.
"""
import numpy as np
import pandas as pd
from plsel import SelectorMethods
class BasePortMapper(object):
"""
Maps integer sequence to/from path-like port identifiers.
Examples
--------
>>> pm = BasePortMapper('/[a,b][0:2]')
>>> print pm.ports_to_inds('/b[0:2]')
array([2, 3])
>>> print pm.inds_to_ports([0, 1])
[('a', 0), ('a', 1)]
Parameters
----------
selector : str, unicode, or sequence
Selector string (e.g., '/foo[0:2]') or sequence of token sequences
(e.g., [['foo', (0, 2)]]) to map to `data`.
portmap : sequence of int
Integer indices to map to port identifiers. If no map is specified,
it is assumed to be an array of consecutive integers from 0
through one less than the number of ports.
Attributes
----------
index : pandas.MultiIndex
Index of port identifiers.
portmap : pandas.Series
Map of port identifiers to integer indices.
Notes
-----
The selectors may not contain any '*' or '[:]' characters.
A single port identifier may be mapped to multiple integer indices,
but not vice-versa.
"""
def __init__(self, selector, portmap=None):
self.sel = SelectorMethods()
N = self.sel.count_ports(selector)
if portmap is None:
self.portmap = pd.Series(data=np.arange(N))
else:
assert len(portmap) == N
self.portmap = pd.Series(data=np.array(portmap))
self.portmap.index = self.sel.make_index(selector)
def copy(self):
"""
Return copy of this port mapper.
Returns
-------
result : neurokernel.plsel.BasePortMapper
Copy of port mapper instance.
"""
c = BasePortMapper('')
c.portmap = self.portmap.copy()
return c
@classmethod
def from_index(cls, idx, portmap=None):
"""
Create port mapper from a Pandas index and a sequence of integer indices.
Parameters
----------
        idx : pandas.MultiIndex
Index containing selector data.
portmap : sequence of int
Integer indices to map to port identifiers. If no map is specified,
it is assumed to be an array of consecutive integers from 0
through one less than the number of ports.
Returns
-------
result : neurokernel.plsel.BasePortMapper
New port mapper instance.
Notes
-----
If specified, the portmap sequence is copied into the new mapper to avoid
side effects associated with modifying the specified sequence after
mapper instantiation.
"""
pm = cls('')
N = len(idx)
if portmap is None:
pm.portmap = pd.Series.from_array(np.arange(N), idx)
else:
assert len(portmap) == N
pm.portmap = pd.Series.from_array(np.array(portmap), idx)
return pm
@classmethod
def from_pm(cls, pm):
"""
Create a new port mapper instance given an existing instance.
Parameters
----------
result : neurokernel.plsel.BasePortMapper
Existing port mapper instance.
Returns
-------
result : neurokernel.plsel.BasePortMapper
New port mapper instance.
"""
assert isinstance(pm, cls)
r = cls('')
r.portmap = pm.portmap.copy()
return r
@property
def index(self):
"""
Port mapper index.
"""
return self.portmap.index
@index.setter
def index(self, i):
self.portmap.index = i
def inds_to_ports(self, inds):
"""
Convert list of integer indices to port identifiers.
Examples
--------
>>> pm = BasePortMapper('/[a,b][0:2]')
>>> print pm.inds_to_ports([0, 1])
[('a', 0), ('a', 1)]
Parameters
----------
inds : array_like of int
Integer indices of ports.
Returns
-------
t : list of tuple
Expanded port identifiers.
"""
return self.portmap[self.portmap.isin(inds)].index.tolist()
def ports_to_inds(self, selector):
"""
Convert port selector to list of integer indices.
Examples
--------
>>> pm = BasePortMapper('/[a,b][0:2]')
        >>> print pm.ports_to_inds('/b[0:2]')
        array([2, 3])
Parameters
----------
selector : str, unicode, or sequence
Selector string (e.g., '/foo[0:2]') or sequence of token sequences
(e.g., [['foo', (0, 2)]]).
Returns
-------
inds : numpy.ndarray of int
Integer indices of ports comprised by selector.
"""
return self.sel.select(self.portmap,
selector).dropna().astype(np.int64).values
def get_map(self, selector):
"""
Retrieve integer indices associated with selector.
Parameters
----------
selector : str, unicode, or sequence
Selector string (e.g., '/foo[0:2]') or sequence of token sequences
(e.g., [['foo', (0, 2)]]).
Returns
-------
result : numpy.ndarray
Selected data.
"""
return np.asarray(self.sel.select(self.portmap, selector).dropna())
def set_map(self, selector, portmap):
"""
Set mapped integer index associated with selector.
Parameters
----------
selector : str, unicode, or sequence
Selector string (e.g., '/foo[0:2]') or sequence of token sequences
(e.g., [['foo', (0, 2)]]).
portmap : sequence of int
Integer indices to map to port identifiers.
"""
self.portmap[self.sel.get_index(self.portmap, selector)] = portmap
def equals(self, pm):
"""
Check whether this mapper is equivalent to another mapper.
Parameters
----------
pm : neurokernel.plsel.BasePortMapper
Mapper to compare to this mapper.
Returns
-------
result : bool
True if the specified port mapper contains the same port
identifiers as this instance and maps them to the same integer
values.
Notes
-----
The port identifiers and maps in the specified port mapper need not be
in the same order as this instance to be deemed equal.
"""
assert isinstance(pm, BasePortMapper)
pm0 = self.portmap.order()
pm1 = pm.portmap.order()
if np.array_equal(pm0.values, pm1.values) and \
pm0.index.equals(pm1.index):
return True
else:
return False
def __len__(self):
return self.portmap.size
def __repr__(self):
return 'Map:\n----\n'+self.portmap.__repr__()
class PortMapper(BasePortMapper):
"""
Maps a numpy array to/from path-like port identifiers.
Examples
--------
>>> data = np.array([1, 0, 3, 2, 5, 2])
>>> pm = PortMapper('/d[0:5]', data)
>>> print pm['/d[1]']
array([0])
>>> print pm['/d[2:4]']
array([3, 2])
Parameters
----------
selector : str, unicode, or sequence
Selector string (e.g., '/foo[0:2]') or sequence of token sequences
(e.g., [['foo', (0, 2)]]) to map to `data`.
data : numpy.ndarray
1D data array to map to ports. If no data array is specified, port
identifiers will still be mapped to their sequential indices but
__getitem__() and __setitem__() will raise exceptions if invoked.
portmap : sequence of int
Integer indices to map to port identifiers. If no map is specified,
it is assumed to be an array of consecutive integers from 0
through one less than the number of ports.
make_copy : bool
If True, map a copy of the specified data array to the specified
port identifiers.
Attributes
----------
data : numpy.ndarray
Data that has been mapped to ports.
dtype : numpy.dtype
Type of mapped data.
index : pandas.MultiIndex
Index of port identifiers.
portmap : pandas.Series
Map of port identifiers to integer indices into `data`.
Notes
-----
The selectors may not contain any '*' or '[:]' characters.
"""
def _validate_data(self, data):
"""
Check whether the mapper's ports are compatible with the specified port data array.
"""
# A port mapper may contain or be assigned None as its data array:
if data is None:
return True
try:
# Cannot handle more than 1 dimension:
assert np.ndim(data) <= 1
# The integers in the port map must be valid indices into the
# data array:
# assert max(self.portmap) < len(data)
# The port mapper may map identifiers to some portion of the data array:
# assert len(self) <= len(data)
except:
return False
else:
return True
def __init__(self, selector, data=None, portmap=None, make_copy=True):
super(PortMapper, self).__init__(selector, portmap)
self._data = None
if data is None:
self.data = None
else:
if np.ndim(data) == 0:
self.data = np.full(len(self), data)
else:
if make_copy:
self.data = data.copy()
else:
self.data = data
@property
def data(self):
"""
Data associated with ports.
"""
return self._data
@data.setter
def data(self, x):
if self._validate_data(x):
if x is None:
self._data = None
# Always store dimensionless values in a 1D array:
elif np.ndim(x) == 0:
self._data = np.array([x])
else:
if len(x):
self._data = np.asarray(x)
else:
self._data = None
else:
raise ValueError('incompatible or invalid data array specified')
def copy(self):
"""
Return copy of this port mapper.
Returns
-------
result : neurokernel.plsel.PortMapper
Copy of port mapper instance.
"""
c = self.__class__('')
c.portmap = self.portmap.copy()
c.data = self.data.copy()
return c
@classmethod
def from_index(cls, idx, data, portmap=None):
raise NotImplementedError
@classmethod
def from_pm(cls, pm):
"""
Create a new port mapper instance given an existing instance.
Parameters
----------
result : neurokernel.plsel.PortMapper
Existing port mapper instance.
Returns
-------
result : neurokernel.plsel.PortMapper
New port mapper instance.
"""
assert isinstance(pm, cls)
r = cls('')
r.portmap = pm.portmap.copy()
r.data = pm.data.copy()
return r
@property
def dtype(self):
"""
Port mapper data type.
"""
return self.data.dtype
@dtype.setter
def dtype(self, d):
self.data.dtype = d
def get(self, selector):
"""
Retrieve mapped data specified by given selector.
Parameters
----------
selector : str, unicode, or sequence
Selector string (e.g., '/foo[0:2]') or sequence of token sequences
(e.g., [['foo', (0, 2)]]).
Returns
-------
result : numpy.ndarray
Selected data.
"""
if self.data is None:
raise ValueError('port mapper contains no data')
return self.data[np.asarray(self.sel.select(self.portmap, selector).dropna().values, dtype=np.int)]
def get_by_inds(self, inds):
"""
Retrieve mapped data specified by integer index.
Parameters
----------
inds : sequence of int
Integer indices of data elements to return.
Returns
-------
result : numpy.ndarray
Selected data.
"""
if self.data is None:
raise ValueError('port mapper contains no data')
return self.data[inds]
def get_ports(self, f):
"""
Select ports using a data selection function.
Parameters
----------
f : callable or sequence
If callable, treat as elementwise selection function to apply to
the mapped data array. If a sequence, treat as an index into the
mapped data array.
Returns
-------
s : list of tuple
Expanded port identifiers selected by the specified function
or boolean array.
"""
assert callable(f) or (np.iterable(f) and len(f) == len(self.data))
if callable(f):
idx = self.portmap[f(self.data)].index
else:
idx = self.portmap[f].index
return self.sel.index_to_selector(idx)
def get_inds_nonzero(self):
"""
Select indices of ports with nonzero data.
Returns
-------
inds : numpy.ndarray
Array of integer indices.
"""
return np.nonzero(self.data)[0]
def get_ports_nonzero(self):
"""
Select ports with nonzero data.
Returns
-------
s : list of tuple
Expanded port identifiers whose corresponding data is nonzero.
"""
return self.get_ports(lambda x: np.nonzero(x)[0])
def get_ports_as_inds(self, f):
"""
Select integer indices corresponding to ports in map.
Examples
--------
>>> import numpy as np
        >>> pm = PortMapper('/a[0:5]', np.array([0, 1, 0, 1, 0]))
>>> pm.get_ports_as_inds(lambda x: np.asarray(x, dtype=np.bool))
array([1, 3])
Parameters
----------
f : callable or sequence
If callable, treat as elementwise selection function to apply to
the mapped data array. If a sequence, treat as an index into the
mapped data array.
Returns
-------
inds : numpy.ndarray of int
Integer indices of selected ports.
"""
assert callable(f) or (np.iterable(f) and len(f) == len(self.data))
if callable(f):
v = self.portmap[f(self.data)].values
else:
v = self.portmap[f].values
return v
def set(self, selector, data):
"""
Set mapped data specified by given selector.
Parameters
----------
selector : str, unicode, or sequence
Selector string (e.g., '/foo[0:2]') or sequence of token sequences
(e.g., [['foo', (0, 2)]]).
data : numpy.ndarray
Array of data to save.
"""
# sel.select will return a Series with nan for selector [()], hence dropna
# is necessary here
if self.data is None:
self.data = data
else:
self.data[np.asarray(self.sel.select(self.portmap, selector).dropna().values, dtype=np.int)] = data
def set_by_inds(self, inds, data):
"""
Set mapped data by integer indices.
Parameters
----------
inds : sequence of int
Integer indices of data elements to update.
data : numpy.ndarray
Data to assign.
"""
self.data[inds] = data
__getitem__ = get
__setitem__ = set
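    # Indexing syntax delegates to get()/set(); illustrative only:
    #   pm['/d[2:4]'] is equivalent to pm.get('/d[2:4]')
    #   pm['/d[2:4]'] = vals is equivalent to pm.set('/d[2:4]', vals)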
def equals(self, other):
"""
Check whether this mapper is equivalent to another mapper.
Parameters
----------
other : neurokernel.plsel.PortMapper
Mapper to compare to this mapper.
Returns
-------
result : bool
True if the mappers map the same selectors to the same integer
indices and data.
Notes
-----
Mappers containing the same rows in different orders are not
regarded as equivalent.
"""
assert isinstance(other, self.__class__)
return self.portmap.equals(other.portmap) and (self.data == other.data).all()
def __repr__(self):
return 'Map:\n----\n'+self.portmap.__repr__()+'\n\ndata:\n'+self.data.__repr__()
|
bsd-3-clause
|
mirnylab/cooler
|
tests/test_create_ingest.py
|
1
|
16586
|
# -*- coding: utf-8 -*-
import os.path as op
import tempfile
# import filecmp
import os
from pandas.api import types
import numpy as np
import pandas as pd
import h5py
import cooler
from cooler.cli.cload import (
tabix as cload_tabix,
pairix as cload_pairix,
pairs as cload_pairs,
)
from cooler.cli.load import load
import pytest
_pandas_major_version = int(pd.__version__.split('.')[0])
tmp = tempfile.gettempdir()
testdir = op.realpath(op.dirname(__file__))
testcool_path = op.join(tmp, "test.cool")
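# Helper that builds a non-uniform bin table by walking each chromosome with a
# repeating cycle of step sizes (clipping the final bin to the chromosome
# length); used below to exercise aggregation on variable-width bins.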
def _alternating_bins(chromsizes, steps):
def _each(chrom):
clen = chromsizes[chrom]
edges = [0]
total = 0
i = 0
while edges[i] < clen:
step = steps[i % len(steps)]
total += step
i += 1
edges.append(total)
print(edges)
edges[-1] = clen
return pd.DataFrame(
{"chrom": chrom, "start": edges[:-1], "end": edges[1:]},
columns=["chrom", "start", "end"],
)
return pd.concat(map(_each, chromsizes.keys()), axis=0, ignore_index=True)
def test_from_hdf5_pairs():
def should_not_depend_on_chunksize(chromsizes, bintable, mock_pairs):
# try different chunk sizes
binner = cooler.create.HDF5Aggregator(
mock_pairs, chromsizes, bintable, chunksize=66
)
cooler.create.create(testcool_path, bintable, binner)
with h5py.File(testcool_path, "r") as h5:
oc1 = h5["indexes"]["chrom_offset"][:]
ob1 = h5["indexes"]["bin1_offset"][:]
p1 = cooler.api.pixels(h5, join=False)
binner = cooler.create.HDF5Aggregator(
mock_pairs, chromsizes, bintable, chunksize=666
)
cooler.create.create(testcool_path, bintable, binner)
with h5py.File(testcool_path, "r") as h5:
oc2 = h5["indexes"]["chrom_offset"][:]
ob2 = h5["indexes"]["bin1_offset"][:]
p2 = cooler.api.pixels(h5, join=False)
assert np.all(oc1 == oc2)
assert np.all(ob1 == ob2)
assert np.all(p1.values == p2.values)
def should_raise_if_input_not_sorted(chromsizes, bintable, mock_pairs):
# not sorted by chrm1
# with h5py.File(testcool_path, 'w') as h5:
bad_reads = {
"chrms1": mock_pairs["chrms2"],
"cuts1": mock_pairs["cuts2"],
"chrms2": mock_pairs["chrms1"],
"cuts2": mock_pairs["cuts1"],
}
with pytest.raises(ValueError):
cooler.create.HDF5Aggregator(bad_reads, chromsizes, bintable, chunksize=66)
# not triu
bad_reads = {
"chrms1": mock_pairs["chrms1"].copy(),
"cuts1": mock_pairs["cuts1"].copy(),
"chrms2": mock_pairs["chrms2"].copy(),
"cuts2": mock_pairs["cuts2"].copy(),
}
bad_reads["chrms1"][0] = 0
bad_reads["chrms2"][0] = 0
bad_reads["cuts1"][0] = 10
bad_reads["cuts2"][0] = 9
binner = cooler.create.HDF5Aggregator(
bad_reads, chromsizes, bintable, chunksize=66
)
with pytest.raises(ValueError):
cooler.create.create(testcool_path, bintable, binner)
def should_work_with_int32_cols(chromsizes, bintable, mock_pairs):
# int64
binner = cooler.create.HDF5Aggregator(
mock_pairs, chromsizes, bintable, chunksize=66
)
cooler.create.create(testcool_path, bintable, binner)
with h5py.File(testcool_path, "r") as h5:
oc1 = h5["indexes"]["chrom_offset"][:]
ob1 = h5["indexes"]["bin1_offset"][:]
p1 = cooler.api.pixels(h5, join=False)
# int32
mock_pairs32 = {
"chrms1": mock_pairs["chrms1"].astype(np.int32),
"cuts1": mock_pairs["cuts1"].astype(np.int32),
"chrms2": mock_pairs["chrms2"].astype(np.int32),
"cuts2": mock_pairs["cuts2"].astype(np.int32),
}
binner = cooler.create.HDF5Aggregator(
mock_pairs32, chromsizes, bintable, chunksize=66
)
cooler.create.create(testcool_path, bintable, binner)
with h5py.File(testcool_path, "r") as h5:
oc2 = h5["indexes"]["chrom_offset"][:]
ob2 = h5["indexes"]["bin1_offset"][:]
p2 = cooler.api.pixels(h5, join=False)
assert np.all(oc1 == oc2)
assert np.all(ob1 == ob2)
assert np.all(p1.values == p2.values)
def _mock_hdf5_pairs():
np.random.seed(1)
chrms = np.random.randint(0, n_chroms, n_records * 2)
cuts = np.random.randint(0, clen, n_records * 2)
abs_cuts = np.array([clen * chrm + cut for chrm, cut in zip(chrms, cuts)])
abs_cuts1, abs_cuts2 = abs_cuts[:n_records], abs_cuts[n_records:]
mock_pairs = {
"chrms1": chrms[:n_records],
"cuts1": cuts[:n_records],
"chrms2": chrms[n_records:],
"cuts2": cuts[n_records:],
}
# Triu-sort
mask = abs_cuts1 > abs_cuts2
mock_pairs["chrms1"][mask], mock_pairs["chrms2"][mask] = (
mock_pairs["chrms2"][mask],
mock_pairs["chrms1"][mask],
)
mock_pairs["cuts1"][mask], mock_pairs["cuts2"][mask] = (
mock_pairs["cuts2"][mask],
mock_pairs["cuts1"][mask],
)
abs_cuts1[mask], abs_cuts2[mask] = abs_cuts2[mask], abs_cuts1[mask]
idx = np.lexsort([abs_cuts2, abs_cuts1])
for key in mock_pairs:
mock_pairs[key] = mock_pairs[key][idx]
return mock_pairs
n_chroms = 2
clen = 2000
n_records = 3000
chromsizes = pd.Series(index=["chr1", "chr2"], data=[clen, clen])
mock_pairs = _mock_hdf5_pairs()
# uniform bins
bintable = cooler.binnify(chromsizes, 100)
should_not_depend_on_chunksize(chromsizes, bintable, mock_pairs)
should_raise_if_input_not_sorted(chromsizes, bintable, mock_pairs)
should_work_with_int32_cols(chromsizes, bintable, mock_pairs)
# non-uniform bins
bintable = _alternating_bins(chromsizes, [10, 100])
should_not_depend_on_chunksize(chromsizes, bintable, mock_pairs)
should_raise_if_input_not_sorted(chromsizes, bintable, mock_pairs)
should_work_with_int32_cols(chromsizes, bintable, mock_pairs)
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize(
"bins_path,pairs_path,ref_path",
[
(
op.join(testdir, "data", "hg19.bins.2000kb.bed.gz"),
op.join(testdir, "data", "hg19.GM12878-MboI.pairs.subsample.sorted.txt.gz"),
op.join(testdir, "data", "hg19.GM12878-MboI.matrix.2000kb.cool"),
)
],
)
def test_cload_tabix(bins_path, pairs_path, ref_path):
cload_tabix.callback(
bins_path,
pairs_path,
testcool_path,
metadata=None,
assembly="hg19",
nproc=8,
zero_based=False,
max_split=2,
)
with h5py.File(testcool_path, "r") as f1, h5py.File(ref_path, "r") as f2:
assert np.all(f1["pixels/bin1_id"][:] == f2["pixels/bin1_id"][:])
assert np.all(f1["pixels/bin2_id"][:] == f2["pixels/bin2_id"][:])
assert np.all(f1["pixels/count"][:] == f2["pixels/count"][:])
try:
os.remove(testcool_path)
except OSError:
pass
@pytest.mark.parametrize(
"bins_path,pairs_path,ref_path",
[
(
op.join(testdir, "data", "hg19.bins.2000kb.bed.gz"),
op.join(testdir, "data", "hg19.GM12878-MboI.pairs.subsample.blksrt.txt.gz"),
op.join(testdir, "data", "hg19.GM12878-MboI.matrix.2000kb.cool"),
)
],
)
def test_cload_pairix(bins_path, pairs_path, ref_path):
cload_pairix.callback(
bins_path,
pairs_path,
testcool_path,
metadata=None,
assembly="hg19",
nproc=8,
zero_based=False,
max_split=2,
)
with h5py.File(testcool_path, "r") as f1, h5py.File(ref_path, "r") as f2:
assert np.all(f1["pixels/bin1_id"][:] == f2["pixels/bin1_id"][:])
assert np.all(f1["pixels/bin2_id"][:] == f2["pixels/bin2_id"][:])
assert np.all(f1["pixels/count"][:] == f2["pixels/count"][:])
try:
os.remove(testcool_path)
except OSError:
pass
@pytest.mark.skipif(
_pandas_major_version < 1,
reason="hash fix only works with pandas >= 1.0"
)
@pytest.mark.parametrize(
"bins_path,pairs_path,ref_path",
[
(
op.join(testdir, "data", "toy.bins.var.bed"),
op.join(testdir, "data", "toy.pairs"),
op.join(testdir, "data", "toy.symm.upper.var.cool"),
),
(
op.join(testdir, "data", "toy.bins.var.bed"),
op.join(testdir, "data", "toy_hash.pairs"),
op.join(testdir, "data", "toy.symm.upper.var.cool"),
),
(
op.join(testdir, "data", "toy.bins.var.bed"),
op.join(testdir, "data", "toy_hash.pairs.gz"),
op.join(testdir, "data", "toy.symm.upper.var.cool"),
)
],
)
def test_cload_pairs(bins_path, pairs_path, ref_path):
kwargs = dict(
metadata=None,
assembly="hg19",
chunksize=int(15e6),
zero_based=False,
comment_char="#",
input_copy_status="unique",
no_symmetric_upper=False,
field=(),
temp_dir=None,
no_delete_temp=False,
storage_options=None,
no_count=False,
max_merge=200,
chrom1=2,
pos1=3,
chrom2=4,
pos2=5,
)
cload_pairs.callback(bins_path, pairs_path, testcool_path, **kwargs)
with h5py.File(testcool_path, "r") as f1, h5py.File(ref_path, "r") as f2:
assert np.all(f1["pixels/bin1_id"][:] == f2["pixels/bin1_id"][:])
assert np.all(f1["pixels/bin2_id"][:] == f2["pixels/bin2_id"][:])
assert np.all(f1["pixels/count"][:] == f2["pixels/count"][:])
try:
os.remove(testcool_path)
except OSError:
pass
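# The ":2" suffix on the chromsizes path and the field=("score=8:dtype=float",)
# argument below follow cooler's bins-spec (chromsizes:binsize) and extra-field
# (name=column:dtype=...) conventions, as read from the test inputs here rather
# than from the CLI documentation.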
@pytest.mark.parametrize(
"bins_path,pairs_path",
[
(
op.join(testdir, "data", "toy.chrom.sizes") + ":2",
op.join(testdir, "data", "toy.pairs"),
)
],
)
def test_cload_field(bins_path, pairs_path):
kwargs = dict(
metadata=None,
assembly="toy",
chunksize=10,
zero_based=False,
comment_char="#",
input_copy_status="unique",
no_symmetric_upper=False,
temp_dir=None,
no_delete_temp=False,
storage_options=None,
no_count=True,
max_merge=200,
chrom1=2,
pos1=3,
chrom2=4,
pos2=5,
)
cload_pairs.callback(
bins_path, pairs_path, testcool_path, field=("score=8:dtype=float",), **kwargs
)
pixels = cooler.Cooler(testcool_path).pixels()[:]
assert "count" in pixels.columns and types.is_integer_dtype(pixels.dtypes["count"])
assert "score" in pixels.columns and types.is_float_dtype(pixels.dtypes["score"])
@pytest.mark.parametrize(
"bins_path,pairs_path",
[
(
op.join(testdir, "data", "toy.chrom.sizes") + ":2",
op.join(testdir, "data", "toy.pairs"),
)
],
)
def test_cload_custom_tempdir(bins_path, pairs_path):
for temp_dir in [op.join(testdir, "data"), "-"]:
cload_pairs.callback(
bins_path,
pairs_path,
testcool_path,
metadata=None,
assembly="toy",
chunksize=10,
zero_based=False,
comment_char="#",
input_copy_status="unique",
no_symmetric_upper=False,
field=(),
temp_dir=temp_dir,
no_delete_temp=False,
storage_options=None,
no_count=True,
max_merge=200,
chrom1=2,
pos1=3,
chrom2=4,
pos2=5,
)
pixels = cooler.Cooler(testcool_path).pixels()[:]
assert "count" in pixels.columns and types.is_integer_dtype(
pixels.dtypes["count"]
)
def test_load_bg2_vs_coo():
kwargs = dict(
metadata=None,
assembly="hg19",
chunksize=int(20e6),
field=(),
count_as_float=False,
one_based=False,
comment_char="#",
input_copy_status="unique",
no_symmetric_upper=False,
storage_options=None,
)
out_path1 = op.join(tmp, "test1.cool")
out_path2 = op.join(tmp, "test2.cool")
load.callback(
op.join(testdir, "data", "hg19.bins.2000kb.bed.gz"),
op.join(testdir, "data", "hg19.GM12878-MboI.matrix.2000kb.bg2.gz"),
out_path1,
format="bg2",
**kwargs
)
load.callback(
op.join(testdir, "data", "hg19.bins.2000kb.bed.gz"),
op.join(testdir, "data", "hg19.GM12878-MboI.matrix.2000kb.coo.txt"),
out_path2,
format="coo",
**kwargs
)
with h5py.File(out_path1, "r") as f1, h5py.File(out_path2, "r") as f2:
for col in ["bin1_id", "bin2_id", "count"]:
assert np.all(f1["pixels"][col][:] == f2["pixels"][col][:])
for fp in [out_path1, out_path2]:
try:
os.remove(fp)
except OSError:
pass
def test_load_zero_one_based_bg2():
kwargs = dict(
format="bg2",
metadata=None,
assembly="toy",
chunksize=10,
field=(),
count_as_float=False,
comment_char="#",
input_copy_status="unique",
no_symmetric_upper=False,
storage_options=None,
)
# 1-based-start BG2 input
ref = "toy.symm.upper.1.ob.bg2"
bins_path = op.join(testdir, "data", "toy.chrom.sizes") + ":1"
pixels_path = op.join(testdir, "data", ref)
load.callback(bins_path, pixels_path, testcool_path, one_based=True, **kwargs)
# reference, 1-based starts
ref_df = pd.read_csv(
pixels_path,
sep="\t",
names=["chrom1", "start1", "end1", "chrom2", "start2", "end2", "count"],
)
# output
out_df = cooler.Cooler(testcool_path).pixels(join=True)[:]
out_df["start1"] += 1
out_df["start2"] += 1
assert np.all(out_df == ref_df)
# 0-based-start BG2 input
ref = "toy.symm.upper.1.zb.bg2"
bins_path = op.join(testdir, "data", "toy.chrom.sizes") + ":1"
pixels_path = op.join(testdir, "data", ref)
load.callback(bins_path, pixels_path, testcool_path, one_based=False, **kwargs)
# reference, 0-based starts
ref_df = pd.read_csv(
pixels_path,
sep="\t",
names=["chrom1", "start1", "end1", "chrom2", "start2", "end2", "count"],
)
# output
out_df = cooler.Cooler(testcool_path).pixels(join=True)[:]
assert np.all(out_df == ref_df)
def test_load_zero_one_based_coo():
kwargs = dict(
format="coo",
metadata=None,
assembly="toy",
chunksize=10,
field=(),
count_as_float=False,
comment_char="#",
input_copy_status="unique",
no_symmetric_upper=False,
storage_options=None,
)
# 1-based-start COO input
ref = "toy.symm.upper.1.ob.coo"
bins_path = op.join(testdir, "data", "toy.chrom.sizes") + ":1"
pixels_path = op.join(testdir, "data", ref)
load.callback(bins_path, pixels_path, testcool_path, one_based=True, **kwargs)
# reference, 1-based starts
ref_df = pd.read_csv(pixels_path, sep="\t", names=["bin1_id", "bin2_id", "count"])
# output
out_df = cooler.Cooler(testcool_path).pixels()[:]
out_df["bin1_id"] += 1
out_df["bin2_id"] += 1
assert np.all(out_df == ref_df)
# 0-based-start COO input
ref = "toy.symm.upper.1.zb.coo"
bins_path = op.join(testdir, "data", "toy.chrom.sizes") + ":1"
pixels_path = op.join(testdir, "data", ref)
load.callback(bins_path, pixels_path, testcool_path, one_based=False, **kwargs)
# reference, 0-based starts
ref_df = pd.read_csv(pixels_path, sep="\t", names=["bin1_id", "bin2_id", "count"])
# output
out_df = cooler.Cooler(testcool_path).pixels()[:]
assert np.all(out_df == ref_df)
def test_array_loader():
chromsizes = cooler.util.read_chromsizes(
op.join(testdir, "data", "toy.chrom.sizes")
)
bins = cooler.util.binnify(chromsizes, 10)
n = len(bins)
array = np.ones((n, n))
iterator = cooler.create.ArrayLoader(bins, array, chunksize=100)
list(iterator)
array = np.ones((n + 1, n + 1))
with pytest.raises(ValueError):
cooler.create.ArrayLoader(bins, array, chunksize=100)
|
bsd-3-clause
|
bradmontgomery/ml
|
book/ch06/04_sent.py
|
6
|
10142
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# This script tries to tweak hyperparameters to improve the P/R AUC
#
import time
start_time = time.time()
import re
import nltk
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.cross_validation import ShuffleSplit
from utils import plot_pr
from utils import load_sanders_data
from utils import tweak_labels
from utils import log_false_positives
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import f1_score
from sklearn.base import BaseEstimator
from sklearn.naive_bayes import MultinomialNB
from utils import load_sent_word_net
sent_word_net = load_sent_word_net()
phase = "04"
import json
poscache_filename = "poscache.json"
try:
poscache = json.load(open(poscache_filename, "r"))
except IOError:
poscache = {}
class LinguisticVectorizer(BaseEstimator):
def get_feature_names(self):
return np.array(['sent_neut', 'sent_pos', 'sent_neg',
'nouns', 'adjectives', 'verbs', 'adverbs',
'allcaps', 'exclamation', 'question'])
def fit(self, documents, y=None):
return self
def _get_sentiments(self, d):
# http://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
sent = tuple(nltk.word_tokenize(d))
if poscache is not None:
if d in poscache:
tagged = poscache[d]
else:
poscache[d] = tagged = nltk.pos_tag(sent)
else:
tagged = nltk.pos_tag(sent)
pos_vals = []
neg_vals = []
nouns = 0.
adjectives = 0.
verbs = 0.
adverbs = 0.
for w, t in tagged:
p, n = 0, 0
sent_pos_type = None
if t.startswith("NN"):
sent_pos_type = "n"
nouns += 1
elif t.startswith("JJ"):
sent_pos_type = "a"
adjectives += 1
elif t.startswith("VB"):
sent_pos_type = "v"
verbs += 1
elif t.startswith("RB"):
sent_pos_type = "r"
adverbs += 1
if sent_pos_type is not None:
sent_word = "%s/%s" % (sent_pos_type, w)
if sent_word in sent_word_net:
p, n = sent_word_net[sent_word]
pos_vals.append(p)
neg_vals.append(n)
l = len(sent)
avg_pos_val = np.mean(pos_vals)
avg_neg_val = np.mean(neg_vals)
return [1 - avg_pos_val - avg_neg_val, avg_pos_val, avg_neg_val,
nouns / l, adjectives / l, verbs / l, adverbs / l]
def transform(self, documents):
obj_val, pos_val, neg_val, nouns, adjectives, verbs, adverbs = np.array(
[self._get_sentiments(d) for d in documents]).T
allcaps = []
exclamation = []
question = []
for d in documents:
allcaps.append(
np.sum([t.isupper() for t in d.split() if len(t) > 2]))
exclamation.append(d.count("!"))
question.append(d.count("?"))
result = np.array(
[obj_val, pos_val, neg_val, nouns, adjectives, verbs, adverbs, allcaps,
exclamation, question]).T
return result
emo_repl = {
# positive emoticons
"<3": " good ",
":d": " good ", # :D in lower case
":dd": " good ", # :DD in lower case
"8)": " good ",
":-)": " good ",
":)": " good ",
";)": " good ",
"(-:": " good ",
"(:": " good ",
# negative emoticons:
":/": " bad ",
":>": " sad ",
":')": " sad ",
":-(": " bad ",
":(": " bad ",
":S": " bad ",
":-S": " bad ",
}
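# Replace longer emoticons first (keys sorted by length, descending) so that,
# for example, ":dd" is rewritten before its prefix ":d".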
emo_repl_order = [k for (k_len, k) in reversed(
sorted([(len(k), k) for k in emo_repl.keys()]))]
re_repl = {
r"\br\b": "are",
r"\bu\b": "you",
r"\bhaha\b": "ha",
r"\bhahaha\b": "ha",
r"\bdon't\b": "do not",
r"\bdoesn't\b": "does not",
r"\bdidn't\b": "did not",
r"\bhasn't\b": "has not",
r"\bhaven't\b": "have not",
r"\bhadn't\b": "had not",
r"\bwon't\b": "will not",
r"\bwouldn't\b": "would not",
r"\bcan't\b": "can not",
r"\bcannot\b": "can not",
}
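# Assemble the full model: a FeatureUnion of the hand-crafted linguistic
# features above and word-level TF-IDF vectors, feeding a multinomial naive
# Bayes classifier. The preprocessor normalises emoticons and a few common
# contractions before vectorisation.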
def create_union_model(params=None):
def preprocessor(tweet):
tweet = tweet.lower()
for k in emo_repl_order:
tweet = tweet.replace(k, emo_repl[k])
for r, repl in re_repl.iteritems():
tweet = re.sub(r, repl, tweet)
return tweet.replace("-", " ").replace("_", " ")
tfidf_ngrams = TfidfVectorizer(preprocessor=preprocessor,
analyzer="word")
ling_stats = LinguisticVectorizer()
all_features = FeatureUnion(
[('ling', ling_stats), ('tfidf', tfidf_ngrams)])
#all_features = FeatureUnion([('tfidf', tfidf_ngrams)])
#all_features = FeatureUnion([('ling', ling_stats)])
clf = MultinomialNB()
pipeline = Pipeline([('all', all_features), ('clf', clf)])
if params:
pipeline.set_params(**params)
return pipeline
def __grid_search_model(clf_factory, X, Y):
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, indices=True, random_state=0)
param_grid = dict(vect__ngram_range=[(1, 1), (1, 2), (1, 3)],
vect__min_df=[1, 2],
vect__smooth_idf=[False, True],
vect__use_idf=[False, True],
vect__sublinear_tf=[False, True],
vect__binary=[False, True],
clf__alpha=[0, 0.01, 0.05, 0.1, 0.5, 1],
)
grid_search = GridSearchCV(clf_factory(),
param_grid=param_grid,
cv=cv,
score_func=f1_score,
verbose=10)
grid_search.fit(X, Y)
clf = grid_search.best_estimator_
print clf
return clf
def train_model(clf, X, Y, name="NB ngram", plot=False):
# create it again for plotting
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, indices=True, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = []
precisions, recalls, thresholds = [], [], []
clfs = [] # just to later get the median
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf.fit(X_train, y_train)
clfs.append(clf)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
scores.append(test_score)
proba = clf.predict_proba(X_test)
fpr, tpr, roc_thresholds = roc_curve(y_test, proba[:, 1])
precision, recall, pr_thresholds = precision_recall_curve(
y_test, proba[:, 1])
pr_scores.append(auc(recall, precision))
precisions.append(precision)
recalls.append(recall)
thresholds.append(pr_thresholds)
if plot:
scores_to_sort = pr_scores
median = np.argsort(scores_to_sort)[len(scores_to_sort) / 2]
plot_pr(pr_scores[median], name, phase, precisions[median],
recalls[median], label=name)
log_false_positives(clfs[median], X_test, y_test, name)
summary = (np.mean(scores), np.std(scores),
np.mean(pr_scores), np.std(pr_scores))
print "%.3f\t%.3f\t%.3f\t%.3f\t" % summary
return np.mean(train_errors), np.mean(test_errors)
def print_incorrect(clf, X, Y):
Y_hat = clf.predict(X)
wrong_idx = Y_hat != Y
X_wrong = X[wrong_idx]
Y_wrong = Y[wrong_idx]
Y_hat_wrong = Y_hat[wrong_idx]
for idx in xrange(len(X_wrong)):
print "clf.predict('%s')=%i instead of %i" %\
(X_wrong[idx], Y_hat_wrong[idx], Y_wrong[idx])
def get_best_model():
best_params = dict(all__tfidf__ngram_range=(1, 2),
all__tfidf__min_df=1,
all__tfidf__stop_words=None,
all__tfidf__smooth_idf=False,
all__tfidf__use_idf=False,
all__tfidf__sublinear_tf=True,
all__tfidf__binary=False,
clf__alpha=0.01,
)
best_clf = create_union_model(best_params)
return best_clf
if __name__ == "__main__":
X_orig, Y_orig = load_sanders_data()
#from sklearn.utils import shuffle
# print "shuffle, sample"
#X_orig, Y_orig = shuffle(X_orig, Y_orig)
#X_orig = X_orig[:100,]
#Y_orig = Y_orig[:100,]
classes = np.unique(Y_orig)
for c in classes:
print "#%s: %i" % (c, sum(Y_orig == c))
print "== Pos vs. neg =="
pos_neg = np.logical_or(Y_orig == "positive", Y_orig == "negative")
X = X_orig[pos_neg]
Y = Y_orig[pos_neg]
Y = tweak_labels(Y, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs neg", plot=True)
print "== Pos/neg vs. irrelevant/neutral =="
X = X_orig
Y = tweak_labels(Y_orig, ["positive", "negative"])
# best_clf = grid_search_model(create_union_model, X, Y, name="sent vs
# rest", plot=True)
train_model(get_best_model(), X, Y, name="pos+neg vs rest", plot=True)
print "== Pos vs. rest =="
X = X_orig
Y = tweak_labels(Y_orig, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs rest",
plot=True)
print "== Neg vs. rest =="
X = X_orig
Y = tweak_labels(Y_orig, ["negative"])
train_model(get_best_model(), X, Y, name="neg vs rest",
plot=True)
print "time spent:", time.time() - start_time
json.dump(poscache, open(poscache_filename, "w"))
|
mit
|
cbertinato/pandas
|
pandas/io/formats/format.py
|
1
|
56367
|
"""
Internal module for formatting output data in csv, html,
and latex files. This module also applies to display formatting.
"""
from functools import partial
from io import StringIO
from shutil import get_terminal_size
from unicodedata import east_asian_width
import numpy as np
from pandas._config.config import get_option, set_option
from pandas._libs import lib
from pandas._libs.tslib import format_array_from_datetime
from pandas._libs.tslibs import NaT, Timedelta, Timestamp, iNaT
from pandas.core.dtypes.common import (
is_categorical_dtype, is_complex_dtype, is_datetime64_dtype,
is_datetime64tz_dtype, is_extension_array_dtype, is_float, is_float_dtype,
is_integer, is_integer_dtype, is_list_like, is_numeric_dtype, is_scalar,
is_timedelta64_dtype)
from pandas.core.dtypes.generic import (
ABCIndexClass, ABCMultiIndex, ABCSeries, ABCSparseArray)
from pandas.core.dtypes.missing import isna, notna
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.index import Index, ensure_index
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.io.common import _expand_user, _stringify_path
from pandas.io.formats.printing import adjoin, justify, pprint_thing
common_docstring = """
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : %(col_space_type)s, optional
%(col_space)s.
header : bool, optional
%(header)s.
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
.. versionadded:: 0.18.0
"""
_VALID_JUSTIFY_PARAMETERS = ("left", "right", "center", "justify",
"justify-all", "start", "end", "inherit",
"match-parent", "initial", "unset")
return_docstring = """
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
"""
class CategoricalFormatter:
def __init__(self, categorical, buf=None, length=True, na_rep='NaN',
footer=True):
self.categorical = categorical
self.buf = buf if buf is not None else StringIO("")
self.na_rep = na_rep
self.length = length
self.footer = footer
def _get_footer(self):
footer = ''
if self.length:
if footer:
footer += ', '
footer += "Length: {length}".format(length=len(self.categorical))
level_info = self.categorical._repr_categories_info()
# Levels are added in a newline
if footer:
footer += '\n'
footer += level_info
return str(footer)
def _get_formatted_values(self):
return format_array(self.categorical.get_values(), None,
float_format=None, na_rep=self.na_rep)
def to_string(self):
categorical = self.categorical
if len(categorical) == 0:
if self.footer:
return self._get_footer()
else:
return ''
fmt_values = self._get_formatted_values()
result = ['{i}'.format(i=i) for i in fmt_values]
result = [i.strip() for i in result]
result = ', '.join(result)
result = ['[' + result + ']']
if self.footer:
footer = self._get_footer()
if footer:
result.append(footer)
return str('\n'.join(result))
class SeriesFormatter:
def __init__(self, series, buf=None, length=True, header=True, index=True,
na_rep='NaN', name=False, float_format=None, dtype=True,
max_rows=None):
self.series = series
self.buf = buf if buf is not None else StringIO()
self.name = name
self.na_rep = na_rep
self.header = header
self.length = length
self.index = index
self.max_rows = max_rows
if float_format is None:
float_format = get_option("display.float_format")
self.float_format = float_format
self.dtype = dtype
self.adj = _get_adjustment()
self._chk_truncate()
def _chk_truncate(self):
from pandas.core.reshape.concat import concat
max_rows = self.max_rows
truncate_v = max_rows and (len(self.series) > max_rows)
series = self.series
if truncate_v:
if max_rows == 1:
row_num = max_rows
series = series.iloc[:max_rows]
else:
row_num = max_rows // 2
series = concat((series.iloc[:row_num],
series.iloc[-row_num:]))
self.tr_row_num = row_num
else:
self.tr_row_num = None
self.tr_series = series
self.truncate_v = truncate_v
def _get_footer(self):
name = self.series.name
footer = ''
if getattr(self.series.index, 'freq', None) is not None:
footer += 'Freq: {freq}'.format(freq=self.series.index.freqstr)
if self.name is not False and name is not None:
if footer:
footer += ', '
series_name = pprint_thing(name,
escape_chars=('\t', '\r', '\n'))
footer += (("Name: {sname}".format(sname=series_name))
if name is not None else "")
if (self.length is True or
(self.length == 'truncate' and self.truncate_v)):
if footer:
footer += ', '
footer += 'Length: {length}'.format(length=len(self.series))
if self.dtype is not False and self.dtype is not None:
name = getattr(self.tr_series.dtype, 'name', None)
if name:
if footer:
footer += ', '
footer += 'dtype: {typ}'.format(typ=pprint_thing(name))
# level infos are added to the end and in a new line, like it is done
# for Categoricals
if is_categorical_dtype(self.tr_series.dtype):
level_info = self.tr_series._values._repr_categories_info()
if footer:
footer += "\n"
footer += level_info
return str(footer)
def _get_formatted_index(self):
index = self.tr_series.index
is_multi = isinstance(index, ABCMultiIndex)
if is_multi:
have_header = any(name for name in index.names)
fmt_index = index.format(names=True)
else:
have_header = index.name is not None
fmt_index = index.format(name=True)
return fmt_index, have_header
def _get_formatted_values(self):
values_to_format = self.tr_series._formatting_values()
return format_array(values_to_format, None,
float_format=self.float_format, na_rep=self.na_rep)
def to_string(self):
series = self.tr_series
footer = self._get_footer()
if len(series) == 0:
return "{name}([], {footer})".format(
name=self.series.__class__.__name__, footer=footer)
fmt_index, have_header = self._get_formatted_index()
fmt_values = self._get_formatted_values()
if self.truncate_v:
n_header_rows = 0
row_num = self.tr_row_num
width = self.adj.len(fmt_values[row_num - 1])
if width > 3:
dot_str = '...'
else:
dot_str = '..'
# Series uses mode=center because it has single value columns
# DataFrame uses mode=left
dot_str = self.adj.justify([dot_str], width, mode='center')[0]
fmt_values.insert(row_num + n_header_rows, dot_str)
fmt_index.insert(row_num + 1, '')
if self.index:
result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values])
else:
result = self.adj.adjoin(3, fmt_values)
if self.header and have_header:
result = fmt_index[0] + '\n' + result
if footer:
result += '\n' + footer
return str(''.join(result))
class TextAdjustment:
def __init__(self):
self.encoding = get_option("display.encoding")
def len(self, text):
return len(text)
def justify(self, texts, max_len, mode='right'):
return justify(texts, max_len, mode=mode)
def adjoin(self, space, *lists, **kwargs):
return adjoin(space, *lists, strlen=self.len,
justfunc=self.justify, **kwargs)
class EastAsianTextAdjustment(TextAdjustment):
def __init__(self):
super().__init__()
if get_option("display.unicode.ambiguous_as_wide"):
self.ambiguous_width = 2
else:
self.ambiguous_width = 1
# Definition of East Asian Width
# http://unicode.org/reports/tr11/
# Ambiguous width can be changed by option
self._EAW_MAP = {'Na': 1, 'N': 1, 'W': 2, 'F': 2, 'H': 1}
def len(self, text):
"""
Calculate display width considering unicode East Asian Width
"""
if not isinstance(text, str):
return len(text)
return sum(self._EAW_MAP.get(east_asian_width(c), self.ambiguous_width)
for c in text)
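    # With wide characters counted as two cells, a string of three fullwidth
    # characters, for example, has display width 6 even though len() reports 3.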
def justify(self, texts, max_len, mode='right'):
# re-calculate padding space per str considering East Asian Width
def _get_pad(t):
return max_len - self.len(t) + len(t)
if mode == 'left':
return [x.ljust(_get_pad(x)) for x in texts]
elif mode == 'center':
return [x.center(_get_pad(x)) for x in texts]
else:
return [x.rjust(_get_pad(x)) for x in texts]
def _get_adjustment():
use_east_asian_width = get_option("display.unicode.east_asian_width")
if use_east_asian_width:
return EastAsianTextAdjustment()
else:
return TextAdjustment()
class TableFormatter:
is_truncated = False
show_dimensions = None
@property
def should_show_dimensions(self):
return (self.show_dimensions is True or
(self.show_dimensions == 'truncate' and self.is_truncated))
def _get_formatter(self, i):
if isinstance(self.formatters, (list, tuple)):
if is_integer(i):
return self.formatters[i]
else:
return None
else:
if is_integer(i) and i not in self.columns:
i = self.columns[i]
return self.formatters.get(i, None)
class DataFrameFormatter(TableFormatter):
"""
Render a DataFrame
self.to_string() : console-friendly tabular output
self.to_html() : html table
self.to_latex() : LaTeX tabular environment table
"""
__doc__ = __doc__ if __doc__ else ''
__doc__ += common_docstring + return_docstring
def __init__(self, frame, buf=None, columns=None, col_space=None,
header=True, index=True, na_rep='NaN', formatters=None,
justify=None, float_format=None, sparsify=None,
index_names=True, line_width=None, max_rows=None,
max_cols=None, show_dimensions=False, decimal='.',
table_id=None, render_links=False, **kwds):
self.frame = frame
if buf is not None:
self.buf = _expand_user(_stringify_path(buf))
else:
self.buf = StringIO()
self.show_index_names = index_names
if sparsify is None:
sparsify = get_option("display.multi_sparse")
self.sparsify = sparsify
self.float_format = float_format
self.formatters = formatters if formatters is not None else {}
self.na_rep = na_rep
self.decimal = decimal
self.col_space = col_space
self.header = header
self.index = index
self.line_width = line_width
self.max_rows = max_rows
self.max_cols = max_cols
self.max_rows_displayed = min(max_rows or len(self.frame),
len(self.frame))
self.show_dimensions = show_dimensions
self.table_id = table_id
self.render_links = render_links
if justify is None:
self.justify = get_option("display.colheader_justify")
else:
self.justify = justify
self.kwds = kwds
if columns is not None:
self.columns = ensure_index(columns)
self.frame = self.frame[self.columns]
else:
self.columns = frame.columns
self._chk_truncate()
self.adj = _get_adjustment()
def _chk_truncate(self):
"""
Checks whether the frame should be truncated. If so, slices
the frame up.
"""
from pandas.core.reshape.concat import concat
# Cut the data to the information actually printed
max_cols = self.max_cols
max_rows = self.max_rows
if max_cols == 0 or max_rows == 0: # assume we are in the terminal
(w, h) = get_terminal_size()
self.w = w
self.h = h
if self.max_rows == 0:
dot_row = 1
prompt_row = 1
if self.show_dimensions:
show_dimension_rows = 3
n_add_rows = (self.header + dot_row + show_dimension_rows +
prompt_row)
# rows available to fill with actual data
max_rows_adj = self.h - n_add_rows
self.max_rows_adj = max_rows_adj
# Format only rows and columns that could potentially fit the
# screen
if max_cols == 0 and len(self.frame.columns) > w:
max_cols = w
if max_rows == 0 and len(self.frame) > h:
max_rows = h
if not hasattr(self, 'max_rows_adj'):
self.max_rows_adj = max_rows
if not hasattr(self, 'max_cols_adj'):
self.max_cols_adj = max_cols
max_cols_adj = self.max_cols_adj
max_rows_adj = self.max_rows_adj
truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj)
truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj)
frame = self.frame
if truncate_h:
if max_cols_adj == 0:
col_num = len(frame.columns)
elif max_cols_adj == 1:
frame = frame.iloc[:, :max_cols]
col_num = max_cols
else:
col_num = (max_cols_adj // 2)
frame = concat((frame.iloc[:, :col_num],
frame.iloc[:, -col_num:]), axis=1)
self.tr_col_num = col_num
if truncate_v:
if max_rows_adj == 1:
row_num = max_rows
frame = frame.iloc[:max_rows, :]
else:
row_num = max_rows_adj // 2
frame = concat((frame.iloc[:row_num, :],
frame.iloc[-row_num:, :]))
self.tr_row_num = row_num
else:
self.tr_row_num = None
self.tr_frame = frame
self.truncate_h = truncate_h
self.truncate_v = truncate_v
self.is_truncated = self.truncate_h or self.truncate_v
def _to_str_columns(self):
"""
Render a DataFrame to a list of columns (as lists of strings).
"""
frame = self.tr_frame
# may include levels names also
str_index = self._get_formatted_index(frame)
if not is_list_like(self.header) and not self.header:
stringified = []
for i, c in enumerate(frame):
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=(self.col_space or 0),
adj=self.adj)
stringified.append(fmt_values)
else:
if is_list_like(self.header):
if len(self.header) != len(self.columns):
raise ValueError(('Writing {ncols} cols but got {nalias} '
'aliases'
.format(ncols=len(self.columns),
nalias=len(self.header))))
str_columns = [[label] for label in self.header]
else:
str_columns = self._get_formatted_column_labels(frame)
if self.show_row_idx_names:
for x in str_columns:
x.append('')
stringified = []
for i, c in enumerate(frame):
cheader = str_columns[i]
header_colwidth = max(self.col_space or 0,
*(self.adj.len(x) for x in cheader))
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=header_colwidth,
adj=self.adj)
max_len = max(max(self.adj.len(x) for x in fmt_values),
header_colwidth)
cheader = self.adj.justify(cheader, max_len, mode=self.justify)
stringified.append(cheader + fmt_values)
strcols = stringified
if self.index:
strcols.insert(0, str_index)
# Add ... to signal truncated
truncate_h = self.truncate_h
truncate_v = self.truncate_v
if truncate_h:
col_num = self.tr_col_num
strcols.insert(self.tr_col_num + 1, [' ...'] * (len(str_index)))
if truncate_v:
n_header_rows = len(str_index) - len(frame)
row_num = self.tr_row_num
for ix, col in enumerate(strcols):
# infer from above row
cwidth = self.adj.len(strcols[ix][row_num])
is_dot_col = False
if truncate_h:
is_dot_col = ix == col_num + 1
if cwidth > 3 or is_dot_col:
my_str = '...'
else:
my_str = '..'
if ix == 0:
dot_mode = 'left'
elif is_dot_col:
cwidth = 4
dot_mode = 'right'
else:
dot_mode = 'right'
dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0]
strcols[ix].insert(row_num + n_header_rows, dot_str)
return strcols
def to_string(self):
"""
Render a DataFrame to a console-friendly tabular output.
"""
from pandas import Series
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
info_line = ('Empty {name}\nColumns: {col}\nIndex: {idx}'
.format(name=type(self.frame).__name__,
col=pprint_thing(frame.columns),
idx=pprint_thing(frame.index)))
text = info_line
else:
strcols = self._to_str_columns()
if self.line_width is None: # no need to wrap around just print
# the whole frame
text = self.adj.adjoin(1, *strcols)
elif (not isinstance(self.max_cols, int) or
self.max_cols > 0): # need to wrap around
text = self._join_multiline(*strcols)
else: # max_cols == 0. Try to fit frame to terminal
text = self.adj.adjoin(1, *strcols).split('\n')
max_len = Series(text).str.len().max()
# plus truncate dot col
dif = max_len - self.w
# '+ 1' to avoid too wide repr (GH PR #17023)
adj_dif = dif + 1
col_lens = Series([Series(ele).apply(len).max()
for ele in strcols])
n_cols = len(col_lens)
counter = 0
while adj_dif > 0 and n_cols > 1:
counter += 1
mid = int(round(n_cols / 2.))
mid_ix = col_lens.index[mid]
col_len = col_lens[mid_ix]
# adjoin adds one
adj_dif -= (col_len + 1)
col_lens = col_lens.drop(mid_ix)
n_cols = len(col_lens)
# subtract index column
max_cols_adj = n_cols - self.index
# GH-21180. Ensure that we print at least two.
max_cols_adj = max(max_cols_adj, 2)
self.max_cols_adj = max_cols_adj
# Call again _chk_truncate to cut frame appropriately
# and then generate string representation
self._chk_truncate()
strcols = self._to_str_columns()
text = self.adj.adjoin(1, *strcols)
self.buf.writelines(text)
if self.should_show_dimensions:
self.buf.write("\n\n[{nrows} rows x {ncols} columns]"
.format(nrows=len(frame), ncols=len(frame.columns)))
def _join_multiline(self, *strcols):
lwidth = self.line_width
adjoin_width = 1
strcols = list(strcols)
if self.index:
idx = strcols.pop(0)
lwidth -= np.array([self.adj.len(x)
for x in idx]).max() + adjoin_width
col_widths = [np.array([self.adj.len(x) for x in col]).max() if
len(col) > 0 else 0 for col in strcols]
col_bins = _binify(col_widths, lwidth)
nbins = len(col_bins)
if self.truncate_v:
nrows = self.max_rows_adj + 1
else:
nrows = len(self.frame)
str_lst = []
st = 0
for i, ed in enumerate(col_bins):
row = strcols[st:ed]
if self.index:
row.insert(0, idx)
if nbins > 1:
if ed <= len(strcols) and i < nbins - 1:
row.append([' \\'] + [' '] * (nrows - 1))
else:
row.append([' '] * nrows)
str_lst.append(self.adj.adjoin(adjoin_width, *row))
st = ed
return '\n\n'.join(str_lst)
def to_latex(self, column_format=None, longtable=False, encoding=None,
multicolumn=False, multicolumn_format=None, multirow=False):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
from pandas.io.formats.latex import LatexFormatter
latex_renderer = LatexFormatter(self, column_format=column_format,
longtable=longtable,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow)
if encoding is None:
encoding = 'utf-8'
if hasattr(self.buf, 'write'):
latex_renderer.write_result(self.buf)
elif isinstance(self.buf, str):
import codecs
with codecs.open(self.buf, 'w', encoding=encoding) as f:
latex_renderer.write_result(f)
else:
raise TypeError('buf is not a file name and it has no write '
'method')
def _format_col(self, i):
frame = self.tr_frame
formatter = self._get_formatter(i)
values_to_format = frame.iloc[:, i]._formatting_values()
return format_array(values_to_format, formatter,
float_format=self.float_format, na_rep=self.na_rep,
space=self.col_space, decimal=self.decimal)
def to_html(self, classes=None, notebook=False, border=None):
"""
Render a DataFrame to a html table.
Parameters
----------
classes : str or list-like
classes to include in the `class` attribute of the opening
``<table>`` tag, in addition to the default "dataframe".
notebook : {True, False}, optional, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
``<table>`` tag. Default ``pd.options.display.html.border``.
.. versionadded:: 0.19.0
"""
from pandas.io.formats.html import HTMLFormatter, NotebookFormatter
Klass = NotebookFormatter if notebook else HTMLFormatter
html = Klass(self, classes=classes, border=border).render()
if hasattr(self.buf, 'write'):
buffer_put_lines(self.buf, html)
elif isinstance(self.buf, str):
with open(self.buf, 'w') as f:
buffer_put_lines(f, html)
else:
raise TypeError('buf is not a file name and it has no write '
                            'method')
def _get_formatted_column_labels(self, frame):
from pandas.core.index import _sparsify
columns = frame.columns
if isinstance(columns, ABCMultiIndex):
fmt_columns = columns.format(sparsify=False, adjoin=False)
fmt_columns = list(zip(*fmt_columns))
dtypes = self.frame.dtypes._values
# if we have a Float level, they don't use leading space at all
restrict_formatting = any(l.is_floating for l in columns.levels)
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
def space_format(x, y):
if (y not in self.formatters and
need_leadsp[x] and not restrict_formatting):
return ' ' + y
return y
str_columns = list(zip(*[[space_format(x, y) for y in x]
for x in fmt_columns]))
if self.sparsify and len(str_columns):
str_columns = _sparsify(str_columns)
str_columns = [list(x) for x in zip(*str_columns)]
else:
fmt_columns = columns.format()
dtypes = self.frame.dtypes
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
str_columns = [[' ' + x if not self._get_formatter(i) and
need_leadsp[x] else x]
for i, (col, x) in enumerate(zip(columns,
fmt_columns))]
# self.str_columns = str_columns
return str_columns
@property
def has_index_names(self):
return _has_names(self.frame.index)
@property
def has_column_names(self):
return _has_names(self.frame.columns)
@property
def show_row_idx_names(self):
return all((self.has_index_names,
self.index,
self.show_index_names))
@property
def show_col_idx_names(self):
return all((self.has_column_names,
self.show_index_names,
self.header))
def _get_formatted_index(self, frame):
# Note: this is only used by to_string() and to_latex(), not by
# to_html().
index = frame.index
columns = frame.columns
fmt = self._get_formatter('__index__')
if isinstance(index, ABCMultiIndex):
fmt_index = index.format(
sparsify=self.sparsify, adjoin=False,
names=self.show_row_idx_names, formatter=fmt)
else:
fmt_index = [index.format(
name=self.show_row_idx_names, formatter=fmt)]
fmt_index = [tuple(_make_fixed_width(list(x), justify='left',
minimum=(self.col_space or 0),
adj=self.adj)) for x in fmt_index]
adjoined = self.adj.adjoin(1, *fmt_index).split('\n')
# empty space for columns
if self.show_col_idx_names:
col_header = ['{x}'.format(x=x)
for x in self._get_column_name_list()]
else:
col_header = [''] * columns.nlevels
if self.header:
return col_header + adjoined
else:
return adjoined
def _get_column_name_list(self):
names = []
columns = self.frame.columns
if isinstance(columns, ABCMultiIndex):
names.extend('' if name is None else name
for name in columns.names)
else:
names.append('' if columns.name is None else columns.name)
return names
# ----------------------------------------------------------------------
# Array formatters
def format_array(values, formatter, float_format=None, na_rep='NaN',
digits=None, space=None, justify='right', decimal='.',
leading_space=None):
"""
Format an array for printing.
Parameters
----------
values
formatter
float_format
na_rep
digits
space
justify
decimal
leading_space : bool, optional
Whether the array should be formatted with a leading space.
        When the array is a column of a Series or DataFrame, we do want
the leading space to pad between columns.
When formatting an Index subclass
(e.g. IntervalIndex._format_native_types), we don't want the
leading space since it should be left-aligned.
Returns
-------
List[str]
"""
if is_datetime64_dtype(values.dtype):
fmt_klass = Datetime64Formatter
elif is_datetime64tz_dtype(values):
fmt_klass = Datetime64TZFormatter
elif is_timedelta64_dtype(values.dtype):
fmt_klass = Timedelta64Formatter
elif is_extension_array_dtype(values.dtype):
fmt_klass = ExtensionArrayFormatter
elif is_float_dtype(values.dtype) or is_complex_dtype(values.dtype):
fmt_klass = FloatArrayFormatter
elif is_integer_dtype(values.dtype):
fmt_klass = IntArrayFormatter
else:
fmt_klass = GenericArrayFormatter
if space is None:
space = get_option("display.column_space")
if float_format is None:
float_format = get_option("display.float_format")
if digits is None:
digits = get_option("display.precision")
fmt_obj = fmt_klass(values, digits=digits, na_rep=na_rep,
float_format=float_format, formatter=formatter,
space=space, justify=justify, decimal=decimal,
leading_space=leading_space)
return fmt_obj.get_result()
class GenericArrayFormatter:
def __init__(self, values, digits=7, formatter=None, na_rep='NaN',
space=12, float_format=None, justify='right', decimal='.',
quoting=None, fixed_width=True, leading_space=None):
self.values = values
self.digits = digits
self.na_rep = na_rep
self.space = space
self.formatter = formatter
self.float_format = float_format
self.justify = justify
self.decimal = decimal
self.quoting = quoting
self.fixed_width = fixed_width
self.leading_space = leading_space
def get_result(self):
fmt_values = self._format_strings()
return _make_fixed_width(fmt_values, self.justify)
def _format_strings(self):
if self.float_format is None:
float_format = get_option("display.float_format")
if float_format is None:
fmt_str = ('{{x: .{prec:d}g}}'
.format(prec=get_option("display.precision")))
float_format = lambda x: fmt_str.format(x=x)
else:
float_format = self.float_format
formatter = (
self.formatter if self.formatter is not None else
(lambda x: pprint_thing(x, escape_chars=('\t', '\r', '\n'))))
def _format(x):
if self.na_rep is not None and is_scalar(x) and isna(x):
try:
# try block for np.isnat specifically
# determine na_rep if x is None or NaT-like
if x is None:
return 'None'
elif x is NaT or np.isnat(x):
return 'NaT'
except (TypeError, ValueError):
# np.isnat only handles datetime or timedelta objects
pass
return self.na_rep
elif isinstance(x, PandasObject):
return '{x}'.format(x=x)
else:
# object dtype
return '{x}'.format(x=formatter(x))
vals = self.values
if isinstance(vals, Index):
vals = vals._values
elif isinstance(vals, ABCSparseArray):
vals = vals.values
is_float_type = lib.map_infer(vals, is_float) & notna(vals)
leading_space = self.leading_space
if leading_space is None:
leading_space = is_float_type.any()
fmt_values = []
for i, v in enumerate(vals):
if not is_float_type[i] and leading_space:
fmt_values.append(' {v}'.format(v=_format(v)))
elif is_float_type[i]:
fmt_values.append(float_format(v))
else:
if leading_space is False:
# False specifically, so that the default is
# to include a space if we get here.
tpl = '{v}'
else:
tpl = ' {v}'
fmt_values.append(tpl.format(v=_format(v)))
return fmt_values
class FloatArrayFormatter(GenericArrayFormatter):
"""
"""
def __init__(self, *args, **kwargs):
GenericArrayFormatter.__init__(self, *args, **kwargs)
# float_format is expected to be a string
# formatter should be used to pass a function
if self.float_format is not None and self.formatter is None:
# GH21625, GH22270
self.fixed_width = False
if callable(self.float_format):
self.formatter = self.float_format
self.float_format = None
def _value_formatter(self, float_format=None, threshold=None):
"""Returns a function to be applied on each value to format it
"""
# the float_format parameter supersedes self.float_format
if float_format is None:
float_format = self.float_format
# we are going to compose different functions, to first convert to
# a string, then replace the decimal symbol, and finally chop according
# to the threshold
# when there is no float_format, we use str instead of '%g'
# because str(0.0) = '0.0' while '%g' % 0.0 = '0'
if float_format:
def base_formatter(v):
return float_format(value=v) if notna(v) else self.na_rep
else:
def base_formatter(v):
return str(v) if notna(v) else self.na_rep
if self.decimal != '.':
def decimal_formatter(v):
return base_formatter(v).replace('.', self.decimal, 1)
else:
decimal_formatter = base_formatter
if threshold is None:
return decimal_formatter
def formatter(value):
if notna(value):
if abs(value) > threshold:
return decimal_formatter(value)
else:
return decimal_formatter(0.0)
else:
return self.na_rep
return formatter
def get_result_as_array(self):
"""
Returns the float values converted into strings using
the parameters given at initialisation, as a numpy array
"""
if self.formatter is not None:
return np.array([self.formatter(x) for x in self.values])
if self.fixed_width:
threshold = get_option("display.chop_threshold")
else:
threshold = None
# if we have a fixed_width, we'll need to try different float_format
def format_values_with(float_format):
formatter = self._value_formatter(float_format, threshold)
# default formatter leaves a space to the left when formatting
# floats, must be consistent for left-justifying NaNs (GH #25061)
if self.justify == 'left':
na_rep = ' ' + self.na_rep
else:
na_rep = self.na_rep
# separate the wheat from the chaff
values = self.values
is_complex = is_complex_dtype(values)
mask = isna(values)
if hasattr(values, 'to_dense'): # sparse numpy ndarray
values = values.to_dense()
values = np.array(values, dtype='object')
values[mask] = na_rep
imask = (~mask).ravel()
values.flat[imask] = np.array([formatter(val)
for val in values.ravel()[imask]])
if self.fixed_width:
if is_complex:
return _trim_zeros_complex(values, na_rep)
else:
return _trim_zeros_float(values, na_rep)
return values
# There is a special default string when we are fixed-width
# The default is otherwise to use str instead of a formatting string
if self.float_format is None:
if self.fixed_width:
float_format = partial('{value: .{digits:d}f}'.format,
digits=self.digits)
else:
float_format = self.float_format
else:
float_format = lambda value: self.float_format % value
formatted_values = format_values_with(float_format)
if not self.fixed_width:
return formatted_values
        # we need to convert to engineering format if some values are too small
# and would appear as 0, or if some values are too big and take too
# much space
if len(formatted_values) > 0:
maxlen = max(len(x) for x in formatted_values)
too_long = maxlen > self.digits + 6
else:
too_long = False
with np.errstate(invalid='ignore'):
abs_vals = np.abs(self.values)
# this is pretty arbitrary for now
        # large values: more than 8 characters including decimal symbol
# and first digit, hence > 1e6
has_large_values = (abs_vals > 1e6).any()
has_small_values = ((abs_vals < 10**(-self.digits)) &
(abs_vals > 0)).any()
if has_small_values or (too_long and has_large_values):
float_format = partial('{value: .{digits:d}e}'.format,
digits=self.digits)
formatted_values = format_values_with(float_format)
return formatted_values
def _format_strings(self):
# shortcut
if self.formatter is not None:
return [self.formatter(x) for x in self.values]
return list(self.get_result_as_array())
class IntArrayFormatter(GenericArrayFormatter):
def _format_strings(self):
formatter = self.formatter or (lambda x: '{x: d}'.format(x=x))
fmt_values = [formatter(x) for x in self.values]
return fmt_values
class Datetime64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep='NaT', date_format=None, **kwargs):
super().__init__(values, **kwargs)
self.nat_rep = nat_rep
self.date_format = date_format
def _format_strings(self):
""" we by definition have DO NOT have a TZ """
values = self.values
if not isinstance(values, DatetimeIndex):
values = DatetimeIndex(values)
if self.formatter is not None and callable(self.formatter):
return [self.formatter(x) for x in values]
fmt_values = format_array_from_datetime(
values.asi8.ravel(),
format=_get_format_datetime64_from_values(values,
self.date_format),
na_rep=self.nat_rep).reshape(values.shape)
return fmt_values.tolist()
class ExtensionArrayFormatter(GenericArrayFormatter):
def _format_strings(self):
values = self.values
if isinstance(values, (ABCIndexClass, ABCSeries)):
values = values._values
formatter = values._formatter(boxed=True)
if is_categorical_dtype(values.dtype):
# Categorical is special for now, so that we can preserve tzinfo
array = values.get_values()
else:
array = np.asarray(values)
fmt_values = format_array(array,
formatter,
float_format=self.float_format,
na_rep=self.na_rep, digits=self.digits,
space=self.space, justify=self.justify,
leading_space=self.leading_space)
return fmt_values
def format_percentiles(percentiles):
"""
Outputs rounded and formatted percentiles.
Parameters
----------
percentiles : list-like, containing floats from interval [0,1]
Returns
-------
formatted : list of strings
Notes
-----
Rounding precision is chosen so that: (1) if any two elements of
``percentiles`` differ, they remain different after rounding
(2) no entry is *rounded* to 0% or 100%.
Any non-integer is always rounded to at least 1 decimal place.
Examples
--------
Keeps all entries different after rounding:
>>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
['1.999%', '2.001%', '50%', '66.667%', '99.99%']
No element is rounded to 0% or 100% (unless already equal to it).
Duplicates are allowed:
>>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
"""
percentiles = np.asarray(percentiles)
# It checks for np.NaN as well
with np.errstate(invalid='ignore'):
if not is_numeric_dtype(percentiles) or not np.all(percentiles >= 0) \
or not np.all(percentiles <= 1):
raise ValueError("percentiles should all be in the interval [0,1]")
percentiles = 100 * percentiles
int_idx = np.isclose(percentiles.astype(int), percentiles)
if np.all(int_idx):
out = percentiles.astype(int).astype(str)
return [i + '%' for i in out]
unique_pcts = np.unique(percentiles)
to_begin = unique_pcts[0] if unique_pcts[0] > 0 else None
to_end = 100 - unique_pcts[-1] if unique_pcts[-1] < 100 else None
# Least precision that keeps percentiles unique after rounding
prec = -np.floor(np.log10(np.min(
np.ediff1d(unique_pcts, to_begin=to_begin, to_end=to_end)
))).astype(int)
prec = max(1, prec)
out = np.empty_like(percentiles, dtype=object)
out[int_idx] = percentiles[int_idx].astype(int).astype(str)
out[~int_idx] = percentiles[~int_idx].round(prec).astype(str)
return [i + '%' for i in out]
def _is_dates_only(values):
    # return True if we only have dates (and no timezone)
values = DatetimeIndex(values)
if values.tz is not None:
return False
values_int = values.asi8
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values,
values_int % int(one_day_nanos) != 0).sum() == 0
if even_days:
return True
return False
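# A hedged usage sketch of _is_dates_only (assumes pandas is importable as pd
# and that the values are convertible to a DatetimeIndex): timestamps that all
# fall exactly on midnight, with no timezone, count as "dates only".
#
#   >>> _is_dates_only(pd.to_datetime(['2019-01-01', '2019-01-02']))
#   True
#   >>> _is_dates_only(pd.to_datetime(['2019-01-01 12:30:00']))
#   False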
def _format_datetime64(x, tz=None, nat_rep='NaT'):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if tz is not None or not isinstance(x, Timestamp):
if getattr(x, 'tzinfo', None) is not None:
x = Timestamp(x).tz_convert(tz)
else:
x = Timestamp(x).tz_localize(tz)
return str(x)
def _format_datetime64_dateonly(x, nat_rep='NaT', date_format=None):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if not isinstance(x, Timestamp):
x = Timestamp(x)
if date_format:
return x.strftime(date_format)
else:
return x._date_repr
def _get_format_datetime64(is_dates_only, nat_rep='NaT', date_format=None):
if is_dates_only:
return lambda x, tz=None: _format_datetime64_dateonly(
x, nat_rep=nat_rep, date_format=date_format)
else:
return lambda x, tz=None: _format_datetime64(x, tz=tz, nat_rep=nat_rep)
def _get_format_datetime64_from_values(values, date_format):
""" given values and a date_format, return a string format """
is_dates_only = _is_dates_only(values)
if is_dates_only:
return date_format or "%Y-%m-%d"
return date_format
class Datetime64TZFormatter(Datetime64Formatter):
def _format_strings(self):
""" we by definition have a TZ """
values = self.values.astype(object)
is_dates_only = _is_dates_only(values)
formatter = (self.formatter or
_get_format_datetime64(is_dates_only,
date_format=self.date_format))
fmt_values = [formatter(x) for x in values]
return fmt_values
class Timedelta64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep='NaT', box=False, **kwargs):
super().__init__(values, **kwargs)
self.nat_rep = nat_rep
self.box = box
def _format_strings(self):
formatter = (self.formatter or
_get_format_timedelta64(self.values, nat_rep=self.nat_rep,
box=self.box))
fmt_values = np.array([formatter(x) for x in self.values])
return fmt_values
def _get_format_timedelta64(values, nat_rep='NaT', box=False):
"""
Return a formatter function for a range of timedeltas.
These will all have the same format argument
If box, then show the return in quotes
"""
values_int = values.astype(np.int64)
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values,
values_int % one_day_nanos != 0).sum() == 0
all_sub_day = np.logical_and(
consider_values, np.abs(values_int) >= one_day_nanos).sum() == 0
if even_days:
format = None
elif all_sub_day:
format = 'sub_day'
else:
format = 'long'
def _formatter(x):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if not isinstance(x, Timedelta):
x = Timedelta(x)
result = x._repr_base(format=format)
if box:
result = "'{res}'".format(res=result)
return result
return _formatter
def _make_fixed_width(strings, justify='right', minimum=None, adj=None):
if len(strings) == 0 or justify == 'all':
return strings
if adj is None:
adj = _get_adjustment()
max_len = max(adj.len(x) for x in strings)
if minimum is not None:
max_len = max(minimum, max_len)
conf_max = get_option("display.max_colwidth")
if conf_max is not None and max_len > conf_max:
max_len = conf_max
def just(x):
if conf_max is not None:
if (conf_max > 3) & (adj.len(x) > max_len):
x = x[:max_len - 3] + '...'
return x
strings = [just(x) for x in strings]
result = adj.justify(strings, max_len, mode=justify)
return result
def _trim_zeros_complex(str_complexes, na_rep='NaN'):
"""
Separates the real and imaginary parts from the complex number, and
executes the _trim_zeros_float method on each of those.
"""
def separate_and_trim(str_complex, na_rep):
num_arr = str_complex.split('+')
return (_trim_zeros_float([num_arr[0]], na_rep) +
['+'] +
_trim_zeros_float([num_arr[1][:-1]], na_rep) +
['j'])
return [''.join(separate_and_trim(x, na_rep)) for x in str_complexes]
def _trim_zeros_float(str_floats, na_rep='NaN'):
"""
    Trims trailing zeros, leaving just one after the decimal point if need be.
"""
trimmed = str_floats
def _is_number(x):
return (x != na_rep and not x.endswith('inf'))
def _cond(values):
finite = [x for x in values if _is_number(x)]
return (len(finite) > 0 and all(x.endswith('0') for x in finite) and
not (any(('e' in x) or ('E' in x) for x in finite)))
while _cond(trimmed):
trimmed = [x[:-1] if _is_number(x) else x for x in trimmed]
    # leave one 0 after the decimal point if need be.
return [x + "0" if x.endswith('.') and _is_number(x) else x
for x in trimmed]
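# A small illustrative sketch (hedged, not part of the original module) of how
# _trim_zeros_float behaves: trailing zeros are stripped in lockstep across the
# whole list, stopping once any finite entry no longer ends in '0', and a value
# left with a bare trailing '.' gets one zero back.
#
#   >>> _trim_zeros_float(['1.500', '2.000'])
#   ['1.5', '2.0']
#   >>> _trim_zeros_float(['1.000', '2.000'])
#   ['1.0', '2.0']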
def _has_names(index):
if isinstance(index, ABCMultiIndex):
return com._any_not_none(*index.names)
else:
return index.name is not None
class EngFormatter:
"""
Formats float values according to engineering format.
Based on matplotlib.ticker.EngFormatter
"""
# The SI engineering prefixes
ENG_PREFIXES = {
-24: "y",
-21: "z",
-18: "a",
-15: "f",
-12: "p",
-9: "n",
-6: "u",
-3: "m",
0: "",
3: "k",
6: "M",
9: "G",
12: "T",
15: "P",
18: "E",
21: "Z",
24: "Y"
}
def __init__(self, accuracy=None, use_eng_prefix=False):
self.accuracy = accuracy
self.use_eng_prefix = use_eng_prefix
def __call__(self, num):
""" Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
>>> format_eng(0) # for self.accuracy = 0
' 0'
>>> format_eng(1000000) # for self.accuracy = 1,
# self.use_eng_prefix = True
' 1.0M'
>>> format_eng("-1e-6") # for self.accuracy = 2
# self.use_eng_prefix = False
'-1.00E-06'
@param num: the value to represent
@type num: either a numeric value or a string that can be converted to
a numeric value (as per decimal.Decimal constructor)
@return: engineering formatted string
"""
import decimal
import math
dnum = decimal.Decimal(str(num))
if decimal.Decimal.is_nan(dnum):
return 'NaN'
if decimal.Decimal.is_infinite(dnum):
return 'inf'
sign = 1
if dnum < 0: # pragma: no cover
sign = -1
dnum = -dnum
if dnum != 0:
pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3))
else:
pow10 = decimal.Decimal(0)
pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
int_pow10 = int(pow10)
if self.use_eng_prefix:
prefix = self.ENG_PREFIXES[int_pow10]
else:
if int_pow10 < 0:
prefix = 'E-{pow10:02d}'.format(pow10=-int_pow10)
else:
prefix = 'E+{pow10:02d}'.format(pow10=int_pow10)
mant = sign * dnum / (10**pow10)
if self.accuracy is None: # pragma: no cover
format_str = "{mant: g}{prefix}"
else:
format_str = ("{{mant: .{acc:d}f}}{{prefix}}"
.format(acc=self.accuracy))
formatted = format_str.format(mant=mant, prefix=prefix)
return formatted
def set_eng_float_format(accuracy=3, use_eng_prefix=False):
"""
Alter default behavior on how float is formatted in DataFrame.
Format float in engineering format. By accuracy, we mean the number of
    decimal digits after the decimal point.
See also EngFormatter.
"""
set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))
set_option("display.column_space", max(12, accuracy + 9))
def _binify(cols, line_width):
adjoin_width = 1
bins = []
curr_width = 0
i_last_column = len(cols) - 1
for i, w in enumerate(cols):
w_adjoined = w + adjoin_width
curr_width += w_adjoined
if i_last_column == i:
wrap = curr_width + 1 > line_width and i > 0
else:
wrap = curr_width + 2 > line_width and i > 0
if wrap:
bins.append(i)
curr_width = w_adjoined
bins.append(len(cols))
return bins
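# A hedged sketch of _binify: given per-column string widths and a target line
# width, it returns the column indices at which a new block of columns should
# start when a wide frame is wrapped.
#
#   >>> _binify([10, 10, 10], line_width=25)
#   [2, 3]
#   # i.e. columns 0-1 form the first block, column 2 spills into the next.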
def get_level_lengths(levels, sentinel=''):
"""For each index in each level the function returns lengths of indexes.
Parameters
----------
levels : list of lists
List of values on for level.
sentinel : string, optional
Value which states that no new index starts on there.
Returns
-------
Returns list of maps. For each level returns map of indexes (key is index
in row and value is length of index).
"""
if len(levels) == 0:
return []
control = [True] * len(levels[0])
result = []
for level in levels:
last_index = 0
lengths = {}
for i, key in enumerate(level):
if control[i] and key == sentinel:
pass
else:
control[i] = False
lengths[last_index] = i - last_index
last_index = i
lengths[last_index] = len(level) - last_index
result.append(lengths)
return result
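# A hedged example of the structure get_level_lengths returns: one dict per
# level, mapping the position where an index label starts to the number of rows
# it spans (sentinel entries extend the previous label).
#
#   >>> get_level_lengths([['a', '', 'b', ''], ['x', 'y', 'z', 'w']])
#   [{0: 2, 2: 2}, {0: 1, 1: 1, 2: 1, 3: 1}]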
def buffer_put_lines(buf, lines):
"""
Appends lines to a buffer.
Parameters
----------
buf
The buffer to write to
lines
The lines to append.
"""
if any(isinstance(x, str) for x in lines):
lines = [str(x) for x in lines]
buf.write('\n'.join(lines))
|
bsd-3-clause
|
davebrent/aubio
|
python/demos/demo_mfcc.py
|
3
|
1700
|
#! /usr/bin/env python
import sys
from aubio import source, pvoc, mfcc
from numpy import array, vstack, zeros
win_s = 512 # fft size
hop_s = win_s // 4          # hop size (integer division, works on Python 2 and 3)
n_filters = 40
n_coeffs = 13
samplerate = 44100
if len(sys.argv) < 2:
print "Usage: %s <source_filename>" % sys.argv[0]
sys.exit(1)
source_filename = sys.argv[1]
samplerate = 0
if len( sys.argv ) > 2: samplerate = int(sys.argv[2])
s = source(source_filename, samplerate, hop_s)
samplerate = s.samplerate
p = pvoc(win_s, hop_s)
m = mfcc(win_s, n_filters, n_coeffs, samplerate)
mfccs = zeros([13,])
frames_read = 0
while True:
samples, read = s()
spec = p(samples)
mfcc_out = m(spec)
mfccs = vstack((mfccs, mfcc_out))
frames_read += read
if read < hop_s: break
# do plotting
from numpy import arange
from demo_waveform_plot import get_waveform_plot
from demo_waveform_plot import set_xlabels_sample2time
import matplotlib.pyplot as plt
fig = plt.figure()
plt.rc('lines',linewidth='.8')
wave = plt.axes([0.1, 0.75, 0.8, 0.19])
get_waveform_plot( source_filename, samplerate, block_size = hop_s, ax = wave)
wave.xaxis.set_visible(False)
wave.yaxis.set_visible(False)
all_times = arange(mfccs.shape[0]) * hop_s
n_coeffs = mfccs.shape[1]
for i in range(n_coeffs):
ax = plt.axes ( [0.1, 0.75 - ((i+1) * 0.65 / n_coeffs), 0.8, 0.65 / n_coeffs], sharex = wave )
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.plot(all_times, mfccs.T[i])
# add time to the last axis
set_xlabels_sample2time( ax, frames_read, samplerate)
#plt.ylabel('spectral descriptor value')
ax.xaxis.set_visible(True)
wave.set_title('MFCC for %s' % source_filename)
plt.show()
|
gpl-3.0
|
gpiatkovska/Machine-Learning-in-Python
|
Ex3_MultiLogisticReg_NN/ex3_nn.py
|
1
|
3085
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 14:25:42 2015
@author: Hanna
"""
import numpy as np
import scipy as sp
import scipy.special  # make sp.special.expit available (not auto-imported by scipy)
import matplotlib.pyplot as plt
import scipy.io as io
def displayData(X):
pixels = 20 # images are 20x20 pixels
#100 examples shown on a 10 by 10 square
display_rows = 10
display_cols = 10
out = np.zeros((pixels*display_rows,pixels*display_cols))
rand_indices = np.random.permutation(5000)[0:display_rows*display_cols]
for j in range(0, display_rows):
for i in range(0, display_cols):
start_i = i*pixels
start_j = j*pixels
out[start_i:start_i+pixels, start_j:start_j+pixels] = X[rand_indices[display_rows*j+i]].reshape(pixels, pixels).T
fig = plt.figure()
ax = fig.gca()
ax.imshow(out,cmap="Greys_r")
ax.set_axis_off()
plt.savefig("100dataExamples.pdf")
plt.show()
def sigmoid(z):
#works elementwise for any array z
return sp.special.expit(z) # expit(x) = 1/(1+exp(-x)) elementwise for array x
def forward(Theta1,Theta2,X): # works for any # of examples
#forward propagation:
#input activation
m = np.shape(X)[0] # # of examples
a1 = X.T
a1 = np.vstack((np.ones((1,m)),a1))
#hidden layer activation
z2 = np.dot(Theta1,a1)
a2 = sigmoid(z2)
a2 = np.vstack((np.ones((1,m)),a2))
#output layer activation
z3 = np.dot(Theta2,a2)
a3 = sigmoid(z3)
return a3
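# A hedged sketch of the shapes flowing through forward() for this exercise's
# network (assuming the 5000x400 digit matrix X loaded below and the
# pre-trained Theta1 of shape (25, 401) and Theta2 of shape (10, 26)):
#   a1: (401, 5000)   input activations plus a bias row
#   z2: (25, 5000),  a2: (26, 5000)   hidden activations plus a bias row
#   z3, a3: (10, 5000)   one output unit per digit class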
def predictOneVsAllNN(h):
all_probability = h.T
prediction = np.argmax(all_probability,axis=1) + 1 # we do not have class 0 but have class 10
return prediction.reshape(np.shape(h)[1],1)
if __name__ == '__main__':
#load data
mat = io.loadmat("ex3data1.mat")
X, y = mat['X'], mat['y']
#display 100 random examples
displayData(X)
#load already trained weights
mat = io.loadmat("ex3weights.mat")
Theta1, Theta2 = mat['Theta1'], mat['Theta2']
#Theta1 and Theta2 correspond to a network with:
    #400 (+1 bias) input units (= # of features -- 20x20 image)
#one hidden layer with 25 (+1 bias) units
#10 output units corresponding to 10 classes
print(np.shape(Theta1)) # Theta1 shape is (25,401)
print(np.shape(Theta2)) # Theta2 shape is (10,26)
#NN prediction
h = forward(Theta1,Theta2,X)
prediction = predictOneVsAllNN(h)
training_accuracy = np.mean(prediction == y) * 100.0
print("NN training set prediction accuracy = ", training_accuracy,"%") # get 97.52 %
print("supposed to be 97.5")
#show images and print corresponding predictions one by one
m = np.shape(X)[0] # # of examples
sequence = np.random.permutation(m)
print("Note that 0 is labeled by 10")
plt.ion()
for i in sequence:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(X[i,:].reshape(20, 20).T, cmap="Greys_r")
ax.set_axis_off()
print(prediction[i,0])
input("Press Enter to continue...")
plt.close(fig)
|
mit
|
SamStudio8/scikit-bio
|
skbio/io/format/tests/test_ordination.py
|
8
|
11787
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import six
import io
from unittest import TestCase, main
import numpy as np
import pandas as pd
import numpy.testing as npt
from skbio import OrdinationResults
from skbio.io import OrdinationFormatError
from skbio.io.format.ordination import (
_ordination_to_ordination_results, _ordination_results_to_ordination,
_ordination_sniffer)
from skbio.util import get_data_path, assert_ordination_results_equal
class OrdinationTestData(TestCase):
def setUp(self):
self.valid_fps = map(
get_data_path,
['ordination_L&L_CA_data_scores', 'ordination_example3_scores',
'ordination_PCoA_sample_data_3_scores',
'ordination_example2_scores'])
# Store filepath, regex for matching the error message that should be
# raised when reading the file, and whether the file should be matched
# by the sniffer (True) or not (False).
self.invalid_fps = map(lambda e: (get_data_path(e[0]), e[1], e[2]), [
('empty', 'end of file.*Eigvals header', False),
('whitespace_only', 'Eigvals header not found', False),
('ordination_error1', 'Eigvals header not found', False),
('ordination_error2',
'Proportion explained header not found', False),
('ordination_error3', 'Species header not found', True),
('ordination_error4', 'Site header not found', True),
('ordination_error5', 'Biplot header not found', True),
('ordination_error6', 'Site constraints header not found', True),
('ordination_error7', 'empty line', False),
('ordination_error8', '9.*Proportion explained.*8', True),
('ordination_error9', '2 values.*1 in row 1', True),
('ordination_error10', '2 values.*1 in row 1', True),
('ordination_error11', 'Site constraints ids and site ids', True),
('ordination_error12', '9.*Eigvals.*8', True),
('ordination_error13', '9.*Proportion explained.*8', True),
('ordination_error14', 'Site is 0: 9 x 0', True),
('ordination_error15', '9 values.*8 in row 1', True),
('ordination_error16', 'Biplot is 0: 3 x 0', True),
('ordination_error17', '3 values.*2 in row 1', True),
('ordination_error18',
'proportion explained.*eigvals: 8 != 9', True),
('ordination_error19',
'coordinates.*species.*eigvals: 1 != 2', True),
('ordination_error20', 'coordinates.*site.*eigvals: 1 != 2', True),
('ordination_error21', 'one eigval', False),
('ordination_error22', 'end of file.*blank line', False),
('ordination_error23', 'end of file.*Proportion explained section',
True),
('ordination_error24', 'end of file.*row 2.*Species section', True)
])
class OrdinationResultsReaderWriterTests(OrdinationTestData):
def setUp(self):
super(OrdinationResultsReaderWriterTests, self).setUp()
# define in-memory results, one for each of the valid files in
# self.valid_fps
# CA results
axes_ids = ['CA1', 'CA2']
species_ids = ['Species1', 'Species2', 'Species3']
site_ids = ['Site1', 'Site2', 'Site3']
eigvals = pd.Series([0.0961330159181, 0.0409418140138], axes_ids)
species = pd.DataFrame([[0.408869425742, 0.0695518116298],
[-0.1153860437, -0.299767683538],
[-0.309967102571, 0.187391917117]],
index=species_ids, columns=axes_ids)
site = pd.DataFrame([[-0.848956053187, 0.882764759014],
[-0.220458650578, -1.34482000302],
[1.66697179591, 0.470324389808]],
index=site_ids, columns=axes_ids)
biplot = None
site_constraints = None
prop_explained = None
ca_scores = OrdinationResults(
'CA', 'Correspondence Analysis', eigvals=eigvals, features=species,
samples=site, biplot_scores=biplot,
sample_constraints=site_constraints,
proportion_explained=prop_explained)
# CCA results
axes_ids = ['CCA%d' % i for i in range(1, 10)]
species_ids = ['Species0', 'Species1', 'Species2', 'Species3',
'Species4', 'Species5', 'Species6', 'Species7',
'Species8']
site_ids = ['Site0', 'Site1', 'Site2', 'Site3', 'Site4', 'Site5',
'Site6', 'Site7', 'Site8', 'Site9']
eigvals = pd.Series([0.366135830393, 0.186887643052, 0.0788466514249,
0.082287840501, 0.0351348475787, 0.0233265839374,
0.0099048981912, 0.00122461669234,
0.000417454724117], axes_ids)
species = pd.DataFrame(np.loadtxt(
get_data_path('ordination_exp_Ordination_CCA_species')),
index=species_ids, columns=axes_ids)
site = pd.DataFrame(
np.loadtxt(get_data_path('ordination_exp_Ordination_CCA_site')),
index=site_ids, columns=axes_ids)
biplot = pd.DataFrame(
[[-0.169746767979, 0.63069090084, 0.760769036049],
[-0.994016563505, 0.0609533148724, -0.0449369418179],
[0.184352565909, -0.974867543612, 0.0309865007541]],
columns=axes_ids[:3])
site_constraints = pd.DataFrame(np.loadtxt(
get_data_path('ordination_exp_Ordination_CCA_site_constraints')),
index=site_ids, columns=axes_ids)
prop_explained = None
cca_scores = OrdinationResults('CCA',
'Canonical Correspondence Analysis',
eigvals=eigvals, features=species,
samples=site, biplot_scores=biplot,
sample_constraints=site_constraints,
proportion_explained=prop_explained)
# PCoA results
axes_ids = ['PC%d' % i for i in range(1, 10)]
species_ids = None
site_ids = ['PC.636', 'PC.635', 'PC.356', 'PC.481', 'PC.354', 'PC.593',
'PC.355', 'PC.607', 'PC.634']
eigvals = pd.Series([0.512367260461, 0.300719094427, 0.267912066004,
0.208988681078, 0.19169895326, 0.16054234528,
0.15017695712, 0.122457748167, 0.0], axes_ids)
species = None
site = pd.DataFrame(
np.loadtxt(get_data_path('ordination_exp_Ordination_PCoA_site')),
index=site_ids, columns=axes_ids)
biplot = None
site_constraints = None
prop_explained = pd.Series([0.267573832777, 0.15704469605,
0.139911863774, 0.109140272454,
0.100111048503, 0.0838401161912,
0.0784269939011, 0.0639511763509, 0.0],
axes_ids)
pcoa_scores = OrdinationResults('PCoA',
'Principal Coordinate Analysis',
eigvals=eigvals, features=species,
samples=site, biplot_scores=biplot,
sample_constraints=site_constraints,
proportion_explained=prop_explained)
# RDA results
axes_ids = ['RDA%d' % i for i in range(1, 8)]
species_ids = ['Species0', 'Species1', 'Species2', 'Species3',
'Species4', 'Species5']
site_ids = ['Site0', 'Site1', 'Site2', 'Site3', 'Site4', 'Site5',
'Site6', 'Site7', 'Site8', 'Site9']
eigvals = pd.Series([25.8979540892, 14.9825779819, 8.93784077262,
6.13995623072, 1.68070536498, 0.57735026919,
0.275983624351], axes_ids)
species = pd.DataFrame(np.loadtxt(
get_data_path('ordination_exp_Ordination_RDA_species')),
index=species_ids, columns=axes_ids)
site = pd.DataFrame(
np.loadtxt(get_data_path('ordination_exp_Ordination_RDA_site')),
index=site_ids, columns=axes_ids)
biplot = pd.DataFrame(
[[0.422650019179, -0.559142585857, -0.713250678211],
[0.988495963777, 0.150787422017, -0.0117848614073],
[-0.556516618887, 0.817599992718, 0.147714267459],
[-0.404079676685, -0.9058434809, -0.127150316558]],
columns=axes_ids[:3])
site_constraints = pd.DataFrame(np.loadtxt(
get_data_path('ordination_exp_Ordination_RDA_site_constraints')),
index=site_ids, columns=axes_ids)
prop_explained = None
rda_scores = OrdinationResults(
'RDA', 'Redundancy Analysis', eigvals=eigvals, features=species,
samples=site, biplot_scores=biplot,
sample_constraints=site_constraints,
proportion_explained=prop_explained)
self.ordination_results_objs = [ca_scores, cca_scores, pcoa_scores,
rda_scores]
def test_read_valid_files(self):
for fp, obj in zip(self.valid_fps, self.ordination_results_objs):
obs = _ordination_to_ordination_results(fp)
assert_ordination_results_equal(
obs, obj, ignore_method_names=True,
ignore_axis_labels=True, ignore_biplot_scores_labels=True)
def test_read_invalid_files(self):
for invalid_fp, error_msg_regexp, _ in self.invalid_fps:
with six.assertRaisesRegex(self, OrdinationFormatError,
error_msg_regexp):
_ordination_to_ordination_results(invalid_fp)
def test_write(self):
for fp, obj in zip(self.valid_fps, self.ordination_results_objs):
fh = io.StringIO()
_ordination_results_to_ordination(obj, fh)
obs = fh.getvalue()
fh.close()
with io.open(fp) as fh:
exp = fh.read()
npt.assert_equal(obs, exp)
def test_roundtrip_read_write(self):
for fp in self.valid_fps:
# Read.
obj1 = _ordination_to_ordination_results(fp)
# Write.
fh = io.StringIO()
_ordination_results_to_ordination(obj1, fh)
fh.seek(0)
# Read.
obj2 = _ordination_to_ordination_results(fh)
fh.close()
assert_ordination_results_equal(obj1, obj2)
class SnifferTests(OrdinationTestData):
def setUp(self):
super(SnifferTests, self).setUp()
def test_matches_and_nonmatches(self):
# Sniffer should match all valid files, and will match some invalid
# ones too because it doesn't exhaustively check the entire file.
for fp in self.valid_fps:
self.assertEqual(_ordination_sniffer(fp), (True, {}))
for fp, _, expected_sniffer_match in self.invalid_fps:
self.assertEqual(_ordination_sniffer(fp),
(expected_sniffer_match, {}))
if __name__ == '__main__':
main()
|
bsd-3-clause
|
wkfwkf/statsmodels
|
statsmodels/stats/tests/test_power.py
|
28
|
25876
|
# -*- coding: utf-8 -*-
# pylint: disable=W0231, W0142
"""Tests for statistical power calculations
Note:
tests for chisquare power are in test_gof.py
Created on Sat Mar 09 08:44:49 2013
Author: Josef Perktold
"""
import copy
import numpy as np
from numpy.testing import (assert_almost_equal, assert_allclose, assert_raises,
assert_equal, assert_warns)
import statsmodels.stats.power as smp
import warnings
#from .test_weightstats import CheckPowerMixin
from statsmodels.stats.tests.test_weightstats import Holder
# for testing plots
import nose
from numpy.testing import dec
try:
import matplotlib.pyplot as plt #makes plt available for test functions
have_matplotlib = True
except ImportError:
have_matplotlib = False
class CheckPowerMixin(object):
def test_power(self):
#test against R results
kwds = copy.copy(self.kwds)
del kwds['power']
kwds.update(self.kwds_extra)
if hasattr(self, 'decimal'):
decimal = self.decimal
else:
decimal = 6
res1 = self.cls()
assert_almost_equal(res1.power(**kwds), self.res2.power, decimal=decimal)
def test_positional(self):
res1 = self.cls()
kwds = copy.copy(self.kwds)
del kwds['power']
kwds.update(self.kwds_extra)
# positional args
if hasattr(self, 'args_names'):
args_names = self.args_names
else:
nobs_ = 'nobs' if 'nobs' in kwds else 'nobs1'
args_names = ['effect_size', nobs_, 'alpha']
# pop positional args
args = [kwds.pop(arg) for arg in args_names]
if hasattr(self, 'decimal'):
decimal = self.decimal
else:
decimal = 6
res = res1.power(*args, **kwds)
assert_almost_equal(res, self.res2.power, decimal=decimal)
def test_roots(self):
kwds = copy.copy(self.kwds)
kwds.update(self.kwds_extra)
# kwds_extra are used as argument, but not as target for root
for key in self.kwds:
# keep print to check whether tests are really executed
#print 'testing roots', key
value = kwds[key]
kwds[key] = None
result = self.cls().solve_power(**kwds)
assert_allclose(result, value, rtol=0.001, err_msg=key+' failed')
# yield can be used to investigate specific errors
#yield assert_allclose, result, value, 0.001, 0, key+' failed'
kwds[key] = value # reset dict
@dec.skipif(not have_matplotlib)
def test_power_plot(self):
if self.cls == smp.FTestPower:
raise nose.SkipTest('skip FTestPower plot_power')
fig = plt.figure()
ax = fig.add_subplot(2,1,1)
fig = self.cls().plot_power(dep_var='nobs',
nobs= np.arange(2, 100),
effect_size=np.array([0.1, 0.2, 0.3, 0.5, 1]),
#alternative='larger',
ax=ax, title='Power of t-Test',
**self.kwds_extra)
ax = fig.add_subplot(2,1,2)
fig = self.cls().plot_power(dep_var='es',
nobs=np.array([10, 20, 30, 50, 70, 100]),
effect_size=np.linspace(0.01, 2, 51),
#alternative='larger',
ax=ax, title='',
**self.kwds_extra)
plt.close('all')
#''' test cases
#one sample
#                  two-sided    one-sided
#large power       OneS1        OneS3
#small power       OneS2        OneS4
#
#two sample
#                  two-sided    one-sided
#large power       TwoS1        TwoS3
#small power       TwoS2        TwoS4
#small p, ratio    TwoS4        TwoS5
#'''
class TestTTPowerOneS1(CheckPowerMixin):
def __init__(self):
#> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="two.sample",alternative="two.sided")
#> cat_items(p, prefix='tt_power2_1.')
res2 = Holder()
res2.n = 30
res2.d = 1
res2.sig_level = 0.05
res2.power = 0.9995636009612725
res2.alternative = 'two.sided'
res2.note = 'NULL'
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power':res2.power}
self.kwds_extra = {}
self.cls = smp.TTestPower
class TestTTPowerOneS2(CheckPowerMixin):
# case with small power
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=0.2,n=20,sig.level=0.05,type="one.sample",alternative="two.sided")
#> cat_items(p, "res2.")
res2.n = 20
res2.d = 0.2
res2.sig_level = 0.05
res2.power = 0.1359562887679666
res2.alternative = 'two.sided'
res2.note = '''NULL'''
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power':res2.power}
self.kwds_extra = {}
self.cls = smp.TTestPower
class TestTTPowerOneS3(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="one.sample",alternative="greater")
#> cat_items(p, prefix='tt_power1_1g.')
res2.n = 30
res2.d = 1
res2.sig_level = 0.05
res2.power = 0.999892010204909
res2.alternative = 'greater'
res2.note = 'NULL'
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power': res2.power}
self.kwds_extra = {'alternative': 'larger'}
self.cls = smp.TTestPower
class TestTTPowerOneS4(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=0.05,n=20,sig.level=0.05,type="one.sample",alternative="greater")
#> cat_items(p, "res2.")
res2.n = 20
res2.d = 0.05
res2.sig_level = 0.05
res2.power = 0.0764888785042198
res2.alternative = 'greater'
res2.note = '''NULL'''
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power': res2.power}
self.kwds_extra = {'alternative': 'larger'}
self.cls = smp.TTestPower
class TestTTPowerOneS5(CheckPowerMixin):
# case one-sided less, not implemented yet
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=0.2,n=20,sig.level=0.05,type="one.sample",alternative="less")
#> cat_items(p, "res2.")
res2.n = 20
res2.d = 0.2
res2.sig_level = 0.05
res2.power = 0.006063932667926375
res2.alternative = 'less'
res2.note = '''NULL'''
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power': res2.power}
self.kwds_extra = {'alternative': 'smaller'}
self.cls = smp.TTestPower
class TestTTPowerOneS6(CheckPowerMixin):
# case one-sided less, negative effect size, not implemented yet
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=-0.2,n=20,sig.level=0.05,type="one.sample",alternative="less")
#> cat_items(p, "res2.")
res2.n = 20
res2.d = -0.2
res2.sig_level = 0.05
res2.power = 0.21707518167191
res2.alternative = 'less'
res2.note = '''NULL'''
res2.method = 'One-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs': res2.n,
'alpha': res2.sig_level, 'power': res2.power}
self.kwds_extra = {'alternative': 'smaller'}
self.cls = smp.TTestPower
class TestTTPowerTwoS1(CheckPowerMixin):
def __init__(self):
#> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="two.sample",alternative="two.sided")
#> cat_items(p, prefix='tt_power2_1.')
res2 = Holder()
res2.n = 30
res2.d = 1
res2.sig_level = 0.05
res2.power = 0.967708258242517
res2.alternative = 'two.sided'
res2.note = 'n is number in *each* group'
res2.method = 'Two-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power': res2.power, 'ratio': 1}
self.kwds_extra = {}
self.cls = smp.TTestIndPower
class TestTTPowerTwoS2(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=0.1,n=20,sig.level=0.05,type="two.sample",alternative="two.sided")
#> cat_items(p, "res2.")
res2.n = 20
res2.d = 0.1
res2.sig_level = 0.05
res2.power = 0.06095912465411235
res2.alternative = 'two.sided'
res2.note = 'n is number in *each* group'
res2.method = 'Two-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power': res2.power, 'ratio': 1}
self.kwds_extra = {}
self.cls = smp.TTestIndPower
class TestTTPowerTwoS3(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=1,n=30,sig.level=0.05,type="two.sample",alternative="greater")
#> cat_items(p, prefix='tt_power2_1g.')
res2.n = 30
res2.d = 1
res2.sig_level = 0.05
res2.power = 0.985459690251624
res2.alternative = 'greater'
res2.note = 'n is number in *each* group'
res2.method = 'Two-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1}
self.kwds_extra = {'alternative': 'larger'}
self.cls = smp.TTestIndPower
class TestTTPowerTwoS4(CheckPowerMixin):
# case with small power
def __init__(self):
res2 = Holder()
#> p = pwr.t.test(d=0.01,n=30,sig.level=0.05,type="two.sample",alternative="greater")
#> cat_items(p, "res2.")
res2.n = 30
res2.d = 0.01
res2.sig_level = 0.05
res2.power = 0.0540740302835667
res2.alternative = 'greater'
res2.note = 'n is number in *each* group'
res2.method = 'Two-sample t test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power}
self.kwds_extra = {'alternative': 'larger'}
self.cls = smp.TTestIndPower
class TestTTPowerTwoS5(CheckPowerMixin):
# case with unequal n, ratio>1
def __init__(self):
res2 = Holder()
#> p = pwr.t2n.test(d=0.1,n1=20, n2=30,sig.level=0.05,alternative="two.sided")
#> cat_items(p, "res2.")
res2.n1 = 20
res2.n2 = 30
res2.d = 0.1
res2.sig_level = 0.05
res2.power = 0.0633081832564667
res2.alternative = 'two.sided'
res2.method = 't test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n1,
'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1.5}
self.kwds_extra = {'alternative': 'two-sided'}
self.cls = smp.TTestIndPower
class TestTTPowerTwoS6(CheckPowerMixin):
# case with unequal n, ratio>1
def __init__(self):
res2 = Holder()
#> p = pwr.t2n.test(d=0.1,n1=20, n2=30,sig.level=0.05,alternative="greater")
#> cat_items(p, "res2.")
res2.n1 = 20
res2.n2 = 30
res2.d = 0.1
res2.sig_level = 0.05
res2.power = 0.09623589080917805
res2.alternative = 'greater'
res2.method = 't test power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n1,
'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1.5}
self.kwds_extra = {'alternative': 'larger'}
self.cls = smp.TTestIndPower
def test_normal_power_explicit():
# a few initial test cases for NormalIndPower
sigma = 1
d = 0.3
nobs = 80
alpha = 0.05
res1 = smp.normal_power(d, nobs/2., 0.05)
res2 = smp.NormalIndPower().power(d, nobs, 0.05)
res3 = smp.NormalIndPower().solve_power(effect_size=0.3, nobs1=80, alpha=0.05, power=None)
res_R = 0.475100870572638
assert_almost_equal(res1, res_R, decimal=13)
assert_almost_equal(res2, res_R, decimal=13)
assert_almost_equal(res3, res_R, decimal=13)
norm_pow = smp.normal_power(-0.01, nobs/2., 0.05)
norm_pow_R = 0.05045832927039234
#value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="two.sided")
assert_almost_equal(norm_pow, norm_pow_R, decimal=11)
norm_pow = smp.NormalIndPower().power(0.01, nobs, 0.05,
alternative="larger")
norm_pow_R = 0.056869534873146124
#value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="greater")
assert_almost_equal(norm_pow, norm_pow_R, decimal=11)
# Note: negative effect size is same as switching one-sided alternative
# TODO: should I switch to larger/smaller instead of "one-sided" options
norm_pow = smp.NormalIndPower().power(-0.01, nobs, 0.05,
alternative="larger")
norm_pow_R = 0.0438089705093578
#value from R: >pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="less")
assert_almost_equal(norm_pow, norm_pow_R, decimal=11)
class TestNormalIndPower1(CheckPowerMixin):
def __init__(self):
#> example from above
# results copied not directly from R
res2 = Holder()
res2.n = 80
res2.d = 0.3
res2.sig_level = 0.05
res2.power = 0.475100870572638
res2.alternative = 'two.sided'
res2.note = 'NULL'
res2.method = 'two sample power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1}
self.kwds_extra = {}
self.cls = smp.NormalIndPower
class TestNormalIndPower2(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> np = pwr.2p.test(h=0.01,n=80,sig.level=0.05,alternative="less")
#> cat_items(np, "res2.")
res2.h = 0.01
res2.n = 80
res2.sig_level = 0.05
res2.power = 0.0438089705093578
res2.alternative = 'less'
res2.method = ('Difference of proportion power calculation for' +
' binomial distribution (arcsine transformation)')
res2.note = 'same sample sizes'
self.res2 = res2
self.kwds = {'effect_size': res2.h, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power, 'ratio': 1}
self.kwds_extra = {'alternative':'smaller'}
self.cls = smp.NormalIndPower
class TestNormalIndPower_onesamp1(CheckPowerMixin):
def __init__(self):
# forcing one-sample by using ratio=0
#> example from above
# results copied not directly from R
res2 = Holder()
res2.n = 40
res2.d = 0.3
res2.sig_level = 0.05
res2.power = 0.475100870572638
res2.alternative = 'two.sided'
res2.note = 'NULL'
res2.method = 'two sample power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power}
# keyword for which we don't look for root:
self.kwds_extra = {'ratio': 0}
self.cls = smp.NormalIndPower
class TestNormalIndPower_onesamp2(CheckPowerMixin):
# Note: same power as two sample case with twice as many observations
def __init__(self):
# forcing one-sample by using ratio=0
res2 = Holder()
#> np = pwr.norm.test(d=0.01,n=40,sig.level=0.05,alternative="less")
#> cat_items(np, "res2.")
res2.d = 0.01
res2.n = 40
res2.sig_level = 0.05
res2.power = 0.0438089705093578
res2.alternative = 'less'
res2.method = 'Mean power calculation for normal distribution with known variance'
self.res2 = res2
self.kwds = {'effect_size': res2.d, 'nobs1': res2.n,
'alpha': res2.sig_level, 'power':res2.power}
# keyword for which we don't look for root:
self.kwds_extra = {'ratio': 0, 'alternative':'smaller'}
self.cls = smp.NormalIndPower
class TestChisquarePower(CheckPowerMixin):
def __init__(self):
# one example from test_gof, results_power
res2 = Holder()
res2.w = 0.1
res2.N = 5
res2.df = 4
res2.sig_level = 0.05
res2.power = 0.05246644635810126
res2.method = 'Chi squared power calculation'
res2.note = 'N is the number of observations'
self.res2 = res2
self.kwds = {'effect_size': res2.w, 'nobs': res2.N,
'alpha': res2.sig_level, 'power':res2.power}
# keyword for which we don't look for root:
# solving for n_bins doesn't work, will not be used in regular usage
self.kwds_extra = {'n_bins': res2.df + 1}
self.cls = smp.GofChisquarePower
def _test_positional(self):
res1 = self.cls()
args_names = ['effect_size','nobs', 'alpha', 'n_bins']
kwds = copy.copy(self.kwds)
del kwds['power']
kwds.update(self.kwds_extra)
args = [kwds[arg] for arg in args_names]
if hasattr(self, 'decimal'):
decimal = self.decimal #pylint: disable-msg=E1101
else:
decimal = 6
assert_almost_equal(res1.power(*args), self.res2.power, decimal=decimal)
def test_ftest_power():
#equivalence ftest, ttest
for alpha in [0.01, 0.05, 0.1, 0.20, 0.50]:
res0 = smp.ttest_power(0.01, 200, alpha)
res1 = smp.ftest_power(0.01, 199, 1, alpha=alpha, ncc=0)
assert_almost_equal(res1, res0, decimal=6)
#example from Gplus documentation F-test ANOVA
#Total sample size:200
#Effect size "f":0.25
#Beta/alpha ratio:1
#Result:
#Alpha:0.1592
#Power (1-beta):0.8408
#Critical F:1.4762
#Lambda: 12.50000
res1 = smp.ftest_anova_power(0.25, 200, 0.1592, k_groups=10)
res0 = 0.8408
assert_almost_equal(res1, res0, decimal=4)
# TODO: no class yet
    # examples against R::pwr
res2 = Holder()
#> rf = pwr.f2.test(u=5, v=199, f2=0.1**2, sig.level=0.01)
#> cat_items(rf, "res2.")
res2.u = 5
res2.v = 199
res2.f2 = 0.01
res2.sig_level = 0.01
res2.power = 0.0494137732920332
res2.method = 'Multiple regression power calculation'
res1 = smp.ftest_power(np.sqrt(res2.f2), res2.v, res2.u,
alpha=res2.sig_level, ncc=1)
assert_almost_equal(res1, res2.power, decimal=5)
res2 = Holder()
#> rf = pwr.f2.test(u=5, v=199, f2=0.3**2, sig.level=0.01)
#> cat_items(rf, "res2.")
res2.u = 5
res2.v = 199
res2.f2 = 0.09
res2.sig_level = 0.01
res2.power = 0.7967191006290872
res2.method = 'Multiple regression power calculation'
res1 = smp.ftest_power(np.sqrt(res2.f2), res2.v, res2.u,
alpha=res2.sig_level, ncc=1)
assert_almost_equal(res1, res2.power, decimal=5)
res2 = Holder()
#> rf = pwr.f2.test(u=5, v=19, f2=0.3**2, sig.level=0.1)
#> cat_items(rf, "res2.")
res2.u = 5
res2.v = 19
res2.f2 = 0.09
res2.sig_level = 0.1
res2.power = 0.235454222377575
res2.method = 'Multiple regression power calculation'
res1 = smp.ftest_power(np.sqrt(res2.f2), res2.v, res2.u,
alpha=res2.sig_level, ncc=1)
assert_almost_equal(res1, res2.power, decimal=5)
# class based version of two above test for Ftest
class TestFtestAnovaPower(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#example from Gplus documentation F-test ANOVA
#Total sample size:200
#Effect size "f":0.25
#Beta/alpha ratio:1
#Result:
#Alpha:0.1592
#Power (1-beta):0.8408
#Critical F:1.4762
#Lambda: 12.50000
#converted to res2 by hand
res2.f = 0.25
res2.n = 200
res2.k = 10
res2.alpha = 0.1592
res2.power = 0.8408
res2.method = 'Multiple regression power calculation'
self.res2 = res2
self.kwds = {'effect_size': res2.f, 'nobs': res2.n,
'alpha': res2.alpha, 'power': res2.power}
# keyword for which we don't look for root:
# solving for n_bins doesn't work, will not be used in regular usage
self.kwds_extra = {'k_groups': res2.k} # rootfinding doesn't work
#self.args_names = ['effect_size','nobs', 'alpha']#, 'k_groups']
self.cls = smp.FTestAnovaPower
# precision for test_power
self.decimal = 4
class TestFtestPower(CheckPowerMixin):
def __init__(self):
res2 = Holder()
#> rf = pwr.f2.test(u=5, v=19, f2=0.3**2, sig.level=0.1)
#> cat_items(rf, "res2.")
res2.u = 5
res2.v = 19
res2.f2 = 0.09
res2.sig_level = 0.1
res2.power = 0.235454222377575
res2.method = 'Multiple regression power calculation'
self.res2 = res2
self.kwds = {'effect_size': np.sqrt(res2.f2), 'df_num': res2.v,
'df_denom': res2.u, 'alpha': res2.sig_level,
'power': res2.power}
# keyword for which we don't look for root:
# solving for n_bins doesn't work, will not be used in regular usage
self.kwds_extra = {}
self.args_names = ['effect_size', 'df_num', 'df_denom', 'alpha']
self.cls = smp.FTestPower
# precision for test_power
self.decimal = 5
def test_power_solver():
# messing up the solver to trigger backup
nip = smp.NormalIndPower()
# check result
es0 = 0.1
pow_ = nip.solve_power(es0, nobs1=1600, alpha=0.01, power=None, ratio=1,
alternative='larger')
# value is regression test
assert_almost_equal(pow_, 0.69219411243824214, decimal=5)
es = nip.solve_power(None, nobs1=1600, alpha=0.01, power=pow_, ratio=1,
alternative='larger')
assert_almost_equal(es, es0, decimal=4)
assert_equal(nip.cache_fit_res[0], 1)
assert_equal(len(nip.cache_fit_res), 2)
# cause first optimizer to fail
nip.start_bqexp['effect_size'] = {'upp': -10, 'low': -20}
nip.start_ttp['effect_size'] = 0.14
es = nip.solve_power(None, nobs1=1600, alpha=0.01, power=pow_, ratio=1,
alternative='larger')
assert_almost_equal(es, es0, decimal=4)
assert_equal(nip.cache_fit_res[0], 1)
assert_equal(len(nip.cache_fit_res), 3, err_msg=repr(nip.cache_fit_res))
nip.start_ttp['effect_size'] = np.nan
es = nip.solve_power(None, nobs1=1600, alpha=0.01, power=pow_, ratio=1,
alternative='larger')
assert_almost_equal(es, es0, decimal=4)
assert_equal(nip.cache_fit_res[0], 1)
assert_equal(len(nip.cache_fit_res), 4)
# I let this case fail, could be fixed for some statistical tests
# (we shouldn't get here in the first place)
# effect size is negative, but last stage brentq uses [1e-8, 1-1e-8]
assert_raises(ValueError, nip.solve_power, None, nobs1=1600, alpha=0.01,
power=0.005, ratio=1, alternative='larger')
def test_power_solver_warn():
# messing up the solver to trigger warning
# I wrote this with scipy 0.9,
# convergence behavior of scipy 0.11 is different,
# fails at a different case, but is successful where it failed before
pow_ = 0.69219411243824214 # from previous function
nip = smp.NormalIndPower()
# using nobs, has one backup (fsolve)
nip.start_bqexp['nobs1'] = {'upp': 50, 'low': -20}
val = nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1,
alternative='larger')
import scipy
if scipy.__version__ < '0.10':
assert_almost_equal(val, 1600, decimal=4)
assert_equal(nip.cache_fit_res[0], 1)
assert_equal(len(nip.cache_fit_res), 3)
# case that has convergence failure, and should warn
nip.start_ttp['nobs1'] = np.nan
from statsmodels.tools.sm_exceptions import ConvergenceWarning
assert_warns(ConvergenceWarning, nip.solve_power, 0.1, nobs1=None,
alpha=0.01, power=pow_, ratio=1, alternative='larger')
# this converges with scipy 0.11 ???
# nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1, alternative='larger')
with warnings.catch_warnings(): # python >= 2.6
warnings.simplefilter("ignore")
val = nip.solve_power(0.1, nobs1=None, alpha=0.01, power=pow_, ratio=1,
alternative='larger')
assert_equal(nip.cache_fit_res[0], 0)
assert_equal(len(nip.cache_fit_res), 3)
if __name__ == '__main__':
test_normal_power_explicit()
nt = TestNormalIndPower1()
nt.test_power()
nt.test_roots()
nt = TestNormalIndPower_onesamp1()
nt.test_power()
nt.test_roots()
|
bsd-3-clause
|
ConvergenceDA/visdom-web
|
VISDOM-server.py
|
1
|
17026
|
#!/usr/bin/python
# vim:ts=2 sw=2
"""CherryPy server that runs R processes and loads, filters, and visualizes Rdata files"""
from __future__ import absolute_import
from __future__ import print_function
import os, re
import json
import urllib
try:
import urlparse
from urllib import unquote
except ImportError:
import urllib.parse as urlparse
from urllib.parse import unquote
import logging
import mimetypes
# this is a new dependency
import cachetools # pip install cachetools
import numpy as np
import pandas as pd
import cherrypy
from cherrypy.lib.static import serve_file
from cherrypy.lib.static import serve_download
from jinja2 import Environment, FileSystemLoader
from jinja2support import Jinja2TemplatePlugin, Jinja2Tool
import DataService as ds
from six.moves import range
code_dir = os.path.dirname(os.path.abspath(__file__))
mimetypes.types_map[".xml"]="application/xml"
'''
r['getwd()']
#r['a<-5;a+3'] # doesn't return. Implies parsing or execution error, but would run as a one liner in the GUI
r = R()
r['data(trees)']
a = r.trees
type(a)
a[:4]
a[3:40]
a.loc[a.loc[:,'Girth'] > 12,:]
'''
# Jinja2 renders templates to html (or other text formats)
# Hat tip to: https://bitbucket.org/Lawouach/cherrypy-recipes/src/c399b40a3251/web/templating/jinja2_templating?at=default
# Register the Jinja2 plugin
env = Environment(loader=FileSystemLoader(os.path.join(code_dir,"template")))
Jinja2TemplatePlugin(cherrypy.engine, env=env).subscribe()
# Register the Jinja2 tool
cherrypy.tools.template = Jinja2Tool()
# This function checks if the user is connected via https and if not redirects to it
# In case users are worried about privacy, we want to make sure uploads are not done
# in the clear!
def force_https():
secure = cherrypy.request.scheme == 'https'
if not secure:
url = urlparse.urlparse(cherrypy.url())
secure_url = urlparse.urlunsplit(('https', url[1], url[2], url[3], url[4]))
raise cherrypy.HTTPRedirect(secure_url)
# check for https on every request cherrypy handles
cherrypy.tools.force_https = cherrypy.Tool('before_handler', force_https)
class Root: # the root serves (mostly) static site content
# home/landing/welcome page
@cherrypy.expose
def index(self):
count = cherrypy.session.get("count", 0) + 1
cherrypy.session["count"] = count
template = env.get_template("index.html")
response_dict = {} #"foo":"hi", "bar":"there"}
return template.render(**response_dict)
@cherrypy.expose
def jsdoc(self):
import markdown
count = cherrypy.session.get("count", 0) + 1
cherrypy.session["count"] = count
template = env.get_template("index.html")
response_dict = {} #"foo":"hi", "bar":"there"}
input_file = open('static/js/README.md','r')
text = input_file.read()
return markdown.markdown(text)
# home/landing/welcome page
@cherrypy.expose
def charts(self):
count = cherrypy.session.get("count", 0) + 1
cherrypy.session["count"] = count
template = env.get_template("charts.html")
response_dict = {}
return template.render(**response_dict)
class QueryService(object):
META_CACHE = {}
def metaDataResponse(self,qs):
if type(qs) == str:
queryObj = ds.parseDesc(qs)
else: queryObj = qs
#print json.dumps(queryObj, indent=2)
if queryObj['colList'] or queryObj['colInfo']:
# todo: what to do when there is more than one source?
dataSourceName = list(queryObj['dataSource'].keys())[0] # note that this is a hack to return just the first one
# todo: what to do with data sources that are too big?
if queryObj['colList']:
df = ds.restQuery('/s/' + dataSourceName) # get the full set of data
return json.dumps(df.columns.values.tolist()).encode('utf-8')
if queryObj['colInfo']:
if dataSourceName in self.META_CACHE: # check for and use the cache
print('Metadata from memory cache for %s' % (dataSourceName))
meta = self.META_CACHE[dataSourceName]
else:
df = ds.restQuery('/s/' + dataSourceName) # get the full set of data
desc = df.describe().transpose() # get basic stats for numerical columns
meta = ds.DataSource().getMetaData(dataSourceName) # get manual column metadata
if(meta is None): meta = pd.DataFrame(df.columns.values,index=df.columns.values,columns=['label'])
#meta = meta.join(desc) # join on feature names, which are the indices
meta = pd.merge(meta, desc, left_index=True, right_index=True, how='left') # alternate join approach is more flexible
self.META_CACHE[dataSourceName] = meta
print(meta.head())
print('That was the head of the column metadata')
                # must convert mixed data types (NaNs are considered floats or float64s)
                # to a single str dtype due to a bug in pandas. The JS client doesn't care
                # whether strings or mixed values are provided, so this workaround is OK.
# see: https://github.com/pydata/pandas/issues/10289
return meta.astype(str).to_json(orient='index').encode('utf-8')
else: return None
@cherrypy.expose
def default(self,*args,**kwargs):
# accept json structured query objects via post
if cherrypy.request.method == 'POST':
cl = cherrypy.request.headers['Content-Length']
rawbody = cherrypy.request.body.read(int(cl))
queryObj = json.loads(rawbody)
else:
qs = unquote(cherrypy.request.query_string) # decode < and > symbols
queryObj = ds.parseDesc(qs)
cherrypy.response.headers['Content-Type'] = 'application/json'
mdr = self.metaDataResponse(queryObj)
if mdr: return mdr
# TODO: deprecate the use of ExpiringDict as it doesn't support Python 3 (md5 is gone)
# Use cachetools.TTLCache(maxsize=20, ttl=30*60)
# Note that memory isn't freed for expired items until a mutating set/delete operation is called.
# https://pythonhosted.org/cachetools/
expiringFilterCache = cherrypy.session.setdefault('EXPIRING_FILTER_CACHE', cachetools.TTLCache(maxsize=20, ttl=30*60))
        # ExpiringDict is a first-in, first-out expiring dict
#expiringFilterCache = cherrypy.session.setdefault('EXPIRING_FILTER_CACHE', ExpiringDict(max_len=20, max_age_seconds=30*60))
# OR non-expiring version which would leak memory
#expiringFilterCache = cherrypy.session.setdefault('EXPIRING_FILTER_CACHE', dict())
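        # (Hedged note, not exercised by this handler: TTLCache only drops expired
        # entries lazily on mutating access; an explicit expiringFilterCache.expire()
        # call would purge them eagerly if memory pressure became an issue.)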
df = ds.executeQuery(queryObj, expiringFilterCache) # pass in the session to allow query result caching
if (queryObj['fmt'] == 'csv'):
cherrypy.response.headers['Content-Type'] = 'text/csv'
cherrypy.response.headers["Content-Disposition"] = "attachment; filename=VISDOM_export.csv"
cherrypy.response.headers["Pragma"] = "no-cache"
cherrypy.response.headers["Expires"] = "0"
return df.to_csv().encode('utf-8')
return df.to_json(orient='split').encode('utf-8')
@cherrypy.expose
def sources(self):
cherrypy.response.headers['Content-Type'] = 'application/json'
return json.dumps(ds.publicSources()).encode('utf-8')
@cherrypy.expose
def parse(self,**kwargs):
cherrypy.response.headers['Content-Type'] = 'application/json'
qs = unquote(cherrypy.request.query_string) # decode < and > symbols
print(ds.pretty(ds.parseDesc(qs)))
return ds.pretty(ds.parseDesc(qs)).encode('utf-8')
@cherrypy.expose
def shape(self,*args,**kwargs):
cherrypy.response.headers['Content-Type'] = 'application/json'
# query string
qs = unquote(cherrypy.request.query_string) # decode < and > symbols
mdr = self.metaDataResponse(qs)
if mdr: return mdr
pieces = qs.split('/')
print(pieces)
sourceName = pieces[2]
sourceCfg = ds.DataSource().getCfg(sourceName)
if sourceCfg is None:
raise ValueError("no config info available for %s" % sourceName)
sourcePrefix = sourceCfg.get('prefix',None)
if sourcePrefix is None: sourcePrefix = sourceName
sortType = pieces[3] # 'counts' or 'kwh'
topN = int(pieces[4])
qs = '/' + '/'.join(pieces[5:])
#print sortType, topN, qs
#/counts/10 or /kwh/10
ids = ds.restQuery('/s/' + sourceName + '|id' + qs) # find the list of unique ids filtered using the /f/etc. query
# load the counts of shapes per customer per dict shape
dictMembers = ds.restQuery('/s/%sDictMembers' % sourcePrefix)
dictKwh = ds.restQuery('/s/%sDictKwh' % sourcePrefix)
firstDataColIdx = 1
idName = 'id'
filteredDataColIdx = 1
        # TODO: hack to support hand-coded pgeres data alongside the standardized new VISDOM-R encoded data
if sourcePrefix == 'pgeres':
firstDataColIdx = 3
filteredDataColIdx = 4
idName = 'sp_id'
# total all the cluster members (total # of shapes) and all the kwh
totalMembers = float(dictMembers.iloc[:,firstDataColIdx:].sum().sum()) # sum both dimensions, ensuring float outcome
totalKwh = dictKwh.iloc[:,firstDataColIdx:].sum().sum() # sum both dimensions
print('Total: Members: %d, kWh: %0.1f' % (totalMembers, totalKwh))
filteredCounts = pd.merge(ids, dictMembers, left_on='id', right_on=idName, how='inner')
# load the kwh sums of shapes per customer per dict shape
filteredKwh = pd.merge(ids, dictKwh, left_on='id', right_on=idName, how='inner')
print('Filtered customer count: %d' % len(ids))
print('Filtered customers with shape data: %d' % len(filteredCounts.index))
# load the dictionary shapes
shapes = ds.restQuery('/s/%sDictCenters' % sourcePrefix)
if topN > len(shapes.index): topN = len(shapes.index)
countSum = filteredCounts.iloc[:,filteredDataColIdx:].sum(axis=0) # sum the cluster membership count columns
kwhSum = filteredKwh.iloc[:,filteredDataColIdx:].sum(axis=0) # sum the cluster kwh total columns
# compute the membership counts and total energy for each of the qualitative categories
categoryMap = ds.restQuery('/s/%sCategoryMapping' % sourcePrefix) # load shapes to qualitative categories
categoryMap['total_members'] = countSum.tolist() # strip the index so they match
categoryMap['total_kwh'] = kwhSum.tolist() # strip the index so they match
categoryGroups = categoryMap[['name','total_members','total_kwh']].groupby('name')
categoryStats = categoryGroups.sum()
categoryStats['pct_kwh'] = categoryStats.total_kwh / totalKwh
categoryStats['pct_members'] = categoryStats.total_members / totalMembers
categoryStats['pct_filtered_kwh'] = categoryStats.total_kwh / categoryMap['total_kwh'].sum()
categoryStats['pct_filtered_members'] = categoryStats.total_members / categoryMap['total_members'].sum()
categoryStats['name'] = categoryStats.index # add the index as a regular column, so the json format can be records.
print(categoryStats)
if sortType == 'members':
sortIdx = np.argsort(countSum)[::-1] # note that [::-1] reverses the array
elif sortType == 'kwh':
sortIdx = np.argsort( kwhSum)[::-1]
else:
raise ValueError('Bad sortType=%s from query %s' % (sortType,qs))
topIdx = sortIdx[list(range(topN))].as_matrix()
topShapes = shapes.iloc[topIdx,:]
#print pd.Series(kwhSum[topIdx].as_matrix(), index=topShapes.index)
        print('Filtered: Members: %d, kWh: %0.1f' % (countSum[topIdx].sum(), kwhSum[topIdx].sum()))
topShapes['total_kwh'] = pd.Series( kwhSum[topIdx].as_matrix(), index=topShapes.index)
topShapes['total_members'] = pd.Series(countSum[topIdx].as_matrix(), index=topShapes.index)
topShapes['pct_kwh'] = pd.Series( kwhSum[topIdx].as_matrix() * 100 / totalKwh, index=topShapes.index)
topShapes['pct_members'] = pd.Series(countSum[topIdx].as_matrix() * 100 / totalMembers, index=topShapes.index)
topShapes['pct_filtered_kwh'] = pd.Series( kwhSum[topIdx].as_matrix() * 100 / kwhSum.sum(), index=topShapes.index)
topShapes['pct_filtered_members'] = pd.Series(countSum[topIdx].as_matrix() * 100 / float(countSum.sum()), index=topShapes.index)
# building a json format map with the top shapes under "top" and the categorical totals under "categories"
out = '{"top":%s,"categories":%s}' % (topShapes.to_json(orient='split'),categoryStats.to_json(orient='records'))
return out.encode('utf-8')
@cherrypy.expose
def response(self,*args,**kwargs):
cherrypy.response.headers['Content-Type'] = 'application/json'
# query string
qs = unquote(cherrypy.request.query_string) # decode < and > symbols
mdr = self.metaDataResponse(qs)
if mdr: return mdr
pieces = qs.split('/')
print(pieces)
sourceName = pieces[2]
sourceCfg = ds.DataSource().getCfg(sourceName)
if sourceCfg is None:
raise ValueError("no config info available for %s" % sourceName)
sourcePrefix = sourceCfg.get('prefix',None)
if sourcePrefix is None: sourcePrefix = sourceName
sortType = pieces[3] # 'savings' or 'pct_savings'
desc = pieces[4] == 'true'
topN = int(pieces[5])
qs = '/' + '/'.join(pieces[6:])
#print sortType, topN, qs
#/counts/10 or /kwh/10
ids = ds.restQuery('/s/' + sourceName + '|id' + qs) # find the list of unique ids filtered using the /f/etc. query
# load the counts of shapes per customer per dict shape
custResponse = ds.restQuery('/s/%sResponseCustomer' % sourcePrefix)
eventResponse = ds.restQuery('/s/%sResponseEvent' % sourcePrefix)
firstFcst = eventResponse.columns.get_loc('hkw1_fcst')
firstObs = eventResponse.columns.get_loc('hkw1_obs')
forecast = np.array( [ float( row[ firstFcst - 1 + row['hour']] + 0.0000001 ) # prevent divide by zero
for index, row in eventResponse.iterrows() ] )
actual = np.array( [ float( row[ firstObs - 1 + row['hour']] + 0.0000001 ) # prevent imbalance from / 0
for index, row in eventResponse.iterrows() ] )
eventResponse['pct_savings'] = (forecast - actual) / forecast
eventResponse['savings'] = forecast - actual
eventResponse['forecast'] = forecast
eventResponse['actual'] = actual
# sort using the specified sort order
eventResponse = eventResponse.sort(sortType,ascending=(not desc))
# building a json format map with the top shapes under "top" and the categorical totals under "categories"
#return '{"top":%s,"categories":%s}' % (topShapes.to_json(orient='split'),categoryStats.to_json(orient='records'))
out = '{"top":%s}' % (eventResponse.head(topN).to_json(orient='split'))
return out.encode('utf-8')
if __name__ == "__main__":
CONSOLE_LOG = False
    # this controls logging from all calls to logging anywhere in the app!
logging.basicConfig( format='%(asctime)s %(levelname)s %(module)s.%(funcName)s[%(lineno)d]: %(message)s',
datefmt='%m/%d %H:%M:%S',
level=logging.DEBUG,
filename='log/seserver.log' )
if(CONSOLE_LOG): # if we want to log to both the console and the file
# define a logging handler that writes to sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(
logging.Formatter(' %(asctime)s %(levelname)s %(module)s.%(funcName)s[%(lineno)d]: %(message)s',
datefmt='%m/%d %H:%M:%S') )
# add the handler to the root logger
logging.getLogger('').addHandler(console)
logging.info('Logging started')
bft_conf = os.path.join(os.path.dirname(__file__), "se.conf")
root = Root()
root.query = QueryService()
#root.img = ImageService()
#root.upload = UploadService()
cherrypy.server.socket_host = '0.0.0.0' # bind to all available interfaces (is this bad?)
# HACK to get cherrypy config parsed correctly under python 3.5
# see https://github.com/cherrypy/cherrypy/issues/1382
from cherrypy._cpconfig import reprconf
conf = reprconf.Parser().dict_from_file(bft_conf)
print(conf.keys())
static_dir = os.getcwd() # Root static dir is this file's directory.
conf['global']['app.root'] = static_dir
conf['global']['error_page.404'] = static_dir + "/template/404.html"
conf['global']['error_page.500'] = static_dir + "/template/500.html"
conf['/']['tools.staticdir.root'] = static_dir
conf['/']['tools.staticfile.root'] = static_dir
cherrypy.quickstart(root, '/', conf)
# should work, but doesn't under 3.5
#cherrypy.quickstart(root,config=bft_conf)
#cherrypy.tree.mount(Root(),"/",bft_conf)
#cherrypy.tree.mount(QueryService(),"/query",bft_conf)
#if hasattr(cherrypy.engine, 'block'):
# # 3.1 syntax
# cherrypy.engine.start()
# cherrypy.engine.block()
#else:
# # 3.0 syntax
# cherrypy.server.quickstart()
# cherrypy.engine.start()
|
mit
|
Maccimo/intellij-community
|
python/helpers/pycharm_matplotlib_backend/sitecustomize.py
|
10
|
1837
|
import os
import sys
import traceback
SHOW_DEBUG_INFO = os.getenv('PYCHARM_DEBUG', 'False').lower() in ['true', '1']
def debug(message):
if SHOW_DEBUG_INFO:
sys.stderr.write(message)
sys.stderr.write("\n")
debug("Executing PyCharm's `sitecustomize`")
modules_list = []
try:
    # We want to import the user's sitecustomize.py file, if any
sitecustomize = "sitecustomize"
parent_dir = os.path.abspath(os.path.join(__file__, os.pardir))
if parent_dir in sys.path:
sys.path.remove(parent_dir)
if sitecustomize in sys.modules:
pycharm_sitecustomize_module = sys.modules.pop(sitecustomize)
try:
import sitecustomize
except ImportError:
debug("User doesn't have a custom `sitecustomize`")
            # restore our module if we failed to find any other sitecustomize
            # to prevent a KeyError when importing 'site.py'
sys.modules[sitecustomize] = pycharm_sitecustomize_module
sys.path.append(parent_dir)
# Use matplotlib backend from pycharm
modules_list = list(sys.modules.keys())
old_getfilesystemencoding = None
if not sys.getfilesystemencoding():
old_getfilesystemencoding = sys.getfilesystemencoding
sys.getfilesystemencoding = lambda: 'UTF-8'
import matplotlib
if old_getfilesystemencoding:
sys.getfilesystemencoding = old_getfilesystemencoding
matplotlib.use('module://backend_interagg')
debug("Custom matplotlib backend was set for SciView")
except:
# fallback in case matplotlib is not loaded correctly
if SHOW_DEBUG_INFO:
traceback.print_exc()
keys = list(sys.modules.keys())
if modules_list:
for key in keys:
if key not in modules_list:
sys.modules.pop(key)
|
apache-2.0
|
GeoODK/formhub
|
odk_viewer/tasks.py
|
4
|
9898
|
import sys, re
from celery import task
from django.db import transaction
from django.conf import settings
from django.core.mail import mail_admins
from odk_viewer.models import Export
from utils.export_tools import generate_export,\
generate_attachments_zip_export, generate_kml_export
from utils.logger_tools import mongo_sync_status, report_exception
from pandas_mongo_bridge import NoRecordsFoundError
def create_async_export(xform, export_type, query, force_xlsx, options=None):
username = xform.user.username
id_string = xform.id_string
@transaction.commit_on_success
def _create_export(xform, export_type):
return Export.objects.create(xform=xform, export_type=export_type)
export = _create_export(xform, export_type)
result = None
arguments = {
'username': username,
'id_string': id_string,
'export_id': export.id,
'query': query,
}
if export_type in [Export.XLS_EXPORT, Export.GDOC_EXPORT,
Export.CSV_EXPORT, Export.CSV_ZIP_EXPORT]:
if options and "group_delimiter" in options:
arguments["group_delimiter"] = options["group_delimiter"]
if options and "split_select_multiples" in options:
arguments["split_select_multiples"] =\
options["split_select_multiples"]
# start async export
if export_type in [Export.XLS_EXPORT, Export.GDOC_EXPORT]:
result = create_xls_export.apply_async((), arguments, countdown=10)
elif export_type == Export.CSV_EXPORT:
result = create_csv_export.apply_async(
(), arguments, countdown=10)
elif export_type == Export.CSV_ZIP_EXPORT:
result = create_csv_zip_export.apply_async(
(), arguments, countdown=10)
else:
raise Export.ExportTypeError
elif export_type == Export.ZIP_EXPORT:
# start async export
result = create_zip_export.apply_async(
(), arguments, countdown=10)
elif export_type == Export.KML_EXPORT:
# start async export
result = create_kml_export.apply_async(
(), arguments, countdown=10)
else:
raise Export.ExportTypeError
if result:
        # when celery is running eager, the export has already been generated by
        # the time we get here, so let's retrieve the export object afresh before
        # we save
if settings.CELERY_ALWAYS_EAGER:
export = Export.objects.get(id=export.id)
export.task_id = result.task_id
export.save()
return export, result
return None
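# (Hedged usage sketch, not part of the original module; `xform` stands in for an
#  XForm model instance and the options shown are just the keys handled above.)
#
#   export, async_result = create_async_export(
#       xform, Export.CSV_EXPORT, query=None, force_xlsx=False,
#       options={"group_delimiter": "/", "split_select_multiples": True})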
@task()
def create_xls_export(username, id_string, export_id, query=None,
force_xlsx=True, group_delimiter='/',
split_select_multiples=True):
# we re-query the db instead of passing model objects according to
# http://docs.celeryproject.org/en/latest/userguide/tasks.html#state
ext = 'xls' if not force_xlsx else 'xlsx'
export = Export.objects.get(id=export_id)
    # though export is not available when the form has 0 submissions, we
    # catch this since it potentially stops celery
try:
gen_export = generate_export(
Export.XLS_EXPORT, ext, username, id_string, export_id, query,
group_delimiter, split_select_multiples)
except (Exception, NoRecordsFoundError) as e:
export.internal_status = Export.FAILED
export.save()
# mail admins
details = {
'export_id': export_id,
'username': username,
'id_string': id_string
}
report_exception("XLS Export Exception: Export ID - "
"%(export_id)s, /%(username)s/%(id_string)s"
% details, e, sys.exc_info())
        # raise for now to let celery know we failed
        # - doesn't seem to break celery
raise
else:
return gen_export.id
@task()
def create_csv_export(username, id_string, export_id, query=None,
group_delimiter='/', split_select_multiples=True):
# we re-query the db instead of passing model objects according to
# http://docs.celeryproject.org/en/latest/userguide/tasks.html#state
export = Export.objects.get(id=export_id)
try:
        # though export is not available when the form has 0 submissions, we
        # catch this since it potentially stops celery
gen_export = generate_export(
Export.CSV_EXPORT, 'csv', username, id_string, export_id, query,
group_delimiter, split_select_multiples)
except NoRecordsFoundError:
# not much we can do but we don't want to report this as the user
# should not even be on this page if the survey has no records
export.internal_status = Export.FAILED
export.save()
except Exception as e:
export.internal_status = Export.FAILED
export.save()
# mail admins
details = {
'export_id': export_id,
'username': username,
'id_string': id_string
}
report_exception("CSV Export Exception: Export ID - "
"%(export_id)s, /%(username)s/%(id_string)s"
% details, e, sys.exc_info())
raise
else:
return gen_export.id
@task()
def create_kml_export(username, id_string, export_id, query=None):
# we re-query the db instead of passing model objects according to
# http://docs.celeryproject.org/en/latest/userguide/tasks.html#state
export = Export.objects.get(id=export_id)
try:
        # though export is not available when the form has 0 submissions, we
        # catch this since it potentially stops celery
gen_export = generate_kml_export(
Export.KML_EXPORT, 'kml', username, id_string, export_id, query)
except (Exception, NoRecordsFoundError) as e:
export.internal_status = Export.FAILED
export.save()
# mail admins
details = {
'export_id': export_id,
'username': username,
'id_string': id_string
}
report_exception("KML Export Exception: Export ID - "
"%(export_id)s, /%(username)s/%(id_string)s"
% details, e, sys.exc_info())
raise
else:
return gen_export.id
@task()
def create_zip_export(username, id_string, export_id, query=None):
export = Export.objects.get(id=export_id)
try:
gen_export = generate_attachments_zip_export(
Export.ZIP_EXPORT, 'zip', username, id_string, export_id, query)
except (Exception, NoRecordsFoundError) as e:
export.internal_status = Export.FAILED
export.save()
# mail admins
details = {
'export_id': export_id,
'username': username,
'id_string': id_string
}
report_exception("Zip Export Exception: Export ID - "
"%(export_id)s, /%(username)s/%(id_string)s"
% details, e)
raise
else:
if not settings.TESTING_MODE:
delete_export.apply_async(
(), {'export_id': gen_export.id},
countdown=settings.ZIP_EXPORT_COUNTDOWN)
return gen_export.id
@task()
def create_csv_zip_export(username, id_string, export_id, query=None,
group_delimiter='/', split_select_multiples=True):
export = Export.objects.get(id=export_id)
try:
        # though export is not available when the form has 0 submissions, we
        # catch this since it potentially stops celery
gen_export = generate_export(
Export.CSV_ZIP_EXPORT, 'zip', username, id_string, export_id, query,
group_delimiter, split_select_multiples)
except (Exception, NoRecordsFoundError) as e:
export.internal_status = Export.FAILED
export.save()
# mail admins
details = {
'export_id': export_id,
'username': username,
'id_string': id_string
}
report_exception("CSV ZIP Export Exception: Export ID - "
"%(export_id)s, /%(username)s/%(id_string)s"
% details, e, sys.exc_info())
raise
else:
return gen_export.id
@task()
def delete_export(export_id):
try:
export = Export.objects.get(id=export_id)
except Export.DoesNotExist:
pass
else:
export.delete()
return True
return False
SYNC_MONGO_MANUAL_INSTRUCTIONS = """
To re-sync manually, ssh into the server and run:
python manage.py sync_mongo -r [username] [id_string] --settings='formhub.preset.local_settings'
To force complete delete and re-creation, use the -a option:
python manage.py sync_mongo -ra [username] [id_string] --settings='formhub.preset.local_settings'
"""
REMONGO_PATTERN = re.compile(r'Total # of records to remongo: -?[1-9]+', re.IGNORECASE)
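# (Hedged illustration: the pattern is meant to flag non-zero report lines such as
#  "Total # of records to remongo: 42"; a count of 0 does not match.)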
@task()
def email_mongo_sync_status():
"""Check the status of records in the mysql db versus mongodb, and, if necessary,
invoke the command to re-sync the two databases, sending an email report to the
admins of before and after, so that manual syncing (if necessary) can be done."""
before_report = mongo_sync_status()
if REMONGO_PATTERN.search(before_report):
# synchronization is necessary
after_report = mongo_sync_status(remongo=True)
else:
# no synchronization is needed
after_report = "No synchronization needed"
# send the before and after reports, along with instructions for
# syncing manually, as an email to the administrators
mail_admins("Mongo DB sync status",
'\n\n'.join([before_report,
after_report,
SYNC_MONGO_MANUAL_INSTRUCTIONS]))
|
bsd-2-clause
|
michigraber/scikit-learn
|
sklearn/__init__.py
|
154
|
3014
|
"""
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
|
bsd-3-clause
|
YorkUIRLab/eosdb
|
dynamic_nmf/TopicCollection.py
|
1
|
2672
|
import os, sys, random, operator
import logging as log
from optparse import OptionParser
import numpy as np
import sklearn.preprocessing
import text.util
import unsupervised.nmf, unsupervised.rankings, unsupervised.coherence
# --------------------------------------------------------------
class TopicCollection:
def __init__(self, top_terms=0, threshold=1e-6):
# settings
self.top_terms = top_terms
self.threshold = threshold
# state
self.topic_ids = []
self.all_weights = []
self.all_terms = set()
def add_topic_model(self, H, terms, window_topic_labels):
"""
Add topics from a window topic model to the collection.
"""
k = H.shape[0]
for topic_index in range(k):
topic_weights = {}
# use top terms only (sparse topic representation)?
if self.top_terms > 0:
top_indices = np.argsort(H[topic_index, :])[::-1]
for term_index in top_indices[0:self.top_terms]:
topic_weights[terms[term_index]] = H[topic_index, term_index]
self.all_terms.add(terms[term_index])
# use dense window topic vectors
else:
total_weight = 0.0
for term_index in range(len(terms)):
total_weight += H[topic_index, term_index]
for term_index in range(len(terms)):
w = H[topic_index, term_index] / total_weight
if w >= self.threshold:
topic_weights[terms[term_index]] = H[topic_index, term_index]
self.all_terms.add(terms[term_index])
self.all_weights.append(topic_weights)
self.topic_ids.append(window_topic_labels[topic_index])
def create_matrix(self):
"""
Create the topic-term matrix from all window topics that have been added so far.
"""
# map terms to column indices
all_terms = list(self.all_terms)
M = np.zeros((len(self.all_weights), len(all_terms)))
term_col_map = {}
for term in all_terms:
term_col_map[term] = len(term_col_map)
# populate the matrix in row-order
row = 0
for topic_weights in self.all_weights:
for term in topic_weights.keys():
M[row, term_col_map[term]] = topic_weights[term]
row += 1
# normalize the matrix rows to L2 unit length
normalizer = sklearn.preprocessing.Normalizer(norm='l2', copy=True)
normalizer.fit(M)
M = normalizer.transform(M)
return (M, all_terms)
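# ----------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module):
# the H matrix, term list and window labels below are made-up stand-ins for
# the output of a real window topic model.
if __name__ == "__main__":
    demo_H = np.array([[0.6, 0.3, 0.1],
                       [0.1, 0.2, 0.7]])
    demo_terms = ["economy", "sport", "music"]
    collection = TopicCollection(top_terms=2)
    collection.add_topic_model(demo_H, demo_terms,
                               ["window1_topic01", "window1_topic02"])
    M, all_terms = collection.create_matrix()
    print("topic-term matrix %s built over terms %s" % (str(M.shape), all_terms))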
|
lgpl-3.0
|
fyffyt/scikit-learn
|
sklearn/cluster/bicluster.py
|
211
|
19443
|
"""Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
    # According to the paper, this can also be done more efficiently with
    # deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
|
bsd-3-clause
|
selva86/python-machine-learning
|
outliers/outlier_removal_regression.py
|
11
|
2376
|
#!/usr/bin/python
import random
import numpy
import matplotlib.pyplot as plt
import pickle
from outlier_cleaner import outlierCleaner
### load up some practice data with outliers in it
ages = pickle.load( open("practice_outliers_ages.pkl", "r") )
net_worths = pickle.load( open("practice_outliers_net_worths.pkl", "r") )
### ages and net_worths need to be reshaped into 2D numpy arrays
### second argument of reshape command is a tuple of integers: (n_rows, n_columns)
### by convention, n_rows is the number of data points
### and n_columns is the number of features
ages = numpy.reshape( numpy.array(ages), (len(ages), 1))
net_worths = numpy.reshape( numpy.array(net_worths), (len(net_worths), 1))
from sklearn.cross_validation import train_test_split
ages_train, ages_test, net_worths_train, net_worths_test = train_test_split(ages, net_worths, test_size=0.1, random_state=42)
### fill in a regression here! Name the regression object reg so that
### the plotting code below works, and you can see what your regression looks like
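### One possible fill-in (a minimal sketch, not part of the original exercise):
### LinearRegression is an assumption here; any sklearn regressor that exposes
### fit/predict and is bound to the name reg will satisfy the plotting code below.
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(ages_train, net_worths_train)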
try:
plt.plot(ages, reg.predict(ages), color="blue")
except NameError:
pass
plt.scatter(ages, net_worths)
plt.show()
### identify and remove the most outlier-y points
cleaned_data = []
try:
predictions = reg.predict(ages_train)
cleaned_data = outlierCleaner( predictions, ages_train, net_worths_train )
except NameError:
print "your regression object doesn't exist, or isn't name reg"
print "can't make predictions to use in identifying outliers"
### only run this code if cleaned_data is returning data
if len(cleaned_data) > 0:
ages, net_worths, errors = zip(*cleaned_data)
ages = numpy.reshape( numpy.array(ages), (len(ages), 1))
net_worths = numpy.reshape( numpy.array(net_worths), (len(net_worths), 1))
### refit your cleaned data!
try:
reg.fit(ages, net_worths)
plt.plot(ages, reg.predict(ages), color="blue")
except NameError:
print "you don't seem to have regression imported/created,"
print " or else your regression object isn't named reg"
print " either way, only draw the scatter plot of the cleaned data"
plt.scatter(ages, net_worths)
plt.xlabel("ages")
plt.ylabel("net worths")
plt.show()
else:
print "outlierCleaner() is returning an empty list, no refitting to be done"
|
mit
|
WBradbeer/port-routing
|
graph_drawer.py
|
1
|
1168
|
import os
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
def df_to_edges(cost_df):
edges = []
for port in cost_df.index.values:
for dest in cost_df.index.values:
if cost_df[port][dest] > 0:
edges.append((port,dest,{'cost': cost_df[port][dest]}))
return edges
G = nx.DiGraph()
file_path = os.path.abspath(os.path.dirname(__file__))
edge_costs = pd.read_csv(file_path + "/data/port_costs.csv",
index_col="Port")
port_pos = {x[0]: [x[1], x[2]] for x in pd.read_csv(file_path +
"/data/port_pos.csv",
index_col="Port").itertuples()}
ports = edge_costs.index.values
edges = df_to_edges(edge_costs)
G.add_nodes_from(ports)
G.add_edges_from(edges)
edge_labels = {(x[0], x[1]): x[2]['cost'] for x in edges}
print edge_labels
nx.draw(G, pos=port_pos, node_size=1000, node_color=['k','b','b','r','k'])
nx.draw_networkx_labels(G, port_pos, font_size=18, font_color='white')
nx.draw_networkx_edge_labels(G, port_pos, font_size=12, edge_labels=edge_labels)
plt.show()
|
mit
|
wesm/arrow
|
python/pyarrow/tests/parquet/test_dataset.py
|
1
|
52590
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import os
import numpy as np
import pytest
import pyarrow as pa
from pyarrow import fs
from pyarrow.filesystem import LocalFileSystem
from pyarrow.tests import util
from pyarrow.tests.parquet.common import (
parametrize_legacy_dataset, parametrize_legacy_dataset_fixed,
parametrize_legacy_dataset_not_supported)
from pyarrow.util import guid
from pyarrow.vendored.version import Version
try:
import pyarrow.parquet as pq
from pyarrow.tests.parquet.common import (
_read_table, _test_dataframe, _write_table)
except ImportError:
pq = None
try:
import pandas as pd
import pandas.testing as tm
except ImportError:
pd = tm = None
pytestmark = pytest.mark.parquet
@pytest.mark.pandas
def test_parquet_piece_read(tempdir):
df = _test_dataframe(1000)
table = pa.Table.from_pandas(df)
path = tempdir / 'parquet_piece_read.parquet'
_write_table(table, path, version='2.0')
piece1 = pq.ParquetDatasetPiece(path)
result = piece1.read()
assert result.equals(table)
@pytest.mark.pandas
def test_parquet_piece_open_and_get_metadata(tempdir):
df = _test_dataframe(100)
table = pa.Table.from_pandas(df)
path = tempdir / 'parquet_piece_read.parquet'
_write_table(table, path, version='2.0')
piece = pq.ParquetDatasetPiece(path)
table1 = piece.read()
assert isinstance(table1, pa.Table)
meta1 = piece.get_metadata()
assert isinstance(meta1, pq.FileMetaData)
assert table.equals(table1)
def test_parquet_piece_basics():
path = '/baz.parq'
piece1 = pq.ParquetDatasetPiece(path)
piece2 = pq.ParquetDatasetPiece(path, row_group=1)
piece3 = pq.ParquetDatasetPiece(
path, row_group=1, partition_keys=[('foo', 0), ('bar', 1)])
assert str(piece1) == path
assert str(piece2) == '/baz.parq | row_group=1'
assert str(piece3) == 'partition[foo=0, bar=1] /baz.parq | row_group=1'
assert piece1 == piece1
assert piece2 == piece2
assert piece3 == piece3
assert piece1 != piece3
def test_partition_set_dictionary_type():
set1 = pq.PartitionSet('key1', ['foo', 'bar', 'baz'])
set2 = pq.PartitionSet('key2', [2007, 2008, 2009])
assert isinstance(set1.dictionary, pa.StringArray)
assert isinstance(set2.dictionary, pa.IntegerArray)
set3 = pq.PartitionSet('key2', [datetime.datetime(2007, 1, 1)])
with pytest.raises(TypeError):
set3.dictionary
@parametrize_legacy_dataset_fixed
def test_filesystem_uri(tempdir, use_legacy_dataset):
table = pa.table({"a": [1, 2, 3]})
directory = tempdir / "data_dir"
directory.mkdir()
path = directory / "data.parquet"
pq.write_table(table, str(path))
# filesystem object
result = pq.read_table(
path, filesystem=fs.LocalFileSystem(),
use_legacy_dataset=use_legacy_dataset)
assert result.equals(table)
# filesystem URI
result = pq.read_table(
"data_dir/data.parquet", filesystem=util._filesystem_uri(tempdir),
use_legacy_dataset=use_legacy_dataset)
assert result.equals(table)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_read_partitioned_directory(tempdir, use_legacy_dataset):
fs = LocalFileSystem._get_instance()
_partition_test_for_filesystem(fs, tempdir, use_legacy_dataset)
@pytest.mark.pandas
def test_create_parquet_dataset_multi_threaded(tempdir):
fs = LocalFileSystem._get_instance()
base_path = tempdir
_partition_test_for_filesystem(fs, base_path)
manifest = pq.ParquetManifest(base_path, filesystem=fs,
metadata_nthreads=1)
dataset = pq.ParquetDataset(base_path, filesystem=fs, metadata_nthreads=16)
assert len(dataset.pieces) > 0
partitions = dataset.partitions
assert len(partitions.partition_names) > 0
assert partitions.partition_names == manifest.partitions.partition_names
assert len(partitions.levels) == len(manifest.partitions.levels)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_read_partitioned_columns_selection(tempdir, use_legacy_dataset):
# ARROW-3861 - do not include partition columns in resulting table when
# `columns` keyword was passed without those columns
fs = LocalFileSystem._get_instance()
base_path = tempdir
_partition_test_for_filesystem(fs, base_path)
dataset = pq.ParquetDataset(
base_path, use_legacy_dataset=use_legacy_dataset)
result = dataset.read(columns=["values"])
if use_legacy_dataset:
# ParquetDataset implementation always includes the partition columns
# automatically, and we can't easily "fix" this since dask relies on
# this behaviour (ARROW-8644)
assert result.column_names == ["values", "foo", "bar"]
else:
assert result.column_names == ["values"]
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_filters_equivalency(tempdir, use_legacy_dataset):
fs = LocalFileSystem._get_instance()
base_path = tempdir
integer_keys = [0, 1]
string_keys = ['a', 'b', 'c']
boolean_keys = [True, False]
partition_spec = [
['integer', integer_keys],
['string', string_keys],
['boolean', boolean_keys]
]
df = pd.DataFrame({
'integer': np.array(integer_keys, dtype='i4').repeat(15),
'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
3),
}, columns=['integer', 'string', 'boolean'])
_generate_partition_directories(fs, base_path, partition_spec, df)
# Old filters syntax:
# integer == 1 AND string != b AND boolean == True
dataset = pq.ParquetDataset(
base_path, filesystem=fs,
filters=[('integer', '=', 1), ('string', '!=', 'b'),
('boolean', '==', 'True')],
use_legacy_dataset=use_legacy_dataset,
)
table = dataset.read()
result_df = (table.to_pandas().reset_index(drop=True))
assert 0 not in result_df['integer'].values
assert 'b' not in result_df['string'].values
assert False not in result_df['boolean'].values
# filters in disjunctive normal form:
# (integer == 1 AND string != b AND boolean == True) OR
# (integer == 2 AND boolean == False)
# TODO(ARROW-3388): boolean columns are reconstructed as string
filters = [
[
('integer', '=', 1),
('string', '!=', 'b'),
('boolean', '==', 'True')
],
[('integer', '=', 0), ('boolean', '==', 'False')]
]
dataset = pq.ParquetDataset(
base_path, filesystem=fs, filters=filters,
use_legacy_dataset=use_legacy_dataset)
table = dataset.read()
result_df = table.to_pandas().reset_index(drop=True)
# Check that all rows in the DF fulfill the filter
# Pandas 0.23.x has problems with indexing constant memoryviews in
# categoricals. Thus we need to make an explicit copy here with np.array.
df_filter_1 = (np.array(result_df['integer']) == 1) \
& (np.array(result_df['string']) != 'b') \
& (np.array(result_df['boolean']) == 'True')
df_filter_2 = (np.array(result_df['integer']) == 0) \
& (np.array(result_df['boolean']) == 'False')
assert df_filter_1.sum() > 0
assert df_filter_2.sum() > 0
assert result_df.shape[0] == (df_filter_1.sum() + df_filter_2.sum())
if use_legacy_dataset:
# Check for \0 in predicate values. Until they are correctly
# implemented in ARROW-3391, they would otherwise lead to weird
# results with the current code.
with pytest.raises(NotImplementedError):
filters = [[('string', '==', b'1\0a')]]
pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
with pytest.raises(NotImplementedError):
filters = [[('string', '==', '1\0a')]]
pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
else:
for filters in [[[('string', '==', b'1\0a')]],
[[('string', '==', '1\0a')]]]:
dataset = pq.ParquetDataset(
base_path, filesystem=fs, filters=filters,
use_legacy_dataset=False)
assert dataset.read().num_rows == 0
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_filters_cutoff_exclusive_integer(tempdir, use_legacy_dataset):
fs = LocalFileSystem._get_instance()
base_path = tempdir
integer_keys = [0, 1, 2, 3, 4]
partition_spec = [
['integers', integer_keys],
]
N = 5
df = pd.DataFrame({
'index': np.arange(N),
'integers': np.array(integer_keys, dtype='i4'),
}, columns=['index', 'integers'])
_generate_partition_directories(fs, base_path, partition_spec, df)
dataset = pq.ParquetDataset(
base_path, filesystem=fs,
filters=[
('integers', '<', 4),
('integers', '>', 1),
],
use_legacy_dataset=use_legacy_dataset
)
table = dataset.read()
result_df = (table.to_pandas()
.sort_values(by='index')
.reset_index(drop=True))
result_list = [x for x in map(int, result_df['integers'].values)]
assert result_list == [2, 3]
@pytest.mark.pandas
@parametrize_legacy_dataset
@pytest.mark.xfail(
# different error with use_legacy_datasets because result_df is no longer
# categorical
raises=(TypeError, AssertionError),
reason='Loss of type information in creation of categoricals.'
)
def test_filters_cutoff_exclusive_datetime(tempdir, use_legacy_dataset):
fs = LocalFileSystem._get_instance()
base_path = tempdir
date_keys = [
datetime.date(2018, 4, 9),
datetime.date(2018, 4, 10),
datetime.date(2018, 4, 11),
datetime.date(2018, 4, 12),
datetime.date(2018, 4, 13)
]
partition_spec = [
['dates', date_keys]
]
N = 5
df = pd.DataFrame({
'index': np.arange(N),
'dates': np.array(date_keys, dtype='datetime64'),
}, columns=['index', 'dates'])
_generate_partition_directories(fs, base_path, partition_spec, df)
dataset = pq.ParquetDataset(
base_path, filesystem=fs,
filters=[
('dates', '<', "2018-04-12"),
('dates', '>', "2018-04-10")
],
use_legacy_dataset=use_legacy_dataset
)
table = dataset.read()
result_df = (table.to_pandas()
.sort_values(by='index')
.reset_index(drop=True))
expected = pd.Categorical(
np.array([datetime.date(2018, 4, 11)], dtype='datetime64'),
categories=np.array(date_keys, dtype='datetime64'))
assert result_df['dates'].values == expected
@pytest.mark.pandas
@pytest.mark.dataset
def test_filters_inclusive_datetime(tempdir):
# ARROW-11480
path = tempdir / 'timestamps.parquet'
pd.DataFrame({
"dates": pd.date_range("2020-01-01", periods=10, freq="D"),
"id": range(10)
}).to_parquet(path, use_deprecated_int96_timestamps=True)
table = pq.read_table(path, filters=[
("dates", "<=", datetime.datetime(2020, 1, 5))
])
assert table.column('id').to_pylist() == [0, 1, 2, 3, 4]
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_filters_inclusive_integer(tempdir, use_legacy_dataset):
fs = LocalFileSystem._get_instance()
base_path = tempdir
integer_keys = [0, 1, 2, 3, 4]
partition_spec = [
['integers', integer_keys],
]
N = 5
df = pd.DataFrame({
'index': np.arange(N),
'integers': np.array(integer_keys, dtype='i4'),
}, columns=['index', 'integers'])
_generate_partition_directories(fs, base_path, partition_spec, df)
dataset = pq.ParquetDataset(
base_path, filesystem=fs,
filters=[
('integers', '<=', 3),
('integers', '>=', 2),
],
use_legacy_dataset=use_legacy_dataset
)
table = dataset.read()
result_df = (table.to_pandas()
.sort_values(by='index')
.reset_index(drop=True))
    result_list = [int(x) for x in result_df['integers'].values]
assert result_list == [2, 3]
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_filters_inclusive_set(tempdir, use_legacy_dataset):
fs = LocalFileSystem._get_instance()
base_path = tempdir
integer_keys = [0, 1]
string_keys = ['a', 'b', 'c']
boolean_keys = [True, False]
partition_spec = [
['integer', integer_keys],
['string', string_keys],
['boolean', boolean_keys]
]
df = pd.DataFrame({
'integer': np.array(integer_keys, dtype='i4').repeat(15),
'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
3),
}, columns=['integer', 'string', 'boolean'])
_generate_partition_directories(fs, base_path, partition_spec, df)
dataset = pq.ParquetDataset(
base_path, filesystem=fs,
filters=[('string', 'in', 'ab')],
use_legacy_dataset=use_legacy_dataset
)
table = dataset.read()
result_df = (table.to_pandas().reset_index(drop=True))
assert 'a' in result_df['string'].values
assert 'b' in result_df['string'].values
assert 'c' not in result_df['string'].values
dataset = pq.ParquetDataset(
base_path, filesystem=fs,
filters=[('integer', 'in', [1]), ('string', 'in', ('a', 'b')),
('boolean', 'not in', {False})],
use_legacy_dataset=use_legacy_dataset
)
table = dataset.read()
result_df = (table.to_pandas().reset_index(drop=True))
assert 0 not in result_df['integer'].values
assert 'c' not in result_df['string'].values
assert False not in result_df['boolean'].values
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_filters_invalid_pred_op(tempdir, use_legacy_dataset):
fs = LocalFileSystem._get_instance()
base_path = tempdir
integer_keys = [0, 1, 2, 3, 4]
partition_spec = [
['integers', integer_keys],
]
N = 5
df = pd.DataFrame({
'index': np.arange(N),
'integers': np.array(integer_keys, dtype='i4'),
}, columns=['index', 'integers'])
_generate_partition_directories(fs, base_path, partition_spec, df)
with pytest.raises(TypeError):
pq.ParquetDataset(base_path,
filesystem=fs,
filters=[('integers', 'in', 3), ],
use_legacy_dataset=use_legacy_dataset)
with pytest.raises(ValueError):
pq.ParquetDataset(base_path,
filesystem=fs,
filters=[('integers', '=<', 3), ],
use_legacy_dataset=use_legacy_dataset)
if use_legacy_dataset:
with pytest.raises(ValueError):
pq.ParquetDataset(base_path,
filesystem=fs,
filters=[('integers', 'in', set()), ],
use_legacy_dataset=use_legacy_dataset)
else:
# Dataset API returns empty table instead
dataset = pq.ParquetDataset(base_path,
filesystem=fs,
filters=[('integers', 'in', set()), ],
use_legacy_dataset=use_legacy_dataset)
assert dataset.read().num_rows == 0
with pytest.raises(ValueError):
pq.ParquetDataset(base_path,
filesystem=fs,
filters=[('integers', '!=', {3})],
use_legacy_dataset=use_legacy_dataset)
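# Hedged sketch, not part of the original test suite: the filters keyword
# accepts either a flat list of (column, op, value) tuples (implicitly ANDed)
# or a list of such lists in disjunctive normal form, i.e. ORed groups of
# ANDed predicates. The helper below is hypothetical and only illustrates the
# expected shape of the argument.
def _example_filters_dnf_sketch(base_path, filesystem):
    # (integers == 1 AND string in {'a', 'b'}) OR (integers > 3)
    dnf_filters = [
        [('integers', '==', 1), ('string', 'in', {'a', 'b'})],
        [('integers', '>', 3)],
    ]
    return pq.ParquetDataset(base_path, filesystem=filesystem,
                             filters=dnf_filters).read()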
@pytest.mark.pandas
@parametrize_legacy_dataset_fixed
def test_filters_invalid_column(tempdir, use_legacy_dataset):
# ARROW-5572 - raise error on invalid name in filter specification
# works with new dataset / xfail with legacy implementation
fs = LocalFileSystem._get_instance()
base_path = tempdir
integer_keys = [0, 1, 2, 3, 4]
partition_spec = [['integers', integer_keys]]
N = 5
df = pd.DataFrame({
'index': np.arange(N),
'integers': np.array(integer_keys, dtype='i4'),
}, columns=['index', 'integers'])
_generate_partition_directories(fs, base_path, partition_spec, df)
msg = r"No match for FieldRef.Name\(non_existent_column\)"
with pytest.raises(ValueError, match=msg):
pq.ParquetDataset(base_path, filesystem=fs,
filters=[('non_existent_column', '<', 3), ],
use_legacy_dataset=use_legacy_dataset).read()
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_filters_read_table(tempdir, use_legacy_dataset):
# test that filters keyword is passed through in read_table
fs = LocalFileSystem._get_instance()
base_path = tempdir
integer_keys = [0, 1, 2, 3, 4]
partition_spec = [
['integers', integer_keys],
]
N = 5
df = pd.DataFrame({
'index': np.arange(N),
'integers': np.array(integer_keys, dtype='i4'),
}, columns=['index', 'integers'])
_generate_partition_directories(fs, base_path, partition_spec, df)
table = pq.read_table(
base_path, filesystem=fs, filters=[('integers', '<', 3)],
use_legacy_dataset=use_legacy_dataset)
assert table.num_rows == 3
table = pq.read_table(
base_path, filesystem=fs, filters=[[('integers', '<', 3)]],
use_legacy_dataset=use_legacy_dataset)
assert table.num_rows == 3
table = pq.read_pandas(
base_path, filters=[('integers', '<', 3)],
use_legacy_dataset=use_legacy_dataset)
assert table.num_rows == 3
@pytest.mark.pandas
@parametrize_legacy_dataset_fixed
def test_partition_keys_with_underscores(tempdir, use_legacy_dataset):
# ARROW-5666 - partition field values with underscores preserve underscores
# xfail with legacy dataset -> they get interpreted as integers
fs = LocalFileSystem._get_instance()
base_path = tempdir
string_keys = ["2019_2", "2019_3"]
partition_spec = [
['year_week', string_keys],
]
N = 2
df = pd.DataFrame({
'index': np.arange(N),
'year_week': np.array(string_keys, dtype='object'),
}, columns=['index', 'year_week'])
_generate_partition_directories(fs, base_path, partition_spec, df)
dataset = pq.ParquetDataset(
base_path, use_legacy_dataset=use_legacy_dataset)
result = dataset.read()
assert result.column("year_week").to_pylist() == string_keys
@parametrize_legacy_dataset
def test_read_s3fs(s3_example_s3fs, use_legacy_dataset):
fs, path = s3_example_s3fs
path = path + "/test.parquet"
table = pa.table({"a": [1, 2, 3]})
_write_table(table, path, filesystem=fs)
result = _read_table(
path, filesystem=fs, use_legacy_dataset=use_legacy_dataset
)
assert result.equals(table)
@parametrize_legacy_dataset
def test_read_directory_s3fs(s3_example_s3fs, use_legacy_dataset):
fs, directory = s3_example_s3fs
path = directory + "/test.parquet"
table = pa.table({"a": [1, 2, 3]})
_write_table(table, path, filesystem=fs)
result = _read_table(
directory, filesystem=fs, use_legacy_dataset=use_legacy_dataset
)
assert result.equals(table)
@pytest.mark.pandas
@pytest.mark.s3
@parametrize_legacy_dataset
def test_read_partitioned_directory_s3fs_wrapper(
s3_example_s3fs, use_legacy_dataset
):
import s3fs
from pyarrow.filesystem import S3FSWrapper
if Version(s3fs.__version__) >= Version("0.5"):
pytest.skip("S3FSWrapper no longer working for s3fs 0.5+")
fs, path = s3_example_s3fs
with pytest.warns(FutureWarning):
wrapper = S3FSWrapper(fs)
_partition_test_for_filesystem(wrapper, path)
# Check that we can auto-wrap
dataset = pq.ParquetDataset(
path, filesystem=fs, use_legacy_dataset=use_legacy_dataset
)
dataset.read()
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_read_partitioned_directory_s3fs(s3_example_s3fs, use_legacy_dataset):
fs, path = s3_example_s3fs
_partition_test_for_filesystem(
fs, path, use_legacy_dataset=use_legacy_dataset
)
def _partition_test_for_filesystem(fs, base_path, use_legacy_dataset=True):
foo_keys = [0, 1]
bar_keys = ['a', 'b', 'c']
partition_spec = [
['foo', foo_keys],
['bar', bar_keys]
]
N = 30
df = pd.DataFrame({
'index': np.arange(N),
'foo': np.array(foo_keys, dtype='i4').repeat(15),
'bar': np.tile(np.tile(np.array(bar_keys, dtype=object), 5), 2),
'values': np.random.randn(N)
}, columns=['index', 'foo', 'bar', 'values'])
_generate_partition_directories(fs, base_path, partition_spec, df)
dataset = pq.ParquetDataset(
base_path, filesystem=fs, use_legacy_dataset=use_legacy_dataset)
table = dataset.read()
result_df = (table.to_pandas()
.sort_values(by='index')
.reset_index(drop=True))
expected_df = (df.sort_values(by='index')
.reset_index(drop=True)
.reindex(columns=result_df.columns))
expected_df['foo'] = pd.Categorical(df['foo'], categories=foo_keys)
expected_df['bar'] = pd.Categorical(df['bar'], categories=bar_keys)
assert (result_df.columns == ['index', 'values', 'foo', 'bar']).all()
tm.assert_frame_equal(result_df, expected_df)
def _generate_partition_directories(fs, base_dir, partition_spec, df):
    # partition_spec : list of lists, e.g. [['foo', [0, 1, 2]],
    #                                       ['bar', ['a', 'b', 'c']]]
# part_table : a pyarrow.Table to write to each partition
DEPTH = len(partition_spec)
pathsep = getattr(fs, "pathsep", getattr(fs, "sep", "/"))
def _visit_level(base_dir, level, part_keys):
name, values = partition_spec[level]
for value in values:
this_part_keys = part_keys + [(name, value)]
level_dir = pathsep.join([
str(base_dir),
'{}={}'.format(name, value)
])
fs.mkdir(level_dir)
if level == DEPTH - 1:
# Generate example data
file_path = pathsep.join([level_dir, guid()])
filtered_df = _filter_partition(df, this_part_keys)
part_table = pa.Table.from_pandas(filtered_df)
with fs.open(file_path, 'wb') as f:
_write_table(part_table, f)
assert fs.exists(file_path)
file_success = pathsep.join([level_dir, '_SUCCESS'])
with fs.open(file_success, 'wb') as f:
pass
else:
_visit_level(level_dir, level + 1, this_part_keys)
file_success = pathsep.join([level_dir, '_SUCCESS'])
with fs.open(file_success, 'wb') as f:
pass
_visit_level(base_dir, 0, [])
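# Hedged illustration, not part of the original module: for a two-level spec
# the helper above is expected to produce a Hive-style layout such as
# <base_dir>/foo=0/bar=a/<guid>, <base_dir>/foo=0/bar=b/<guid>, ..., with a
# _SUCCESS marker written at every directory level. The function below is a
# hypothetical usage sketch.
def _example_generate_partitions_sketch(tmp_path):
    local_fs = LocalFileSystem._get_instance()
    spec = [['foo', [0, 1]], ['bar', ['a', 'b']]]
    df = pd.DataFrame({'foo': [0, 0, 1, 1],
                       'bar': ['a', 'b', 'a', 'b'],
                       'values': [1.0, 2.0, 3.0, 4.0]})
    _generate_partition_directories(local_fs, tmp_path, spec, df)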
def _test_read_common_metadata_files(fs, base_path):
import pandas as pd
import pyarrow.parquet as pq
N = 100
df = pd.DataFrame({
'index': np.arange(N),
'values': np.random.randn(N)
}, columns=['index', 'values'])
base_path = str(base_path)
data_path = os.path.join(base_path, 'data.parquet')
table = pa.Table.from_pandas(df)
with fs.open(data_path, 'wb') as f:
_write_table(table, f)
metadata_path = os.path.join(base_path, '_common_metadata')
with fs.open(metadata_path, 'wb') as f:
pq.write_metadata(table.schema, f)
dataset = pq.ParquetDataset(base_path, filesystem=fs)
assert dataset.common_metadata_path == str(metadata_path)
with fs.open(data_path) as f:
common_schema = pq.read_metadata(f).schema
assert dataset.schema.equals(common_schema)
# handle list of one directory
dataset2 = pq.ParquetDataset([base_path], filesystem=fs)
assert dataset2.schema.equals(dataset.schema)
@pytest.mark.pandas
def test_read_common_metadata_files(tempdir):
fs = LocalFileSystem._get_instance()
_test_read_common_metadata_files(fs, tempdir)
@pytest.mark.pandas
def test_read_metadata_files(tempdir):
fs = LocalFileSystem._get_instance()
N = 100
df = pd.DataFrame({
'index': np.arange(N),
'values': np.random.randn(N)
}, columns=['index', 'values'])
data_path = tempdir / 'data.parquet'
table = pa.Table.from_pandas(df)
with fs.open(data_path, 'wb') as f:
_write_table(table, f)
metadata_path = tempdir / '_metadata'
with fs.open(metadata_path, 'wb') as f:
pq.write_metadata(table.schema, f)
dataset = pq.ParquetDataset(tempdir, filesystem=fs)
assert dataset.metadata_path == str(metadata_path)
with fs.open(data_path) as f:
metadata_schema = pq.read_metadata(f).schema
assert dataset.schema.equals(metadata_schema)
def _filter_partition(df, part_keys):
predicate = np.ones(len(df), dtype=bool)
to_drop = []
for name, value in part_keys:
to_drop.append(name)
# to avoid pandas warning
if isinstance(value, (datetime.date, datetime.datetime)):
value = pd.Timestamp(value)
predicate &= df[name] == value
return df[predicate].drop(to_drop, axis=1)
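# Hedged usage sketch, not part of the original module: _filter_partition
# keeps only the rows matching the given (name, value) partition keys and
# drops the partition columns themselves.
def _example_filter_partition_sketch():
    df = pd.DataFrame({'foo': [0, 0, 1], 'bar': ['a', 'b', 'a'],
                       'values': [1.0, 2.0, 3.0]})
    # Single row left, with only the 'values' column remaining
    return _filter_partition(df, [('foo', 0), ('bar', 'a')])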
@parametrize_legacy_dataset
@pytest.mark.pandas
def test_filter_before_validate_schema(tempdir, use_legacy_dataset):
# ARROW-4076 apply filter before schema validation
# to avoid checking unneeded schemas
    # create a partitioned dataset with mismatching schemas, which would
    # otherwise raise if all schemas were validated up front
dir1 = tempdir / 'A=0'
dir1.mkdir()
table1 = pa.Table.from_pandas(pd.DataFrame({'B': [1, 2, 3]}))
pq.write_table(table1, dir1 / 'data.parquet')
dir2 = tempdir / 'A=1'
dir2.mkdir()
table2 = pa.Table.from_pandas(pd.DataFrame({'B': ['a', 'b', 'c']}))
pq.write_table(table2, dir2 / 'data.parquet')
# read single file using filter
table = pq.read_table(tempdir, filters=[[('A', '==', 0)]],
use_legacy_dataset=use_legacy_dataset)
assert table.column('B').equals(pa.chunked_array([[1, 2, 3]]))
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_read_multiple_files(tempdir, use_legacy_dataset):
nfiles = 10
size = 5
dirpath = tempdir / guid()
dirpath.mkdir()
test_data = []
paths = []
for i in range(nfiles):
df = _test_dataframe(size, seed=i)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
path = dirpath / '{}.parquet'.format(i)
table = pa.Table.from_pandas(df)
_write_table(table, path)
test_data.append(table)
paths.append(path)
# Write a _SUCCESS.crc file
(dirpath / '_SUCCESS.crc').touch()
def read_multiple_files(paths, columns=None, use_threads=True, **kwargs):
dataset = pq.ParquetDataset(
paths, use_legacy_dataset=use_legacy_dataset, **kwargs)
return dataset.read(columns=columns, use_threads=use_threads)
result = read_multiple_files(paths)
expected = pa.concat_tables(test_data)
assert result.equals(expected)
# Read with provided metadata
# TODO(dataset) specifying metadata not yet supported
metadata = pq.read_metadata(paths[0])
if use_legacy_dataset:
result2 = read_multiple_files(paths, metadata=metadata)
assert result2.equals(expected)
result3 = pq.ParquetDataset(dirpath, schema=metadata.schema).read()
assert result3.equals(expected)
else:
with pytest.raises(ValueError, match="no longer supported"):
pq.read_table(paths, metadata=metadata, use_legacy_dataset=False)
# Read column subset
to_read = [0, 2, 6, result.num_columns - 1]
col_names = [result.field(i).name for i in to_read]
out = pq.read_table(
dirpath, columns=col_names, use_legacy_dataset=use_legacy_dataset
)
expected = pa.Table.from_arrays([result.column(i) for i in to_read],
names=col_names,
metadata=result.schema.metadata)
assert out.equals(expected)
# Read with multiple threads
pq.read_table(
dirpath, use_threads=True, use_legacy_dataset=use_legacy_dataset
)
# Test failure modes with non-uniform metadata
bad_apple = _test_dataframe(size, seed=i).iloc[:, :4]
bad_apple_path = tempdir / '{}.parquet'.format(guid())
t = pa.Table.from_pandas(bad_apple)
_write_table(t, bad_apple_path)
if not use_legacy_dataset:
# TODO(dataset) Dataset API skips bad files
return
bad_meta = pq.read_metadata(bad_apple_path)
with pytest.raises(ValueError):
read_multiple_files(paths + [bad_apple_path])
with pytest.raises(ValueError):
read_multiple_files(paths, metadata=bad_meta)
mixed_paths = [bad_apple_path, paths[0]]
with pytest.raises(ValueError):
read_multiple_files(mixed_paths, schema=bad_meta.schema)
with pytest.raises(ValueError):
read_multiple_files(mixed_paths)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_dataset_read_pandas(tempdir, use_legacy_dataset):
nfiles = 5
size = 5
dirpath = tempdir / guid()
dirpath.mkdir()
test_data = []
frames = []
paths = []
for i in range(nfiles):
df = _test_dataframe(size, seed=i)
df.index = np.arange(i * size, (i + 1) * size)
df.index.name = 'index'
path = dirpath / '{}.parquet'.format(i)
table = pa.Table.from_pandas(df)
_write_table(table, path)
test_data.append(table)
frames.append(df)
paths.append(path)
dataset = pq.ParquetDataset(dirpath, use_legacy_dataset=use_legacy_dataset)
columns = ['uint8', 'strings']
result = dataset.read_pandas(columns=columns).to_pandas()
expected = pd.concat([x[columns] for x in frames])
tm.assert_frame_equal(result, expected)
# also be able to pass the columns as a set (ARROW-12314)
result = dataset.read_pandas(columns=set(columns)).to_pandas()
assert result.shape == expected.shape
# column order can be different because of using a set
tm.assert_frame_equal(result.reindex(columns=expected.columns), expected)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_dataset_memory_map(tempdir, use_legacy_dataset):
# ARROW-2627: Check that we can use ParquetDataset with memory-mapping
dirpath = tempdir / guid()
dirpath.mkdir()
df = _test_dataframe(10, seed=0)
path = dirpath / '{}.parquet'.format(0)
table = pa.Table.from_pandas(df)
_write_table(table, path, version='2.0')
dataset = pq.ParquetDataset(
dirpath, memory_map=True, use_legacy_dataset=use_legacy_dataset)
assert dataset.read().equals(table)
if use_legacy_dataset:
assert dataset.pieces[0].read().equals(table)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_dataset_enable_buffered_stream(tempdir, use_legacy_dataset):
dirpath = tempdir / guid()
dirpath.mkdir()
df = _test_dataframe(10, seed=0)
path = dirpath / '{}.parquet'.format(0)
table = pa.Table.from_pandas(df)
_write_table(table, path, version='2.0')
with pytest.raises(ValueError):
pq.ParquetDataset(
dirpath, buffer_size=-64,
use_legacy_dataset=use_legacy_dataset)
for buffer_size in [128, 1024]:
dataset = pq.ParquetDataset(
dirpath, buffer_size=buffer_size,
use_legacy_dataset=use_legacy_dataset)
assert dataset.read().equals(table)
def _make_example_multifile_dataset(base_path, nfiles=10, file_nrows=5):
test_data = []
paths = []
for i in range(nfiles):
df = _test_dataframe(file_nrows, seed=i)
path = base_path / '{}.parquet'.format(i)
test_data.append(_write_table(df, path))
paths.append(path)
return paths
def _assert_dataset_paths(dataset, paths, use_legacy_dataset):
if use_legacy_dataset:
assert set(map(str, paths)) == {x.path for x in dataset.pieces}
else:
paths = [str(path.as_posix()) for path in paths]
assert set(paths) == set(dataset._dataset.files)
@pytest.mark.pandas
@parametrize_legacy_dataset
@pytest.mark.parametrize('dir_prefix', ['_', '.'])
def test_ignore_private_directories(tempdir, dir_prefix, use_legacy_dataset):
dirpath = tempdir / guid()
dirpath.mkdir()
paths = _make_example_multifile_dataset(dirpath, nfiles=10,
file_nrows=5)
# private directory
(dirpath / '{}staging'.format(dir_prefix)).mkdir()
dataset = pq.ParquetDataset(dirpath, use_legacy_dataset=use_legacy_dataset)
_assert_dataset_paths(dataset, paths, use_legacy_dataset)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_ignore_hidden_files_dot(tempdir, use_legacy_dataset):
dirpath = tempdir / guid()
dirpath.mkdir()
paths = _make_example_multifile_dataset(dirpath, nfiles=10,
file_nrows=5)
with (dirpath / '.DS_Store').open('wb') as f:
f.write(b'gibberish')
with (dirpath / '.private').open('wb') as f:
f.write(b'gibberish')
dataset = pq.ParquetDataset(dirpath, use_legacy_dataset=use_legacy_dataset)
_assert_dataset_paths(dataset, paths, use_legacy_dataset)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_ignore_hidden_files_underscore(tempdir, use_legacy_dataset):
dirpath = tempdir / guid()
dirpath.mkdir()
paths = _make_example_multifile_dataset(dirpath, nfiles=10,
file_nrows=5)
with (dirpath / '_committed_123').open('wb') as f:
f.write(b'abcd')
with (dirpath / '_started_321').open('wb') as f:
f.write(b'abcd')
dataset = pq.ParquetDataset(dirpath, use_legacy_dataset=use_legacy_dataset)
_assert_dataset_paths(dataset, paths, use_legacy_dataset)
@pytest.mark.pandas
@parametrize_legacy_dataset
@pytest.mark.parametrize('dir_prefix', ['_', '.'])
def test_ignore_no_private_directories_in_base_path(
tempdir, dir_prefix, use_legacy_dataset
):
# ARROW-8427 - don't ignore explicitly listed files if parent directory
# is a private directory
dirpath = tempdir / "{0}data".format(dir_prefix) / guid()
dirpath.mkdir(parents=True)
paths = _make_example_multifile_dataset(dirpath, nfiles=10,
file_nrows=5)
dataset = pq.ParquetDataset(paths, use_legacy_dataset=use_legacy_dataset)
_assert_dataset_paths(dataset, paths, use_legacy_dataset)
# ARROW-9644 - don't ignore full directory with underscore in base path
dataset = pq.ParquetDataset(dirpath, use_legacy_dataset=use_legacy_dataset)
_assert_dataset_paths(dataset, paths, use_legacy_dataset)
@pytest.mark.pandas
@parametrize_legacy_dataset_fixed
def test_ignore_custom_prefixes(tempdir, use_legacy_dataset):
# ARROW-9573 - allow override of default ignore_prefixes
part = ["xxx"] * 3 + ["yyy"] * 3
table = pa.table([
pa.array(range(len(part))),
pa.array(part).dictionary_encode(),
], names=['index', '_part'])
# TODO use_legacy_dataset ARROW-10247
pq.write_to_dataset(table, str(tempdir), partition_cols=['_part'])
private_duplicate = tempdir / '_private_duplicate'
private_duplicate.mkdir()
pq.write_to_dataset(table, str(private_duplicate),
partition_cols=['_part'])
read = pq.read_table(
tempdir, use_legacy_dataset=use_legacy_dataset,
ignore_prefixes=['_private'])
assert read.equals(table)
@parametrize_legacy_dataset_fixed
def test_empty_directory(tempdir, use_legacy_dataset):
# ARROW-5310 - reading empty directory
# fails with legacy implementation
empty_dir = tempdir / 'dataset'
empty_dir.mkdir()
dataset = pq.ParquetDataset(
empty_dir, use_legacy_dataset=use_legacy_dataset)
result = dataset.read()
assert result.num_rows == 0
assert result.num_columns == 0
def _test_write_to_dataset_with_partitions(base_path,
use_legacy_dataset=True,
filesystem=None,
schema=None,
index_name=None):
import pandas as pd
import pandas.testing as tm
import pyarrow.parquet as pq
# ARROW-1400
output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
'group2': list('eefeffgeee'),
'num': list(range(10)),
'nan': [np.nan] * 10,
'date': np.arange('2017-01-01', '2017-01-11',
dtype='datetime64[D]')})
cols = output_df.columns.tolist()
partition_by = ['group1', 'group2']
output_table = pa.Table.from_pandas(output_df, schema=schema, safe=False,
preserve_index=False)
pq.write_to_dataset(output_table, base_path, partition_by,
filesystem=filesystem,
use_legacy_dataset=use_legacy_dataset)
metadata_path = os.path.join(str(base_path), '_common_metadata')
if filesystem is not None:
with filesystem.open(metadata_path, 'wb') as f:
pq.write_metadata(output_table.schema, f)
else:
pq.write_metadata(output_table.schema, metadata_path)
# ARROW-2891: Ensure the output_schema is preserved when writing a
# partitioned dataset
dataset = pq.ParquetDataset(base_path,
filesystem=filesystem,
validate_schema=True,
use_legacy_dataset=use_legacy_dataset)
# ARROW-2209: Ensure the dataset schema also includes the partition columns
if use_legacy_dataset:
dataset_cols = set(dataset.schema.to_arrow_schema().names)
else:
# NB schema property is an arrow and not parquet schema
dataset_cols = set(dataset.schema.names)
assert dataset_cols == set(output_table.schema.names)
input_table = dataset.read()
input_df = input_table.to_pandas()
# Read data back in and compare with original DataFrame
# Partitioned columns added to the end of the DataFrame when read
input_df_cols = input_df.columns.tolist()
assert partition_by == input_df_cols[-1 * len(partition_by):]
input_df = input_df[cols]
# Partitioned columns become 'categorical' dtypes
for col in partition_by:
output_df[col] = output_df[col].astype('category')
tm.assert_frame_equal(output_df, input_df)
def _test_write_to_dataset_no_partitions(base_path,
use_legacy_dataset=True,
filesystem=None):
import pandas as pd
import pyarrow.parquet as pq
# ARROW-1400
output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
'group2': list('eefeffgeee'),
'num': list(range(10)),
'date': np.arange('2017-01-01', '2017-01-11',
dtype='datetime64[D]')})
cols = output_df.columns.tolist()
output_table = pa.Table.from_pandas(output_df)
if filesystem is None:
filesystem = LocalFileSystem._get_instance()
# Without partitions, append files to root_path
n = 5
for i in range(n):
pq.write_to_dataset(output_table, base_path,
filesystem=filesystem)
output_files = [file for file in filesystem.ls(str(base_path))
if file.endswith(".parquet")]
assert len(output_files) == n
# Deduplicated incoming DataFrame should match
    # original outgoing DataFrame
input_table = pq.ParquetDataset(
base_path, filesystem=filesystem,
use_legacy_dataset=use_legacy_dataset
).read()
input_df = input_table.to_pandas()
input_df = input_df.drop_duplicates()
input_df = input_df[cols]
assert output_df.equals(input_df)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_write_to_dataset_with_partitions(tempdir, use_legacy_dataset):
_test_write_to_dataset_with_partitions(str(tempdir), use_legacy_dataset)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_write_to_dataset_with_partitions_and_schema(
tempdir, use_legacy_dataset
):
schema = pa.schema([pa.field('group1', type=pa.string()),
pa.field('group2', type=pa.string()),
pa.field('num', type=pa.int64()),
pa.field('nan', type=pa.int32()),
pa.field('date', type=pa.timestamp(unit='us'))])
_test_write_to_dataset_with_partitions(
str(tempdir), use_legacy_dataset, schema=schema)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_write_to_dataset_with_partitions_and_index_name(
tempdir, use_legacy_dataset
):
_test_write_to_dataset_with_partitions(
str(tempdir), use_legacy_dataset, index_name='index_name')
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_write_to_dataset_no_partitions(tempdir, use_legacy_dataset):
_test_write_to_dataset_no_partitions(str(tempdir), use_legacy_dataset)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_write_to_dataset_pathlib(tempdir, use_legacy_dataset):
_test_write_to_dataset_with_partitions(
tempdir / "test1", use_legacy_dataset)
_test_write_to_dataset_no_partitions(
tempdir / "test2", use_legacy_dataset)
@pytest.mark.pandas
@pytest.mark.s3
@parametrize_legacy_dataset
def test_write_to_dataset_pathlib_nonlocal(
tempdir, s3_example_s3fs, use_legacy_dataset
):
# pathlib paths are only accepted for local files
fs, _ = s3_example_s3fs
with pytest.raises(TypeError, match="path-like objects are only allowed"):
_test_write_to_dataset_with_partitions(
tempdir / "test1", use_legacy_dataset, filesystem=fs)
with pytest.raises(TypeError, match="path-like objects are only allowed"):
_test_write_to_dataset_no_partitions(
tempdir / "test2", use_legacy_dataset, filesystem=fs)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_write_to_dataset_with_partitions_s3fs(
s3_example_s3fs, use_legacy_dataset
):
fs, path = s3_example_s3fs
_test_write_to_dataset_with_partitions(
path, use_legacy_dataset, filesystem=fs)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_write_to_dataset_no_partitions_s3fs(
s3_example_s3fs, use_legacy_dataset
):
fs, path = s3_example_s3fs
_test_write_to_dataset_no_partitions(
path, use_legacy_dataset, filesystem=fs)
@pytest.mark.pandas
@parametrize_legacy_dataset_not_supported
def test_write_to_dataset_with_partitions_and_custom_filenames(
tempdir, use_legacy_dataset
):
output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
'group2': list('eefeffgeee'),
'num': list(range(10)),
'nan': [np.nan] * 10,
'date': np.arange('2017-01-01', '2017-01-11',
dtype='datetime64[D]')})
partition_by = ['group1', 'group2']
output_table = pa.Table.from_pandas(output_df)
path = str(tempdir)
def partition_filename_callback(keys):
return "{}-{}.parquet".format(*keys)
pq.write_to_dataset(output_table, path,
partition_by, partition_filename_callback,
use_legacy_dataset=use_legacy_dataset)
dataset = pq.ParquetDataset(path)
# ARROW-3538: Ensure partition filenames match the given pattern
# defined in the local function partition_filename_callback
expected_basenames = [
'a-e.parquet', 'a-f.parquet',
'b-e.parquet', 'b-f.parquet',
'b-g.parquet', 'c-e.parquet'
]
output_basenames = [os.path.basename(p.path) for p in dataset.pieces]
assert sorted(expected_basenames) == sorted(output_basenames)
@pytest.mark.pandas
def test_write_to_dataset_filesystem(tempdir):
df = pd.DataFrame({'A': [1, 2, 3]})
table = pa.Table.from_pandas(df)
path = str(tempdir)
pq.write_to_dataset(table, path, filesystem=fs.LocalFileSystem())
result = pq.read_table(path)
assert result.equals(table)
# TODO(dataset) support pickling
def _make_dataset_for_pickling(tempdir, N=100):
path = tempdir / 'data.parquet'
fs = LocalFileSystem._get_instance()
df = pd.DataFrame({
'index': np.arange(N),
'values': np.random.randn(N)
}, columns=['index', 'values'])
table = pa.Table.from_pandas(df)
num_groups = 3
with pq.ParquetWriter(path, table.schema) as writer:
for i in range(num_groups):
writer.write_table(table)
reader = pq.ParquetFile(path)
assert reader.metadata.num_row_groups == num_groups
metadata_path = tempdir / '_metadata'
with fs.open(metadata_path, 'wb') as f:
pq.write_metadata(table.schema, f)
dataset = pq.ParquetDataset(tempdir, filesystem=fs)
assert dataset.metadata_path == str(metadata_path)
return dataset
def _assert_dataset_is_picklable(dataset, pickler):
def is_pickleable(obj):
return obj == pickler.loads(pickler.dumps(obj))
assert is_pickleable(dataset)
assert is_pickleable(dataset.metadata)
assert is_pickleable(dataset.metadata.schema)
assert len(dataset.metadata.schema)
for column in dataset.metadata.schema:
assert is_pickleable(column)
for piece in dataset.pieces:
assert is_pickleable(piece)
metadata = piece.get_metadata()
assert metadata.num_row_groups
for i in range(metadata.num_row_groups):
assert is_pickleable(metadata.row_group(i))
@pytest.mark.pandas
def test_builtin_pickle_dataset(tempdir, datadir):
import pickle
dataset = _make_dataset_for_pickling(tempdir)
_assert_dataset_is_picklable(dataset, pickler=pickle)
@pytest.mark.pandas
def test_cloudpickle_dataset(tempdir, datadir):
cp = pytest.importorskip('cloudpickle')
dataset = _make_dataset_for_pickling(tempdir)
_assert_dataset_is_picklable(dataset, pickler=cp)
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_partitioned_dataset(tempdir, use_legacy_dataset):
# ARROW-3208: Segmentation fault when reading a Parquet partitioned dataset
# to a Parquet file
path = tempdir / "ARROW-3208"
df = pd.DataFrame({
'one': [-1, 10, 2.5, 100, 1000, 1, 29.2],
'two': [-1, 10, 2, 100, 1000, 1, 11],
'three': [0, 0, 0, 0, 0, 0, 0]
})
table = pa.Table.from_pandas(df)
pq.write_to_dataset(table, root_path=str(path),
partition_cols=['one', 'two'])
table = pq.ParquetDataset(
path, use_legacy_dataset=use_legacy_dataset).read()
pq.write_table(table, path / "output.parquet")
@pytest.mark.pandas
@parametrize_legacy_dataset
def test_dataset_read_dictionary(tempdir, use_legacy_dataset):
path = tempdir / "ARROW-3325-dataset"
t1 = pa.table([[util.rands(10) for i in range(5)] * 10], names=['f0'])
t2 = pa.table([[util.rands(10) for i in range(5)] * 10], names=['f0'])
# TODO pass use_legacy_dataset (need to fix unique names)
pq.write_to_dataset(t1, root_path=str(path))
pq.write_to_dataset(t2, root_path=str(path))
result = pq.ParquetDataset(
path, read_dictionary=['f0'],
use_legacy_dataset=use_legacy_dataset).read()
# The order of the chunks is non-deterministic
ex_chunks = [t1[0].chunk(0).dictionary_encode(),
t2[0].chunk(0).dictionary_encode()]
assert result[0].num_chunks == 2
c0, c1 = result[0].chunk(0), result[0].chunk(1)
if c0.equals(ex_chunks[0]):
assert c1.equals(ex_chunks[1])
else:
assert c0.equals(ex_chunks[1])
assert c1.equals(ex_chunks[0])
@pytest.mark.dataset
def test_dataset_unsupported_keywords():
with pytest.raises(ValueError, match="not yet supported with the new"):
pq.ParquetDataset("", use_legacy_dataset=False, schema=pa.schema([]))
with pytest.raises(ValueError, match="not yet supported with the new"):
pq.ParquetDataset("", use_legacy_dataset=False, metadata=pa.schema([]))
with pytest.raises(ValueError, match="not yet supported with the new"):
pq.ParquetDataset("", use_legacy_dataset=False, validate_schema=False)
with pytest.raises(ValueError, match="not yet supported with the new"):
pq.ParquetDataset("", use_legacy_dataset=False, split_row_groups=True)
with pytest.raises(ValueError, match="not yet supported with the new"):
pq.ParquetDataset("", use_legacy_dataset=False, metadata_nthreads=4)
with pytest.raises(ValueError, match="no longer supported"):
pq.read_table("", use_legacy_dataset=False, metadata=pa.schema([]))
@pytest.mark.dataset
def test_dataset_partitioning(tempdir):
import pyarrow.dataset as ds
# create small dataset with directory partitioning
root_path = tempdir / "test_partitioning"
(root_path / "2012" / "10" / "01").mkdir(parents=True)
table = pa.table({'a': [1, 2, 3]})
pq.write_table(
table, str(root_path / "2012" / "10" / "01" / "data.parquet"))
# This works with new dataset API
# read_table
part = ds.partitioning(field_names=["year", "month", "day"])
result = pq.read_table(
str(root_path), partitioning=part, use_legacy_dataset=False)
assert result.column_names == ["a", "year", "month", "day"]
result = pq.ParquetDataset(
str(root_path), partitioning=part, use_legacy_dataset=False).read()
assert result.column_names == ["a", "year", "month", "day"]
# This raises an error for legacy dataset
with pytest.raises(ValueError):
pq.read_table(
str(root_path), partitioning=part, use_legacy_dataset=True)
with pytest.raises(ValueError):
pq.ParquetDataset(
str(root_path), partitioning=part, use_legacy_dataset=True)
@pytest.mark.dataset
def test_parquet_dataset_new_filesystem(tempdir):
# Ensure we can pass new FileSystem object to ParquetDataset
# (use new implementation automatically without specifying
# use_legacy_dataset=False)
table = pa.table({'a': [1, 2, 3]})
pq.write_table(table, tempdir / 'data.parquet')
# don't use simple LocalFileSystem (as that gets mapped to legacy one)
filesystem = fs.SubTreeFileSystem(str(tempdir), fs.LocalFileSystem())
dataset = pq.ParquetDataset('.', filesystem=filesystem)
result = dataset.read()
assert result.equals(table)
def test_parquet_dataset_partitions_piece_path_with_fsspec(tempdir):
# ARROW-10462 ensure that on Windows we properly use posix-style paths
# as used by fsspec
fsspec = pytest.importorskip("fsspec")
filesystem = fsspec.filesystem('file')
table = pa.table({'a': [1, 2, 3]})
pq.write_table(table, tempdir / 'data.parquet')
# pass a posix-style path (using "/" also on Windows)
path = str(tempdir).replace("\\", "/")
dataset = pq.ParquetDataset(path, filesystem=filesystem)
# ensure the piece path is also posix-style
expected = path + "/data.parquet"
assert dataset.pieces[0].path == expected
|
apache-2.0
|
webmasterraj/FogOrNot
|
flask/lib/python2.7/site-packages/pandas/tseries/period.py
|
2
|
32600
|
# pylint: disable=E1101,E1103,W0232
import operator
from datetime import datetime, date, timedelta
import numpy as np
from pandas.core.base import PandasObject
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index, Index
from pandas.tseries.base import DatetimeIndexOpsMixin
from pandas.tseries.tools import parse_time_string
import pandas.tseries.offsets as offsets
from pandas._period import Period
import pandas._period as period
from pandas._period import (
get_period_field_arr,
_validate_end_alias,
_quarter_to_myear,
)
import pandas.core.common as com
from pandas.core.common import (isnull, _INT64_DTYPE, _maybe_box,
_values_from_object, ABCSeries,
is_integer, is_float)
from pandas import compat
from pandas.lib import Timestamp, Timedelta
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.algos as _algos
from pandas.compat import zip, u
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = _gfc(self.freq)
return get_period_field_arr(alias, self.values, base)
f.__name__ = name
f.__doc__ = docstring
return property(f)
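# Hedged illustration, not part of the original pandas source: the factory
# above creates read-only properties that pull one field out of the stored
# ordinals, roughly:
#
#   idx = PeriodIndex(start='2000-01', periods=3, freq='M')
#   idx.year    # -> array([2000, 2000, 2000])
#   idx.month   # -> array([1, 2, 3])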
def _get_ordinals(data, freq):
f = lambda x: Period(x, freq=freq).ordinal
if isinstance(data[0], Period):
return period.extract_ordinals(data, freq)
else:
return lib.map_infer(data, f)
def dt64arr_to_periodarr(data, freq, tz):
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: %s' % data.dtype)
base, mult = _gfc(freq)
return period.dt64arr_to_periodarr(data.view('i8'), base, tz)
# --- Period index sketch
def _period_index_cmp(opname, nat_result=False):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
def wrapper(self, other):
if isinstance(other, Period):
func = getattr(self.values, opname)
if other.freq != self.freq:
raise AssertionError("Frequencies must be equal")
result = func(other.ordinal)
elif isinstance(other, PeriodIndex):
if other.freq != self.freq:
raise AssertionError("Frequencies must be equal")
result = getattr(self.values, opname)(other.values)
mask = (com.mask_missing(self.values, tslib.iNaT) |
com.mask_missing(other.values, tslib.iNaT))
if mask.any():
result[mask] = nat_result
return result
else:
other = Period(other, freq=self.freq)
func = getattr(self.values, opname)
result = func(other.ordinal)
if other.ordinal == tslib.iNaT:
result.fill(nat_result)
mask = self.values == tslib.iNaT
if mask.any():
result[mask] = nat_result
return result
return wrapper
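# Hedged illustration, not part of the original pandas source: the wrapper
# above is bound to the rich-comparison slots of PeriodIndex below, so an
# element-wise comparison such as
#
#   pidx = PeriodIndex(start='2014Q1', periods=4, freq='Q')
#   pidx > Period('2014Q2', freq='Q')   # -> array([False, False, True, True])
#
# operates on the underlying int64 ordinals and raises if frequencies differ.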
class PeriodIndex(DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc. A value of 1 is the
period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
This ordinal representation is from the scikits.timeseries project.
For instance,
# construct period for day 1/1/1 and get the first second
i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
i.ordinal
===> 1
    Index keys are boxed to Period objects which carry the metadata (e.g.,
frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
dtype : NumPy dtype (default: i8)
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
year : int, array, or Series, default None
month : int, array, or Series, default None
quarter : int, array, or Series, default None
day : int, array, or Series, default None
hour : int, array, or Series, default None
minute : int, array, or Series, default None
second : int, array, or Series, default None
tz : object, default None
Timezone for converting datetime64 data to Periods
Examples
--------
>>> idx = PeriodIndex(year=year_arr, quarter=q_arr)
>>> idx2 = PeriodIndex(start='2000', end='2010', freq='A')
"""
_box_scalars = True
_typ = 'periodindex'
    _attributes = ['name', 'freq']
    _datetimelike_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
                         'weekofyear', 'week', 'dayofweek', 'weekday',
                         'dayofyear', 'quarter', 'qyear', 'freq',
                         'days_in_month', 'daysinmonth']
_is_numeric_dtype = False
freq = None
__eq__ = _period_index_cmp('__eq__')
__ne__ = _period_index_cmp('__ne__', nat_result=True)
__lt__ = _period_index_cmp('__lt__')
__gt__ = _period_index_cmp('__gt__')
__le__ = _period_index_cmp('__le__')
__ge__ = _period_index_cmp('__ge__')
def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None,
periods=None, copy=False, name=None, tz=None, **kwargs):
freq = frequencies.get_standard_freq(freq)
if periods is not None:
if is_float(periods):
periods = int(periods)
elif not is_integer(periods):
raise ValueError('Periods must be a number, got %s' %
str(periods))
if data is None:
if ordinal is not None:
data = np.asarray(ordinal, dtype=np.int64)
else:
data, freq = cls._generate_range(start, end, periods,
freq, kwargs)
else:
ordinal, freq = cls._from_arraylike(data, freq, tz)
data = np.array(ordinal, dtype=np.int64, copy=False)
return cls._simple_new(data, name=name, freq=freq)
@classmethod
def _generate_range(cls, start, end, periods, freq, fields):
field_count = len(fields)
if com._count_not_none(start, end) > 0:
if field_count > 0:
raise ValueError('Can either instantiate from fields '
'or endpoints, but not both')
subarr, freq = _get_ordinal_range(start, end, periods, freq)
elif field_count > 0:
subarr, freq = _range_from_fields(freq=freq, **fields)
else:
raise ValueError('Not enough parameters to construct '
'Period range')
return subarr, freq
@classmethod
def _from_arraylike(cls, data, freq, tz):
if not isinstance(data, (np.ndarray, PeriodIndex, DatetimeIndex, Int64Index)):
if np.isscalar(data) or isinstance(data, Period):
raise ValueError('PeriodIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = com._ensure_int64(data)
if freq is None:
raise ValueError('freq not specified')
data = np.array([Period(x, freq=freq).ordinal for x in data],
dtype=np.int64)
except (TypeError, ValueError):
data = com._ensure_object(data)
if freq is None and len(data) > 0:
freq = getattr(data[0], 'freq', None)
if freq is None:
raise ValueError('freq not specified and cannot be '
'inferred from first element')
data = _get_ordinals(data, freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data.values
else:
base1, _ = _gfc(data.freq)
base2, _ = _gfc(freq)
data = period.period_asfreq_arr(data.values, base1,
base2, 1)
else:
if freq is None and len(data) > 0:
freq = getattr(data[0], 'freq', None)
if freq is None:
raise ValueError('freq not specified and cannot be '
'inferred from first element')
if data.dtype != np.int64:
if np.issubdtype(data.dtype, np.datetime64):
data = dt64arr_to_periodarr(data, freq, tz)
else:
try:
data = com._ensure_int64(data)
except (TypeError, ValueError):
data = com._ensure_object(data)
data = _get_ordinals(data, freq)
return data, freq
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
result = object.__new__(cls)
result._data = values
result.name = name
result.freq = freq
result._reset_identity()
return result
@property
def _na_value(self):
return self._box_func(tslib.iNaT)
def __contains__(self, key):
if not isinstance(key, Period) or key.freq != self.freq:
if isinstance(key, compat.string_types):
try:
self.get_loc(key)
return True
except Exception:
return False
return False
return key.ordinal in self._engine
@property
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
def _to_embed(self, keep_tz=False):
""" return an array repr of this object, potentially casting to object """
return self.asobject.values
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
where_idx = where
if isinstance(where_idx, DatetimeIndex):
where_idx = PeriodIndex(where_idx.values, freq=self.freq)
locs = self.values[mask].searchsorted(where_idx.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where_idx.values < self.values[first])] = -1
return result
def _array_values(self):
return self.asobject
def astype(self, dtype):
dtype = np.dtype(dtype)
if dtype == np.object_:
return Index(np.array(list(self), dtype), dtype)
elif dtype == _INT64_DTYPE:
return Index(self.values, dtype)
raise ValueError('Cannot cast PeriodIndex to dtype %s' % dtype)
def searchsorted(self, key, side='left'):
if isinstance(key, Period):
if key.freq != self.freq:
raise ValueError("Different period frequency: %s" % key.freq)
key = key.ordinal
elif isinstance(key, compat.string_types):
key = Period(key, freq=self.freq).ordinal
return self.values.searchsorted(key, side=side)
@property
def is_all_dates(self):
return True
@property
def is_full(self):
"""
        Returns True if there are no missing periods from start to end
"""
if len(self) == 0:
return True
if not self.is_monotonic:
raise ValueError('Index is not monotonic')
values = self.values
return ((values[1:] - values[:-1]) < 2).all()
@property
def freqstr(self):
return self.freq
def asfreq(self, freq=None, how='E'):
how = _validate_end_alias(how)
freq = frequencies.get_standard_freq(freq)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
if mult2 != 1:
raise ValueError('Only mult == 1 supported')
end = how == 'E'
new_data = period.period_asfreq_arr(self.values, base1, base2, end)
return self._simple_new(new_data, self.name, freq=freq)
def to_datetime(self, dayfirst=False):
return self.to_timestamp()
year = _field_accessor('year', 0, "The year of the period")
month = _field_accessor('month', 3, "The month as January=1, December=12")
day = _field_accessor('day', 4, "The days of the period")
hour = _field_accessor('hour', 5, "The hour of the period")
minute = _field_accessor('minute', 6, "The minute of the period")
second = _field_accessor('second', 7, "The second of the period")
weekofyear = _field_accessor('week', 8, "The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 10, "The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
dayofyear = day_of_year = _field_accessor('dayofyear', 9, "The ordinal day of the year")
quarter = _field_accessor('quarter', 2, "The quarter of the date")
qyear = _field_accessor('qyear', 1)
days_in_month = _field_accessor('days_in_month', 11, "The number of days in the month")
daysinmonth = days_in_month
def _get_object_array(self):
freq = self.freq
return np.array([ Period._from_ordinal(ordinal=x, freq=freq) for x in self.values], copy=False)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self._get_object_array()
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if (not hasattr(other, 'inferred_type') or
other.inferred_type != 'int64'):
try:
other = PeriodIndex(other)
except:
return False
return np.array_equal(self.asi8, other.asi8)
def to_timestamp(self, freq=None, how='start'):
"""
Cast to DatetimeIndex
Parameters
----------
freq : string or DateOffset, default 'D' for week or longer, 'S'
otherwise
Target frequency
how : {'s', 'e', 'start', 'end'}
Returns
-------
DatetimeIndex
"""
how = _validate_end_alias(how)
if freq is None:
base, mult = _gfc(self.freq)
freq = frequencies.get_to_timestamp_base(base)
base, mult = _gfc(freq)
new_data = self.asfreq(freq, how)
new_data = period.periodarr_to_dt64arr(new_data.values, base)
return DatetimeIndex(new_data, freq='infer', name=self.name)
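    # Hedged illustration, not part of the original pandas source: with the
    # defaults above a monthly index is converted at its period start, roughly
    #   PeriodIndex(['2000-01', '2000-02'], freq='M').to_timestamp()
    #   -> DatetimeIndex(['2000-01-01', '2000-02-01'])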
def _add_delta(self, other):
if isinstance(other, (timedelta, np.timedelta64, offsets.Tick, Timedelta)):
offset = frequencies.to_offset(self.freq)
if isinstance(offset, offsets.Tick):
nanos = tslib._delta_to_nanoseconds(other)
offset_nanos = tslib._delta_to_nanoseconds(offset)
if nanos % offset_nanos == 0:
return self.shift(nanos // offset_nanos)
elif isinstance(other, offsets.DateOffset):
freqstr = frequencies.get_standard_freq(other)
base = frequencies.get_base_alias(freqstr)
if base == self.freq:
return self.shift(other.n)
raise ValueError("Input has different freq from PeriodIndex(freq={0})".format(self.freq))
def shift(self, n):
"""
        Specialized shift which produces a PeriodIndex
        Parameters
        ----------
        n : int
            Periods to shift by
Returns
-------
shifted : PeriodIndex
"""
mask = self.values == tslib.iNaT
values = self.values + n
values[mask] = tslib.iNaT
return PeriodIndex(data=values, name=self.name, freq=self.freq)
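    # Hedged illustration, not part of the original pandas source: shifting
    # simply offsets the ordinals, so for a monthly index
    #   PeriodIndex(['2000-01', '2000-02'], freq='M').shift(2)
    # is expected to give PeriodIndex(['2000-03', '2000-04'], freq='M'),
    # with NaT entries left untouched.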
@property
def inferred_type(self):
# b/c data is represented as ints make sure we can't have ambiguous
# indexing
return 'period'
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
s = _values_from_object(series)
try:
return _maybe_box(self, super(PeriodIndex, self).get_value(s, key), series, key)
except (KeyError, IndexError):
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
grp = frequencies._infer_period_group(reso)
freqn = frequencies._period_group(self.freq)
vals = self.values
# if our data is higher resolution than requested key, slice
if grp < freqn:
iv = Period(asdt, freq=(grp, 1))
ord1 = iv.asfreq(self.freq, how='S').ordinal
ord2 = iv.asfreq(self.freq, how='E').ordinal
if ord2 < vals[0] or ord1 > vals[-1]:
raise KeyError(key)
pos = np.searchsorted(self.values, [ord1, ord2])
key = slice(pos[0], pos[1] + 1)
return series[key]
elif grp == freqn:
key = Period(asdt, freq=self.freq).ordinal
return _maybe_box(self, self._engine.get_value(s, key), series, key)
else:
raise KeyError(key)
except TypeError:
pass
key = Period(key, self.freq).ordinal
return _maybe_box(self, self._engine.get_value(s, key), series, key)
def get_indexer(self, target, method=None, limit=None):
if hasattr(target, 'freq') and target.freq != self.freq:
raise ValueError('target and index have different freq: '
'(%s, %s)' % (target.freq, self.freq))
return Index.get_indexer(self, target, method, limit)
def get_loc(self, key, method=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
try:
return self._engine.get_loc(key)
except KeyError:
if is_integer(key):
raise
try:
asdt, parsed, reso = parse_time_string(key, self.freq)
key = asdt
except TypeError:
pass
key = Period(key, self.freq)
try:
return Index.get_loc(self, key.ordinal, method=method)
except KeyError:
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string or a datetime, cast it to Period.ordinal according to
resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : string / None
Returns
-------
bound : Period or object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
if isinstance(label, datetime):
return Period(label, freq=self.freq)
elif isinstance(label, compat.string_types):
try:
_, parsed, reso = parse_time_string(label, self.freq)
bounds = self._parsed_string_to_bounds(reso, parsed)
return bounds[0 if side == 'left' else 1]
except Exception:
raise KeyError(label)
elif is_integer(label) or is_float(label):
self._invalid_indexer('slice',label)
return label
def _parsed_string_to_bounds(self, reso, parsed):
if reso == 'year':
t1 = Period(year=parsed.year, freq='A')
elif reso == 'month':
t1 = Period(year=parsed.year, month=parsed.month, freq='M')
elif reso == 'quarter':
q = (parsed.month - 1) // 3 + 1
t1 = Period(year=parsed.year, quarter=q, freq='Q-DEC')
elif reso == 'day':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
freq='D')
elif reso == 'hour':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, freq='H')
elif reso == 'minute':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute, freq='T')
elif reso == 'second':
t1 = Period(year=parsed.year, month=parsed.month, day=parsed.day,
hour=parsed.hour, minute=parsed.minute, second=parsed.second,
freq='S')
else:
            raise KeyError(reso)
return (t1.asfreq(self.freq, how='start'),
t1.asfreq(self.freq, how='end'))
def _get_string_slice(self, key):
if not self.is_monotonic:
raise ValueError('Partial indexing only valid for '
'ordered time series')
key, parsed, reso = parse_time_string(key, self.freq)
grp = frequencies._infer_period_group(reso)
freqn = frequencies._period_group(self.freq)
if reso in ['day', 'hour', 'minute', 'second'] and not grp < freqn:
raise KeyError(key)
t1, t2 = self._parsed_string_to_bounds(reso, parsed)
return slice(self.searchsorted(t1.ordinal, side='left'),
self.searchsorted(t2.ordinal, side='right'))
def join(self, other, how='left', level=None, return_indexers=False):
"""
See Index.join
"""
self._assert_can_do_setop(other)
result = Int64Index.join(self, other, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
result, lidx, ridx = result
return self._apply_meta(result), lidx, ridx
return self._apply_meta(result)
def _assert_can_do_setop(self, other):
if not isinstance(other, PeriodIndex):
raise ValueError('can only call with other PeriodIndex-ed objects')
if self.freq != other.freq:
raise ValueError('Only like-indexed PeriodIndexes compatible '
'for join (for now)')
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
result = self._apply_meta(result)
result.name = name
return result
def _apply_meta(self, rawarr):
if not isinstance(rawarr, PeriodIndex):
rawarr = PeriodIndex(rawarr, freq=self.freq)
return rawarr
def __getitem__(self, key):
getitem = self._data.__getitem__
if np.isscalar(key):
val = getitem(key)
return Period(ordinal=val, freq=self.freq)
else:
if com.is_bool_indexer(key):
key = np.asarray(key)
result = getitem(key)
if result.ndim > 1:
# MPL kludge
# values = np.asarray(list(values), dtype=object)
# return values.reshape(result.shape)
return PeriodIndex(result, name=self.name, freq=self.freq)
return PeriodIndex(result, name=self.name, freq=self.freq)
def _format_native_types(self, na_rep=u('NaT'), **kwargs):
values = np.array(list(self), dtype=object)
mask = isnull(self.values)
values[mask] = na_rep
imask = ~mask
values[imask] = np.array([u('%s') % dt for dt in values[imask]])
return values.tolist()
def __array_finalize__(self, obj):
if not self.ndim: # pragma: no cover
return self.item()
self.freq = getattr(obj, 'freq', None)
self.name = getattr(obj, 'name', None)
self._reset_identity()
def _format_footer(self):
tagline = 'Length: %d, Freq: %s'
return tagline % (len(self), self.freqstr)
def take(self, indices, axis=None):
"""
Analogous to ndarray.take
"""
indices = com._ensure_platform_int(indices)
taken = self.values.take(indices, axis=axis)
return self._simple_new(taken, self.name, freq=self.freq)
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
name = self.name
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if isinstance(obj, Index) and obj.name != name:
name = None
break
to_concat = self._ensure_compat_concat(to_concat)
if isinstance(to_concat[0], PeriodIndex):
if len(set([x.freq for x in to_concat])) > 1:
# box
to_concat = [x.asobject.values for x in to_concat]
else:
cat_values = np.concatenate([x.values for x in to_concat])
return PeriodIndex(cat_values, freq=self.freq, name=name)
to_concat = [x.values if isinstance(x, Index) else x
for x in to_concat]
return Index(com._concat_compat(to_concat), name=name)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(PeriodIndex, self).__setstate__(state)
elif isinstance(state, tuple):
# < 0.15 compat
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
try: # backcompat
self.freq = own_state[1]
except:
pass
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(self, state)
self._data = data
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def tz_convert(self, tz):
"""
Convert tz-aware DatetimeIndex from one time zone to another (using pytz/dateutil)
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding UTC time.
Returns
-------
normalized : DatetimeIndex
Note
----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
def tz_localize(self, tz, infer_dst=False):
"""
Localize tz-naive DatetimeIndex to given time zone (using pytz/dateutil),
or remove timezone from tz-aware DatetimeIndex
Parameters
----------
tz : string, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted to
time zone of the TimeSeries.
None will remove timezone holding local time.
infer_dst : boolean, default False
Attempt to infer fall dst-transition hours based on order
Returns
-------
localized : DatetimeIndex
Note
----
Not currently implemented for PeriodIndex
"""
raise NotImplementedError("Not yet implemented for PeriodIndex")
PeriodIndex._add_numeric_methods_disabled()
PeriodIndex._add_logical_methods_disabled()
PeriodIndex._add_datetimelike_methods()
def _get_ordinal_range(start, end, periods, freq):
if com._count_not_none(start, end, periods) < 2:
raise ValueError('Must specify 2 of start, end, periods')
if start is not None:
start = Period(start, freq)
if end is not None:
end = Period(end, freq)
is_start_per = isinstance(start, Period)
is_end_per = isinstance(end, Period)
if is_start_per and is_end_per and start.freq != end.freq:
raise ValueError('Start and end must have same freq')
if ((is_start_per and start.ordinal == tslib.iNaT) or
(is_end_per and end.ordinal == tslib.iNaT)):
raise ValueError('Start and end must not be NaT')
if freq is None:
if is_start_per:
freq = start.freq
elif is_end_per:
freq = end.freq
else: # pragma: no cover
raise ValueError('Could not infer freq from start/end')
if periods is not None:
if start is None:
data = np.arange(end.ordinal - periods + 1,
end.ordinal + 1,
dtype=np.int64)
else:
data = np.arange(start.ordinal, start.ordinal + periods,
dtype=np.int64)
else:
data = np.arange(start.ordinal, end.ordinal + 1, dtype=np.int64)
return data, freq
def _range_from_fields(year=None, month=None, quarter=None, day=None,
hour=None, minute=None, second=None, freq=None):
if hour is None:
hour = 0
if minute is None:
minute = 0
if second is None:
second = 0
if day is None:
day = 1
ordinals = []
if quarter is not None:
if freq is None:
freq = 'Q'
base = frequencies.FreqGroup.FR_QTR
else:
base, mult = _gfc(freq)
if mult != 1:
raise ValueError('Only mult == 1 supported')
if base != frequencies.FreqGroup.FR_QTR:
raise AssertionError("base must equal FR_QTR")
year, quarter = _make_field_arrays(year, quarter)
for y, q in zip(year, quarter):
y, m = _quarter_to_myear(y, q, freq)
val = period.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base)
ordinals.append(val)
else:
base, mult = _gfc(freq)
if mult != 1:
raise ValueError('Only mult == 1 supported')
arrays = _make_field_arrays(year, month, day, hour, minute, second)
for y, mth, d, h, mn, s in zip(*arrays):
ordinals.append(period.period_ordinal(y, mth, d, h, mn, s, 0, 0, base))
return np.array(ordinals, dtype=np.int64), freq
def _make_field_arrays(*fields):
length = None
for x in fields:
if isinstance(x, (list, np.ndarray, ABCSeries)):
if length is not None and len(x) != length:
raise ValueError('Mismatched Period array lengths')
elif length is None:
length = len(x)
arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries))
else np.repeat(x, length) for x in fields]
return arrays
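# Small sketch of the broadcasting rule implemented above: array-like fields
# fix the common length, scalar fields are repeated to match it. For example:
#
#   _make_field_arrays([2000, 2001], 1)
#       -> [array([2000, 2001]), array([1, 1])]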
def pnow(freq=None):
return Period(datetime.now(), freq=freq)
def period_range(start=None, end=None, periods=None, freq='D', name=None):
"""
Return a fixed frequency PeriodIndex, with day (calendar) as the default
frequency.
Parameters
----------
start : period-like, optional
Left bound for generating periods
end : period-like, optional
Right bound for generating periods
periods : int, default None
Number of periods in the index
freq : str/DateOffset, default 'D'
Frequency alias
name : str, default None
Name for the resulting PeriodIndex
Returns
-------
prng : PeriodIndex
"""
return PeriodIndex(start=start, end=end, periods=periods,
freq=freq, name=name)
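# Minimal usage sketch for period_range; the repr shown is indicative only and
# its exact form differs between pandas versions:
#
#   >>> period_range(start='2000-01', periods=3, freq='M')
#   PeriodIndex(['2000-01', '2000-02', '2000-03'], freq='M')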
|
gpl-2.0
|
FedericoFontana/backtester
|
tests/test_portfolio_check_inputs.py
|
1
|
9299
|
import pytest
import numpy as np
import pandas as pd
from pandas import date_range as t
from datetime import datetime as day
from backtester.portfolio import Portfolio
def test_weights_type_reject_when_wrong():
prices = pd.DataFrame([[1, 2],
[2, 4]],
index=t('2017-01-01', '2017-01-02'))
with pytest.raises(TypeError):
Portfolio(weights=pd.Series(), prices=prices)
with pytest.raises(TypeError):
Portfolio(weights=dict(), prices=prices)
with pytest.raises(TypeError):
Portfolio(weights=list(), prices=prices)
def test_weights_index_reject_when_wrong():
# Initialise parameters. What we test here is the index for weights.
prices = pd.DataFrame([[1, 2],
[2, 4]],
index=t('2017-01-01', '2017-01-02'))
weights = pd.DataFrame([[.5, .5],
[.5, .5]])
with pytest.raises(IndexError):
# Index is numeric.
weights.index = [0, 1]
Portfolio(weights=weights, prices=prices)
with pytest.raises(IndexError):
# Index is string.
weights.index = ['2017-01-01', '2017-01-02']
Portfolio(weights=weights, prices=prices)
with pytest.raises(IndexError):
# Index mixes string and datetime.
weights.index = ['2017-01-01', day(2017, 1, 2)]
Portfolio(weights=weights, prices=prices)
with pytest.raises(IndexError):
# Index is not chronological.
weights.index = t('2017-01-01', '2017-01-02')[::-1]
Portfolio(weights=weights, prices=prices)
with pytest.raises(IndexError):
# Index is not chronological.
weights.index = [day(2017, 1, 2), day(2017, 1, 1)]
Portfolio(weights=weights, prices=prices)
with pytest.raises(IndexError):
# Index is not unique.
weights.index = [day(2017, 1, 1), day(2017, 1, 1)]
Portfolio(weights=weights, prices=prices)
def test_weights_values_reject_when_wrong():
prices = pd.DataFrame([[1, 2],
[2, 4]],
index=t('2017-01-01', '2017-01-02'))
weights_index = t('2017-01-01', '2017-01-02')
with pytest.raises(ValueError):
# Weights with None.
weights = pd.DataFrame([[.5, .5],
[.5, None]], index=weights_index)
Portfolio(weights=weights, prices=prices)
with pytest.raises(ValueError):
# Weights with numpy.nan.
weights = pd.DataFrame([[.5, .5],
[.5, np.nan]], index=weights_index)
Portfolio(weights=weights, prices=prices)
with pytest.raises(ValueError):
# Weights with string.
weights = pd.DataFrame([[1, '0'],
[0, '1']], index=weights_index)
Portfolio(weights=weights, prices=prices)
with pytest.raises(ValueError):
# Weights with rows not summing to one.
weights = pd.DataFrame([[.5, .49],
[.5, .5]], index=weights_index)
Portfolio(weights=weights, prices=prices)
with pytest.raises(ValueError):
# Weights are negative.
weights = pd.DataFrame([[-1, 2],
[0, 1]], index=weights_index)
Portfolio(weights=weights, prices=prices)
def test_weights_columns_reject_when_wrong():
with pytest.raises(KeyError):
# Duplicate columns.
prices = pd.DataFrame([[1, 2],
[2, 4]],
index=t('2017-01-01', '2017-01-02'))
weights = pd.DataFrame([[.5, .5],
[.5, .5]],
index=t('2017-01-01', '2017-01-02'),
columns=['col1', 'col1'])
Portfolio(weights=weights, prices=prices)
def test_prices_type_reject_when_wrong():
weights = pd.DataFrame([[.5, .5],
[.5, .5]],
index=t('2017-01-01', '2017-01-02'))
with pytest.raises(TypeError):
Portfolio(weights=weights, prices=pd.Series())
with pytest.raises(TypeError):
Portfolio(weights=weights, prices=dict())
with pytest.raises(TypeError):
Portfolio(weights=weights, prices=list())
def test_prices_index_reject_when_wrong():
# Initialise parameters. What we test here is the index for prices.
prices = pd.DataFrame([[1, 2],
[2, 4]])
weights = pd.DataFrame([[.5, .5],
[.5, .5]],
index=t('2017-01-01', '2017-01-02'))
with pytest.raises(IndexError):
# Index is numeric.
prices.index = [0, 1]
Portfolio(weights=weights, prices=prices)
with pytest.raises(IndexError):
# Index is string.
prices.index = ['2017-01-01', '2017-01-02']
Portfolio(weights=weights, prices=prices)
with pytest.raises(IndexError):
# Index mixes string and datetime.
prices.index = ['2017-01-01', day(2017, 1, 2)]
Portfolio(weights=weights, prices=prices)
with pytest.raises(IndexError):
# Index is not chronological.
prices.index = t('2017-01-01', '2017-01-02')[::-1]
Portfolio(weights=weights, prices=prices)
with pytest.raises(IndexError):
# Index is not chronological.
prices.index = [day(2017, 1, 2), day(2017, 1, 1)]
Portfolio(weights=weights, prices=prices)
with pytest.raises(IndexError):
# Index is not unique.
prices.index = [day(2017, 1, 1), day(2017, 1, 1)]
Portfolio(weights=weights, prices=prices)
def test_prices_values_reject_when_wrong():
weights = pd.DataFrame([[.5, .5],
[.5, .5]],
index=t('2017-01-01', '2017-01-02'))
prices_index = t('2017-01-01', '2017-01-02')
with pytest.raises(ValueError):
# Prices with None.
prices = pd.DataFrame([[.5, .5],
[.5, None]], index=prices_index)
Portfolio(weights=weights, prices=prices)
with pytest.raises(ValueError):
# Prices with numpy.nan.
prices = pd.DataFrame([[.5, .5],
[.5, np.nan]], index=prices_index)
Portfolio(weights=weights, prices=prices)
with pytest.raises(ValueError):
# Prices with string.
prices = pd.DataFrame([[1, '0'],
[0, '1']], index=prices_index)
Portfolio(weights=weights, prices=prices)
with pytest.raises(ValueError):
# Prices are negative.
prices = pd.DataFrame([[-1, 2],
[0, 1]], index=prices_index)
Portfolio(weights=weights, prices=prices)
with pytest.raises(ValueError):
# Prices are zero.
prices = pd.DataFrame([[1, 2],
[0, 1]], index=prices_index)
Portfolio(weights=weights, prices=prices)
def test_prices_columns_reject_when_wrong():
with pytest.raises(KeyError):
# Duplicate columns.
prices = pd.DataFrame([[1, 2],
[2, 4]],
index=t('2017-01-01', '2017-01-02'),
columns=['col1', 'col1'])
weights = pd.DataFrame([[.5, .5],
[.5, .5]],
index=t('2017-01-01', '2017-01-02'))
Portfolio(weights=weights, prices=prices)
def test_weights_and_prices_index():
with pytest.raises(IndexError):
# All dates in weights must be in prices.
prices = pd.DataFrame([[1, 2],
[2, 4]],
index=t('2017-01-01', '2017-01-02'))
weights = pd.DataFrame([[.5, .5],
[.5, .5]],
index=t('2017-01-02', '2017-01-03'))
Portfolio(weights=weights, prices=prices)
def test_benchmark_is_a_column_in_prices():
# The declared benchmark symbol must be a column of prices.
with pytest.raises(KeyError):
prices = pd.DataFrame([[1, 2],
[1, 2],
[2, 4]],
index=t('2017-01-01', '2017-01-03'),
columns=['Stock', 'S&P500'])
weights = pd.DataFrame([[.5, .5],
[.5, .5]],
index=t('2017-01-02', '2017-01-03'),
columns=['Stock', 'S&P500'])
Portfolio(weights, prices, benchmark='MSCI World')
def test_risk_free_is_a_column_in_prices():
# The declared risk free symbol must be a column of prices.
with pytest.raises(KeyError):
prices = pd.DataFrame([[1, 2],
[1, 2],
[2, 4]],
index=t('2017-01-01', '2017-01-03'),
columns=['Stock', 'T-Bill'])
weights = pd.DataFrame([[.5, .5],
[.5, .5]],
index=t('2017-01-02', '2017-01-03'),
columns=['Stock', 'T-Bill'])
Portfolio(weights, prices, risk_free='BOT')
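# Happy-path sketch to complement the rejection tests above: inputs satisfying
# every constraint exercised in this file (datetime index, unique and
# chronological, matching unique columns, strictly positive prices,
# non-negative weights whose rows sum to one, weight dates contained in price
# dates) should construct without raising. This assumes Portfolio enforces no
# further constraints beyond the ones tested here.
def test_valid_inputs_are_accepted():
    prices = pd.DataFrame([[1, 2],
                           [2, 4]],
                          index=t('2017-01-01', '2017-01-02'),
                          columns=['Stock', 'Bond'])
    weights = pd.DataFrame([[.5, .5],
                            [.4, .6]],
                           index=t('2017-01-01', '2017-01-02'),
                           columns=['Stock', 'Bond'])
    Portfolio(weights=weights, prices=prices)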
|
gpl-3.0
|
waltervh/BornAgain
|
dev-tools/mpi/pytests/mpi_cpp_test.py
|
3
|
2236
|
# Mixture of cylinders and prisms without interference
from mpi4py import MPI # this line has to be first
import numpy
import matplotlib
import pylab
from libBornAgainCore import *
comm = MPI.COMM_WORLD
world_size = comm.Get_size()
world_rank = comm.Get_rank()
def get_sample():
"""
Build and return the sample representing cylinders and prisms on top of a
substrate, without interference.
"""
# defining materials
m_air = HomogeneousMaterial("Air", 0.0, 0.0)
m_substrate = HomogeneousMaterial("Substrate", 6e-6, 2e-8)
m_particle = HomogeneousMaterial("Particle", 6e-4, 2e-8)
# collection of particles
cylinder_ff = FormFactorCylinder(5*nanometer, 5*nanometer)
cylinder = Particle(m_particle, cylinder_ff)
prism_ff = FormFactorPrism3(10*nanometer, 5*nanometer)
prism = Particle(m_particle, prism_ff)
particle_layout = ParticleLayout()
particle_layout.addParticle(cylinder, 0.5)
particle_layout.addParticle(prism, 0.5)
# air layer with particles and substrate form multi layer
air_layer = Layer(m_air)
air_layer.addLayout(particle_layout)
substrate_layer = Layer(m_substrate)
multi_layer = MultiLayer()
multi_layer.addLayer(air_layer)
multi_layer.addLayer(substrate_layer)
return multi_layer
def get_simulation():
"""
Create and return GISAS simulation with beam and detector defined
"""
simulation = GISASSimulation()
simulation.setDetectorParameters(100, -1.0*degree, 1.0*degree, 100, 0.0*degree, 2.0*degree)
simulation.setBeamParameters(1.0*angstrom, 0.2*degree, 0.0*degree)
sample = get_sample()
simulation.setSample(sample)
return simulation
def run_simulation():
"""
Run simulation and plot results
"""
simulation = get_simulation()
if(world_size == 1):
print("Not an MPI environment, run with 'mpirun -n 4 python ompi_sim_example.py'")
exit(0)
simulation.runMPISimulation()
if(world_rank == 0):
sumresult = simulation.result().array()
print(sumresult)
# pylab.imshow(sumresult + 1, norm=matplotlib.colors.LogNorm(), extent=[-1.0, 1.0, 0, 2.0])
# pylab.show()
if __name__ == '__main__':
run_simulation()
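# Typical launch, mirroring the hint printed in run_simulation (the process
# count of 4 is illustrative, not a requirement of the script):
#
#   mpirun -n 4 python mpi_cpp_test.py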
|
gpl-3.0
|
alexeyum/scikit-learn
|
sklearn/__check_build/__init__.py
|
345
|
1671
|
""" Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
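# Hedged sketch of what the guard above produces when the compiled extension
# is missing; the directory listing and the wrapped exception text depend on
# the local checkout, so only the shape of the message is shown:
#
#   ImportError: <original import error>
#   ___________________________________________________________________________
#   Contents of <...>/sklearn/__check_build: <directory listing>
#   ___________________________________________________________________________
#   It seems that scikit-learn has not been built correctly. ...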
|
bsd-3-clause
|