repo_name | path | copies | size | content | license
---|---|---|---|---|---|
ryfeus/lambda-packs | Pandas_numpy/source/pandas/core/missing.py | 6 | 23369 | """
Routines for filling missing data
"""
import numpy as np
from distutils.version import LooseVersion
from pandas._libs import algos, lib
from pandas.compat import range, string_types
from pandas.core.dtypes.common import (
is_numeric_v_string_like,
is_float_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_integer_dtype,
is_scalar,
is_integer,
needs_i8_conversion,
_ensure_float64)
from pandas.core.dtypes.cast import infer_dtype_from_array
from pandas.core.dtypes.missing import isna
def mask_missing(arr, values_to_mask):
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
"""
dtype, values_to_mask = infer_dtype_from_array(values_to_mask)
try:
values_to_mask = np.array(values_to_mask, dtype=dtype)
except Exception:
values_to_mask = np.array(values_to_mask, dtype=object)
na_mask = isna(values_to_mask)
nonna = values_to_mask[~na_mask]
mask = None
for x in nonna:
if mask is None:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask = False
else:
mask = arr == x
# if x is a string and arr is not, then we get False and we must
# expand the mask to size arr.shape
if is_scalar(mask):
mask = np.zeros(arr.shape, dtype=bool)
else:
# numpy elementwise comparison warning
if is_numeric_v_string_like(arr, x):
mask |= False
else:
mask |= arr == x
if na_mask.any():
if mask is None:
mask = isna(arr)
else:
mask |= isna(arr)
return mask
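# Hedged usage sketch (illustrative values, not taken from the pandas test suite):
#
#   >>> arr = np.array([1.0, 2.0, np.nan, 4.0])
#   >>> mask_missing(arr, [2.0, np.nan])
#   array([False,  True,  True, False])
#
# Entries equal to 2.0 or NaN are flagged True; everything else stays False.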
def clean_fill_method(method, allow_nearest=False):
# asfreq is compat for resampling
if method in [None, 'asfreq']:
return None
if isinstance(method, string_types):
method = method.lower()
if method == 'ffill':
method = 'pad'
elif method == 'bfill':
method = 'backfill'
valid_methods = ['pad', 'backfill']
expecting = 'pad (ffill) or backfill (bfill)'
if allow_nearest:
valid_methods.append('nearest')
expecting = 'pad (ffill), backfill (bfill) or nearest'
if method not in valid_methods:
msg = ('Invalid fill method. Expecting {expecting}. Got {method}'
.format(expecting=expecting, method=method))
raise ValueError(msg)
return method
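# Hedged examples of the alias resolution above:
#
#   >>> clean_fill_method('ffill')
#   'pad'
#   >>> clean_fill_method('bfill')
#   'backfill'
#   >>> clean_fill_method(None) is None
#   True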
def clean_interp_method(method, **kwargs):
order = kwargs.get('order')
valid = ['linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear',
'quadratic', 'cubic', 'barycentric', 'polynomial', 'krogh',
'piecewise_polynomial', 'pchip', 'akima', 'spline',
'from_derivatives']
if method in ('spline', 'polynomial') and order is None:
raise ValueError("You must specify the order of the spline or "
"polynomial.")
if method not in valid:
raise ValueError("method must be one of {valid}. Got '{method}' "
"instead.".format(valid=valid, method=method))
return method
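# Hedged example: 'spline' and 'polynomial' must be given an order, e.g.
#
#   >>> clean_interp_method('polynomial', order=2)
#   'polynomial'
#
# whereas clean_interp_method('polynomial') with no order raises ValueError.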
def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
limit_direction='forward', fill_value=None,
bounds_error=False, order=None, **kwargs):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each be 1-d arrays of the same length.
Bounds_error is currently hardcoded to False since non-scipy ones don't
take it as an argument.
"""
# Treat the original, non-scipy methods first.
invalid = isna(yvalues)
valid = ~invalid
if not valid.any():
# have to call np.asarray(xvalues) since xvalues could be an Index
# which can't be mutated
result = np.empty_like(np.asarray(xvalues), dtype=np.float64)
result.fill(np.nan)
return result
if valid.all():
return yvalues
if method == 'time':
if not getattr(xvalues, 'is_all_dates', None):
# if not issubclass(xvalues.dtype.type, np.datetime64):
raise ValueError('time-weighted interpolation only works '
'on Series or DataFrames with a '
'DatetimeIndex')
method = 'values'
valid_limit_directions = ['forward', 'backward', 'both']
limit_direction = limit_direction.lower()
if limit_direction not in valid_limit_directions:
msg = ('Invalid limit_direction: expecting one of {valid!r}, '
'got {invalid!r}.')
raise ValueError(msg.format(valid=valid_limit_directions,
invalid=limit_direction))
from pandas import Series
ys = Series(yvalues)
start_nans = set(range(ys.first_valid_index()))
end_nans = set(range(1 + ys.last_valid_index(), len(valid)))
# violate_limit is a list of the indexes in the series whose yvalue is
# currently NaN, and should still be NaN after the interpolation.
# Specifically:
#
# If limit_direction='forward' or None then the list will contain NaNs at
# the beginning of the series, and NaNs that are more than 'limit' away
# from the prior non-NaN.
#
# If limit_direction='backward' then the list will contain NaNs at
# the end of the series, and NaNs that are more than 'limit' away
# from the subsequent non-NaN.
#
# If limit_direction='both' then the list will contain NaNs that
# are more than 'limit' away from any non-NaN.
#
# If limit=None, then use default behavior of filling an unlimited number
# of NaNs in the direction specified by limit_direction
# default limit is unlimited GH #16282
if limit is None:
# limit = len(xvalues)
pass
elif not is_integer(limit):
raise ValueError('Limit must be an integer')
elif limit < 1:
raise ValueError('Limit must be greater than 0')
# each possible limit_direction
# TODO: do we need sorted?
if limit_direction == 'forward' and limit is not None:
violate_limit = sorted(start_nans |
set(_interp_limit(invalid, limit, 0)))
elif limit_direction == 'forward':
violate_limit = sorted(start_nans)
elif limit_direction == 'backward' and limit is not None:
violate_limit = sorted(end_nans |
set(_interp_limit(invalid, 0, limit)))
elif limit_direction == 'backward':
violate_limit = sorted(end_nans)
elif limit_direction == 'both' and limit is not None:
violate_limit = sorted(_interp_limit(invalid, limit, limit))
else:
violate_limit = []
xvalues = getattr(xvalues, 'values', xvalues)
yvalues = getattr(yvalues, 'values', yvalues)
result = yvalues.copy()
if method in ['linear', 'time', 'index', 'values']:
if method in ('values', 'index'):
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if needs_i8_conversion(inds.dtype.type):
inds = inds.view(np.int64)
if inds.dtype == np.object_:
inds = lib.maybe_convert_objects(inds)
else:
inds = xvalues
result[invalid] = np.interp(inds[invalid], inds[valid], yvalues[valid])
result[violate_limit] = np.nan
return result
sp_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'krogh', 'spline', 'polynomial',
'from_derivatives', 'piecewise_polynomial', 'pchip', 'akima']
if method in sp_methods:
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if issubclass(inds.dtype.type, np.datetime64):
inds = inds.view(np.int64)
result[invalid] = _interpolate_scipy_wrapper(inds[valid],
yvalues[valid],
inds[invalid],
method=method,
fill_value=fill_value,
bounds_error=bounds_error,
order=order, **kwargs)
result[violate_limit] = np.nan
return result
def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
bounds_error=False, order=None, **kwargs):
"""
passed off to scipy.interpolate.interp1d. method is scipy's kind.
Returns an array interpolated at new_x. Add any new methods to
the list in clean_interp_method.
"""
try:
from scipy import interpolate
# TODO: Why is DatetimeIndex being imported here?
from pandas import DatetimeIndex # noqa
except ImportError:
raise ImportError('{method} interpolation requires SciPy'
.format(method=method))
new_x = np.asarray(new_x)
# ignores some kwargs that could be passed along.
alt_methods = {
'barycentric': interpolate.barycentric_interpolate,
'krogh': interpolate.krogh_interpolate,
'from_derivatives': _from_derivatives,
'piecewise_polynomial': _from_derivatives,
}
if getattr(x, 'is_all_dates', False):
# GH 5975, scipy.interp1d can't handle datetime64s
x, new_x = x._values.astype('i8'), new_x.astype('i8')
if method == 'pchip':
try:
alt_methods['pchip'] = interpolate.pchip_interpolate
except AttributeError:
raise ImportError("Your version of Scipy does not support "
"PCHIP interpolation.")
elif method == 'akima':
try:
from scipy.interpolate import Akima1DInterpolator # noqa
alt_methods['akima'] = _akima_interpolate
except ImportError:
raise ImportError("Your version of Scipy does not support "
"Akima interpolation.")
interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'polynomial']
if method in interp1d_methods:
if method == 'polynomial':
method = order
terp = interpolate.interp1d(x, y, kind=method, fill_value=fill_value,
bounds_error=bounds_error)
new_y = terp(new_x)
elif method == 'spline':
# GH #10633
if not order:
raise ValueError("order needs to be specified and greater than 0")
terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
new_y = terp(new_x)
else:
# GH 7295: need to be able to write for some reason
# in some circumstances: check all three
if not x.flags.writeable:
x = x.copy()
if not y.flags.writeable:
y = y.copy()
if not new_x.flags.writeable:
new_x = new_x.copy()
method = alt_methods[method]
new_y = method(x, y, new_x, **kwargs)
return new_y
def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False):
"""
Convenience function for interpolate.BPoly.from_derivatives
Construct a piecewise polynomial in the Bernstein basis, compatible
with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array-likes
yi[i][j] is the j-th derivative known at xi[i]
order : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
der : int or list
How many derivatives to extract; None for all potentially nonzero
derivatives (that is a number equal to the number of points), or a
list of derivatives to extract. This number includes the function
value as 0th derivative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first and last
intervals, or to return NaNs. Default: True.
See Also
--------
scipy.interpolate.BPoly.from_derivatives
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R.
"""
import scipy
from scipy import interpolate
if LooseVersion(scipy.__version__) < '0.18.0':
try:
method = interpolate.piecewise_polynomial_interpolate
return method(xi, yi.reshape(-1, 1), x,
orders=order, der=der)
except AttributeError:
pass
# return the method for compat with scipy version & backwards compat
method = interpolate.BPoly.from_derivatives
m = method(xi, yi.reshape(-1, 1),
orders=order, extrapolate=extrapolate)
return m(x)
def _akima_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for akima interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``.
See `Akima1DInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
How many derivatives to extract; None for all potentially
nonzero derivatives (that is a number equal to the number
of points), or a list of derivatives to extract. This number
includes the function value as 0th derivative.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
scipy.interpolate.Akima1DInterpolator
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R.
"""
from scipy import interpolate
try:
P = interpolate.Akima1DInterpolator(xi, yi, axis=axis)
except TypeError:
# Scipy earlier than 0.17.0 missing axis
P = interpolate.Akima1DInterpolator(xi, yi)
if der == 0:
return P(x)
elif interpolate._isscalar(der):
return P(x, der=der)
else:
return [P(x, nu) for nu in der]
def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None,
dtype=None):
""" perform an actual interpolation of values, values will be make 2-d if
needed fills inplace, returns the result
"""
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# reshape a 1 dim if needed
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with "
"axis != 0")
values = values.reshape(tuple((1,) + values.shape))
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(transf(values), fill_value)
method = clean_fill_method(method)
if method == 'pad':
values = transf(pad_2d(
transf(values), limit=limit, mask=mask, dtype=dtype))
else:
values = transf(backfill_2d(
transf(values), limit=limit, mask=mask, dtype=dtype))
# reshape back
if ndim == 1:
values = values[0]
return values
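# Hedged example (illustrative float input; note the fill also mutates the
# array passed in, since pad_2d/backfill_2d operate in place on a view):
#
#   >>> vals = np.array([1.0, np.nan, np.nan, 4.0])
#   >>> interpolate_2d(vals, method='pad', limit=1)
#   array([ 1.,  1., nan,  4.])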
def _interp_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, mask, limit=None):
view = arr.view(wrap_dtype)
f(view, mask, limit=limit)
return wrapper
_pad_1d_datetime = _interp_wrapper(algos.pad_inplace_int64, np.int64)
_pad_2d_datetime = _interp_wrapper(algos.pad_2d_inplace_int64, np.int64)
_backfill_1d_datetime = _interp_wrapper(algos.backfill_inplace_int64, np.int64)
_backfill_2d_datetime = _interp_wrapper(algos.backfill_2d_inplace_int64,
np.int64)
def pad_1d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
name = 'pad_inplace_{name}'.format(name=dtype.name)
_method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _pad_1d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.pad_inplace_float64
elif values.dtype == np.object_:
_method = algos.pad_inplace_object
if _method is None:
raise ValueError('Invalid dtype for pad_1d [{name}]'
.format(name=dtype.name))
if mask is None:
mask = isna(values)
mask = mask.view(np.uint8)
_method(values, mask, limit=limit)
return values
def backfill_1d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
name = 'backfill_inplace_{name}'.format(name=dtype.name)
_method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _backfill_1d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.backfill_inplace_float64
elif values.dtype == np.object_:
_method = algos.backfill_inplace_object
if _method is None:
raise ValueError('Invalid dtype for backfill_1d [{name}]'
.format(name=dtype.name))
if mask is None:
mask = isna(values)
mask = mask.view(np.uint8)
_method(values, mask, limit=limit)
return values
def pad_2d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
name = 'pad_2d_inplace_{name}'.format(name=dtype.name)
_method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _pad_2d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.pad_2d_inplace_float64
elif values.dtype == np.object_:
_method = algos.pad_2d_inplace_object
if _method is None:
raise ValueError('Invalid dtype for pad_2d [{name}]'
.format(name=dtype.name))
if mask is None:
mask = isna(values)
mask = mask.view(np.uint8)
if np.all(values.shape):
_method(values, mask, limit=limit)
else:
# for test coverage
pass
return values
def backfill_2d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
name = 'backfill_2d_inplace_{name}'.format(name=dtype.name)
_method = getattr(algos, name, None)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
_method = _backfill_2d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.backfill_2d_inplace_float64
elif values.dtype == np.object_:
_method = algos.backfill_2d_inplace_object
if _method is None:
raise ValueError('Invalid dtype for backfill_2d [{name}]'
.format(name=dtype.name))
if mask is None:
mask = isna(values)
mask = mask.view(np.uint8)
if np.all(values.shape):
_method(values, mask, limit=limit)
else:
# for test coverage
pass
return values
_fill_methods = {'pad': pad_1d, 'backfill': backfill_1d}
def get_fill_func(method):
method = clean_fill_method(method)
return _fill_methods[method]
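# Hedged example: aliases are resolved before the lookup, so
#
#   >>> get_fill_func('ffill') is pad_1d
#   True
#   >>> get_fill_func('bfill') is backfill_1d
#   True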
def clean_reindex_fill_method(method):
return clean_fill_method(method, allow_nearest=True)
def fill_zeros(result, x, y, name, fill):
"""
if this is a reversed op, then flip x,y
if we have an integer value (or array in y)
and we have 0's, fill them with the fill,
return the result
mask the nan's from x
"""
if fill is None or is_float_dtype(result):
return result
if name.startswith(('r', '__r')):
x, y = y, x
is_variable_type = (hasattr(y, 'dtype') or hasattr(y, 'type'))
is_scalar_type = is_scalar(y)
if not is_variable_type and not is_scalar_type:
return result
if is_scalar_type:
y = np.array(y)
if is_integer_dtype(y):
if (y == 0).any():
# GH 7325, mask and nans must be broadcastable (also: PR 9308)
# Raveling and then reshaping makes np.putmask faster
mask = ((y == 0) & ~np.isnan(result)).ravel()
shape = result.shape
result = result.astype('float64', copy=False).ravel()
np.putmask(result, mask, fill)
# if we have a fill of inf, then sign it correctly
# (GH 6178 and PR 9308)
if np.isinf(fill):
signs = np.sign(y if name.startswith(('r', '__r')) else x)
negative_inf_mask = (signs.ravel() < 0) & mask
np.putmask(result, negative_inf_mask, -fill)
if "floordiv" in name: # (PR 9308)
nan_mask = ((y == 0) & (x == 0)).ravel()
np.putmask(result, nan_mask, np.nan)
result = result.reshape(shape)
return result
def _interp_limit(invalid, fw_limit, bw_limit):
"""Get idx of values that won't be filled b/c they exceed the limits.
This is equivalent to the more readable, but slower
.. code-block:: python
for x in np.where(invalid)[0]:
if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
yield x
"""
# handle forward first; the backward direction is the same except
# 1. operate on the reversed array
# 2. subtract the returned indices from N - 1
N = len(invalid)
def inner(invalid, limit):
limit = min(limit, N)
windowed = _rolling_window(invalid, limit + 1).all(1)
idx = (set(np.where(windowed)[0] + limit) |
set(np.where((~invalid[:limit + 1]).cumsum() == 0)[0]))
return idx
if fw_limit == 0:
f_idx = set(np.where(invalid)[0])
else:
f_idx = inner(invalid, fw_limit)
if bw_limit == 0:
# then we don't even need to care about backwards, just use forwards
return f_idx
else:
b_idx = set(N - 1 - np.asarray(list(inner(invalid[::-1], bw_limit))))
if fw_limit == 0:
return b_idx
return f_idx & b_idx
def _rolling_window(a, window):
"""
[True, True, False, True, False], 2 ->
[
[True, True],
[True, False],
[False, True],
[True, False],
]
"""
# https://stackoverflow.com/a/6811241
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
| mit |
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/pandas/stats/tests/common.py | 9 | 4394 | # pylint: disable-msg=W0611,W0402
# flake8: noqa
from datetime import datetime
import string
import nose
import numpy as np
from pandas import DataFrame, bdate_range
from pandas.util.testing import assert_almost_equal # imported in other tests
import pandas.util.testing as tm
N = 100
K = 4
start = datetime(2007, 1, 1)
DATE_RANGE = bdate_range(start, periods=N)
COLS = ['Col' + c for c in string.ascii_uppercase[:K]]
def makeDataFrame():
data = DataFrame(np.random.randn(N, K),
columns=COLS,
index=DATE_RANGE)
return data
def getBasicDatasets():
A = makeDataFrame()
B = makeDataFrame()
C = makeDataFrame()
return A, B, C
def check_for_scipy():
try:
import scipy
except ImportError:
raise nose.SkipTest('no scipy')
def check_for_statsmodels():
_have_statsmodels = True
try:
import statsmodels.api as sm
except ImportError:
try:
import scikits.statsmodels.api as sm
except ImportError:
raise nose.SkipTest('no statsmodels')
class BaseTest(tm.TestCase):
def setUp(self):
check_for_scipy()
check_for_statsmodels()
self.A, self.B, self.C = getBasicDatasets()
self.createData1()
self.createData2()
self.createData3()
def createData1(self):
date = datetime(2007, 1, 1)
date2 = datetime(2007, 1, 15)
date3 = datetime(2007, 1, 22)
A = self.A.copy()
B = self.B.copy()
C = self.C.copy()
A['ColA'][date] = np.NaN
B['ColA'][date] = np.NaN
C['ColA'][date] = np.NaN
C['ColA'][date2] = np.NaN
# truncate data to save time
A = A[:30]
B = B[:30]
C = C[:30]
self.panel_y = A
self.panel_x = {'B': B, 'C': C}
self.series_panel_y = A.filter(['ColA'])
self.series_panel_x = {'B': B.filter(['ColA']),
'C': C.filter(['ColA'])}
self.series_y = A['ColA']
self.series_x = {'B': B['ColA'],
'C': C['ColA']}
def createData2(self):
y_data = [[1, np.NaN],
[2, 3],
[4, 5]]
y_index = [datetime(2000, 1, 1),
datetime(2000, 1, 2),
datetime(2000, 1, 3)]
y_cols = ['A', 'B']
self.panel_y2 = DataFrame(np.array(y_data), index=y_index,
columns=y_cols)
x1_data = [[6, np.NaN],
[7, 8],
[9, 30],
[11, 12]]
x1_index = [datetime(2000, 1, 1),
datetime(2000, 1, 2),
datetime(2000, 1, 3),
datetime(2000, 1, 4)]
x1_cols = ['A', 'B']
x1 = DataFrame(np.array(x1_data), index=x1_index,
columns=x1_cols)
x2_data = [[13, 14, np.NaN],
[15, np.NaN, np.NaN],
[16, 17, 48],
[19, 20, 21],
[22, 23, 24]]
x2_index = [datetime(2000, 1, 1),
datetime(2000, 1, 2),
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5)]
x2_cols = ['C', 'A', 'B']
x2 = DataFrame(np.array(x2_data), index=x2_index,
columns=x2_cols)
self.panel_x2 = {'x1': x1, 'x2': x2}
def createData3(self):
y_data = [[1, 2],
[3, 4]]
y_index = [datetime(2000, 1, 1),
datetime(2000, 1, 2)]
y_cols = ['A', 'B']
self.panel_y3 = DataFrame(np.array(y_data), index=y_index,
columns=y_cols)
x1_data = [['A', 'B'],
['C', 'A']]
x1_index = [datetime(2000, 1, 1),
datetime(2000, 1, 2)]
x1_cols = ['A', 'B']
x1 = DataFrame(np.array(x1_data), index=x1_index,
columns=x1_cols)
x2_data = [['foo', 'bar'],
['baz', 'foo']]
x2_index = [datetime(2000, 1, 1),
datetime(2000, 1, 2)]
x2_cols = ['A', 'B']
x2 = DataFrame(np.array(x2_data), index=x2_index,
columns=x2_cols)
self.panel_x3 = {'x1': x1, 'x2': x2}
| apache-2.0 |
nrkumar93/bnr_workspace | label_training_detection/scripts/plot_detection_time_scale_factor.py | 1 | 4096 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# batch_label_localization_top_level.py
#
# Copyright 2016 Prasanna <prasanna@prasanna-ThinkStation-P300>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import json
import argparse
import subprocess
import math
import matplotlib.pyplot as plt
def main():
parser = argparse.ArgumentParser(description="Use label_defs to set params. Open this program to set step size and aspect ratio.")
parser.add_argument("label_defs", help="The path to label_defs.json file")
parser.add_argument("input_dir", help="The directory containing symlinks to input images.")
args = parser.parse_args()
step_size = 0.02
aspect_ratio = 1.75
avg_time = []
x = []
yy = []
diag_dist = []
for i in range(40):
label_defs = open(args.label_defs, 'r').read()
defs = json.loads(label_defs)
defs['label_definitions'][0]['classifier_types']['source'][0]['scale_factor'] = defs['label_definitions'][0]['classifier_types']['source'][0]['scale_factor'] + (i*step_size)
with open('/tmp/label_defs.json', 'w') as outfile:
json.dump(defs, outfile)
print defs['label_definitions'][0]['classifier_types']['source'][0]['scale_factor']
command = 'rosrun bnr_analytics_labels label_detect_tool '
command += ' --images_path ' + args.input_dir
command += ' --label_defs_json ' + '/tmp/label_defs.json'
print '========================================EXECUTING========================================'
print command
print '========================================================================================='
cmd = command.split()
ret = subprocess.Popen(cmd, stdout=subprocess.PIPE)
avg_time.append(float(ret.stdout.readlines()[-1].replace('\n', '')))
diag_dist.append(math.sqrt(defs['label_definitions'][0]['classifier_types']['source'][0]['maxSize_height']**2 + defs['label_definitions'][0]['classifier_types']['source'][0]['maxSize_width']**2) - math.sqrt(defs['label_definitions'][0]['classifier_types']['source'][0]['minSize_height']**2 + defs['label_definitions'][0]['classifier_types']['source'][0]['minSize_width']**2))
x.append(defs['label_definitions'][0]['classifier_types']['source'][0]['scale_factor'])
yy.append(math.log(defs['label_definitions'][0]['classifier_types']['source'][0]['maxSize_width']/defs['label_definitions'][0]['classifier_types']['source'][0]['minSize_width'], defs['label_definitions'][0]['classifier_types']['source'][0]['scale_factor']))
print avg_time
print x
if defs['label_definitions'][0]['classifier_types']['source'][0]['minSize_height']*defs['label_definitions'][0]['classifier_types']['source'][0]['scale_factor'] > defs['label_definitions'][0]['classifier_types']['source'][0]['maxSize_height'] or defs['label_definitions'][0]['classifier_types']['source'][0]['minSize_width']*defs['label_definitions'][0]['classifier_types']['source'][0]['scale_factor'] > defs['label_definitions'][0]['classifier_types']['source'][0]['maxSize_width']:
x = x[0:len(x)-1]
avg_time = avg_time[0:len(avg_time)-1]
break
plt.plot(x, avg_time, '-o', linewidth=1.0, label='Observed')
plt.plot(x, yy, '-x', linewidth=1.0, label='Theoretical')
plt.xlabel('Scale Factor ($\gamma$)')
plt.ylabel('Time taken (ms)')
plt.legend(loc='upper left')
plt.ylim((0, max(avg_time)+3))
plt.show()
return 0
if __name__ == '__main__':
main()
| gpl-3.0 |
upliftaero/MissionPlanner | Lib/site-packages/numpy/doc/creation.py | 94 | 5411 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or record arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists,
and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
``>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])``
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which conversion is
not hard are those handled by libraries like PIL (able to read and write many
image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!). If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques, though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
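As a rough sketch (the array values and file name here are made up for
illustration), a raw binary round trip with tofile() and fromfile() might
look like: ::
>>> a = np.arange(10, dtype=np.float64)
>>> a.tofile('data.bin')
>>> b = np.fromfile('data.bin', dtype=np.float64)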
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common examples are
the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
"""
| gpl-3.0 |
AlexRobson/nilmtk | nilmtk/feature_detectors/cluster.py | 6 | 5343 | from __future__ import print_function, division
import numpy as np
import pandas as pd
# Fix the seed for repeatability of experiments
SEED = 42
np.random.seed(SEED)
def cluster(X, max_num_clusters=3, exact_num_clusters=None):
'''Applies clustering on reduced data,
i.e. data where power is greater than threshold.
Parameters
----------
X : pd.Series or single-column pd.DataFrame
max_num_clusters : int
Returns
-------
centroids : ndarray of int32s
Power in different states of an appliance, sorted
'''
# Find where power consumption is greater than 10
data = _transform_data(X)
# Find clusters
centroids = _apply_clustering(data, max_num_clusters, exact_num_clusters)
centroids = np.append(centroids, 0) # add 'off' state
centroids = np.round(centroids).astype(np.int32)
centroids = np.unique(centroids) # np.unique also sorts
# TODO: Merge similar clusters
return centroids
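# Hedged usage sketch (synthetic readings; note _transform_data needs at least
# 20 samples above its threshold of 10, otherwise it falls back to zeros):
#
#   >>> power = pd.Series([0] * 30 + [100] * 30 + [250] * 30)
#   >>> cluster(power, max_num_clusters=3)
#   array([  0, 100, 250], dtype=int32)   # approximate; 0 is the appended 'off' state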
def _transform_data(data):
'''Subsamples if needed and converts to column vector (which is what
scikit-learn requires).
Parameters
----------
data : pd.Series or single column pd.DataFrame
Returns
-------
data_above_thresh : ndarray
column vector
'''
MAX_NUMBER_OF_SAMPLES = 2000
MIN_NUMBER_OF_SAMPLES = 20
DATA_THRESHOLD = 10
data_above_thresh = data[data > DATA_THRESHOLD].dropna().values
n_samples = len(data_above_thresh)
if n_samples < MIN_NUMBER_OF_SAMPLES:
return np.zeros((MAX_NUMBER_OF_SAMPLES, 1))
elif n_samples > MAX_NUMBER_OF_SAMPLES:
# Randomly subsample (we don't want to smoothly downsample
# because that is likely to change the values)
random_indices = np.random.randint(0, n_samples, MAX_NUMBER_OF_SAMPLES)
resampled = data_above_thresh[random_indices]
return resampled.reshape(MAX_NUMBER_OF_SAMPLES, 1)
else:
return data_above_thresh.reshape(n_samples, 1)
def _apply_clustering_n_clusters(X, n_clusters):
"""
:param X: ndarray
:param n_clusters: exact number of clusters to use
:return:
"""
from sklearn.cluster import KMeans
k_means = KMeans(init='k-means++', n_clusters=n_clusters)
k_means.fit(X)
return k_means.labels_, k_means.cluster_centers_
def _apply_clustering(X, max_num_clusters, exact_num_clusters=None):
'''
Parameters
----------
X : ndarray
max_num_clusters : int
Returns
-------
centroids : list of numbers
List of power in different states of an appliance
'''
# If we import sklearn at the top of the file then it makes autodoc fail
from sklearn import metrics
# sklearn produces lots of DepreciationWarnings with PyTables
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Finds whether 2 or 3 gives better Silhouette coefficient
# Whichever is higher serves as the number of clusters for that
# appliance
num_clus = -1
sh = -1
k_means_labels = {}
k_means_cluster_centers = {}
k_means_labels_unique = {}
# If the exact number of clusters are specified, then use that
if exact_num_clusters is not None:
labels, centers = _apply_clustering_n_clusters(X, exact_num_clusters)
return centers.flatten()
# Exact number of clusters are not specified, use the cluster validity measures
# to find the optimal number
for n_clusters in range(1, max_num_clusters):
try:
labels, centers = _apply_clustering_n_clusters(X, n_clusters)
k_means_labels[n_clusters] = labels
k_means_cluster_centers[n_clusters] = centers
k_means_labels_unique[n_clusters] = np.unique(labels)
try:
sh_n = metrics.silhouette_score(
X, k_means_labels[n_clusters], metric='euclidean')
if sh_n > sh:
sh = sh_n
num_clus = n_clusters
except Exception:
num_clus = n_clusters
except Exception:
if num_clus > -1:
return k_means_cluster_centers[num_clus]
else:
return np.array([0])
return k_means_cluster_centers[num_clus].flatten()
def hart85_means_shift_cluster(pair_buffer_df, cols):
from sklearn.cluster import MeanShift
# Creating feature vector
cluster_df = pd.DataFrame()
power_types = [col[1] for col in cols]
if 'active' in power_types:
cluster_df['active'] = pd.Series(pair_buffer_df.apply(lambda row:
((np.fabs(row['T1 Active']) + np.fabs(row['T2 Active'])) / 2), axis=1), index=pair_buffer_df.index)
if 'reactive' in power_types:
cluster_df['reactive'] = pd.Series(pair_buffer_df.apply(lambda row:
((np.fabs(row['T1 Reactive']) + np.fabs(row['T2 Reactive'])) / 2), axis=1), index=pair_buffer_df.index)
X = cluster_df.values.reshape((len(cluster_df.index), len(cols)))
ms = MeanShift(bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
return pd.DataFrame(cluster_centers, columns=cols)
| apache-2.0 |
tdent/pycbc | examples/distributions/spin_examples.py | 14 | 1894 | import matplotlib.pyplot as plt
import numpy
import pycbc.coordinates as co
from pycbc import distributions
# We can choose any bounds between 0 and pi for the polar angle, but the
# bounds are given in units of pi, so we use values between 0 and 1
theta_low = 0.
theta_high = 1.
# Units of pi for the bounds of the azimuthal angle which goes from 0 to 2 pi
phi_low = 0.
phi_high = 2.
# Create a distribution object from distributions.py. Here we are using the
# Uniform Solid Angle function which takes
# theta = polar_bounds(theta_lower_bound to theta_upper_bound), and
# phi = azimuthal_bounds(phi_lower_bound to phi_upper_bound).
uniform_solid_angle_distribution = distributions.UniformSolidAngle(
polar_bounds=(theta_low,theta_high),
azimuthal_bounds=(phi_low,phi_high))
# Now we can take a random variable sample from that distribution. In this
# case we want 500000 samples.
solid_angle_samples = uniform_solid_angle_distribution.rvs(size=500000)
# Make spins with unit length for coordinate transformation below.
spin_mag = numpy.ndarray(shape=(500000), dtype=float)
for i in range(0,500000):
spin_mag[i] = 1.
# Use the pycbc.coordinates as co spherical_to_cartesian function to convert
# from spherical polar coordinates to cartesian coordinates
spinx, spiny, spinz = co.spherical_to_cartesian(spin_mag,
solid_angle_samples['phi'],
solid_angle_samples['theta'])
# Choose 50 bins for the histograms.
n_bins = 50
plt.figure(figsize=(10,10))
plt.subplot(2, 2, 1)
plt.hist(spinx, bins = n_bins)
plt.title('Spin x samples')
plt.subplot(2, 2, 2)
plt.hist(spiny, bins = n_bins)
plt.title('Spin y samples')
plt.subplot(2, 2, 3)
plt.hist(spinz, bins = n_bins)
plt.title('Spin z samples')
plt.tight_layout()
plt.show()
| gpl-3.0 |
CartoDB/cartoframes | tests/unit/analysis/test_grid.py | 1 | 2773 | # """Unit tests for cartoframes.analysis.grid"""
import os
import pytest
import numpy as np
from pandas import read_csv
from geopandas import GeoDataFrame
from shapely.geometry import box, shape
from cartoframes.analysis.grid import QuadGrid
from cartoframes.utils.geom_utils import set_geometry
from geopandas.testing import assert_geodataframe_equal
# DATA FRAME SRC BBOX
pol_1 = box(1, 1, 2, 2)
pol_2 = box(3, 3, 4, 4)
GDF_BOX = GeoDataFrame({'id': [1, 2], 'geom': [pol_1, pol_2]}, columns=['id', 'geom'], geometry='geom')
pol_geojson = {
'type': 'Polygon',
'coordinates': [
[
[
-5.899658203125,
38.436379603
],
[
-6.690673828125,
37.67512527892127
],
[
-6.15234375,
37.43997405227057
],
[
-5.8447265625,
37.70120736474139
],
[
-6.13037109375,
37.82280243352756
],
[
-5.877685546874999,
38.02213147353745
],
[
-6.009521484375,
38.12591462924157
],
[
-5.5810546875,
38.1777509666256
],
[
-5.899658203125,
38.436379603
]
]
]
}
GDF_IRREGULAR = GeoDataFrame({'id': [1], 'geom': [shape(pol_geojson)]}, columns=['id', 'geom'], geometry='geom')
BASE_FOLDER = os.path.dirname(os.path.abspath(__file__))
class TestGrid(object):
def _load_test_gdf(self, fname):
fname = os.path.join(BASE_FOLDER, fname)
df = read_csv(fname, dtype={'id': np.int64, 'geom': object, 'quadkey': object})
gdf = GeoDataFrame(df, crs='epsg:4326')
set_geometry(gdf, 'geom', inplace=True)
return gdf
@pytest.mark.skip()
def test_quadgrid_polyfill_box(self, mocker):
"""cartoframes.analysis.grid.QuadGrid.polyfill"""
gdf = QuadGrid().polyfill(GDF_BOX, 12)
assert isinstance(gdf, GeoDataFrame)
# Check both dataframes are equals
gdf_test = self._load_test_gdf('grid_quadkey_bbox.csv')
assert_geodataframe_equal(gdf, gdf_test, check_less_precise=True)
@pytest.mark.skip()
def test_quadgrid_polyfill_pol(self, mocker):
"""cartoframes.analysis.grid.QuadGrid.polyfill"""
gdf = QuadGrid().polyfill(GDF_IRREGULAR, 12)
assert isinstance(gdf, GeoDataFrame)
# Check both dataframes are equals
gdf_test = self._load_test_gdf('grid_quadkey_pol.csv')
assert_geodataframe_equal(gdf, gdf_test, check_less_precise=True)
| bsd-3-clause |
madelynfreed/rlundo | venv/lib/python2.7/site-packages/IPython/kernel/zmq/ipkernel.py | 4 | 13846 | """The IPython kernel implementation"""
import getpass
import sys
import traceback
from IPython.core import release
from IPython.utils.py3compat import builtin_mod, PY3
from IPython.utils.tokenutil import token_at_cursor, line_at_cursor
from IPython.utils.traitlets import Instance, Type, Any, List
from IPython.utils.decorators import undoc
from ..comm import CommManager
from .kernelbase import Kernel as KernelBase
from .serialize import serialize_object, unpack_apply_message
from .zmqshell import ZMQInteractiveShell
def lazy_import_handle_comm_opened(*args, **kwargs):
from IPython.html.widgets import Widget
Widget.handle_comm_opened(*args, **kwargs)
class IPythonKernel(KernelBase):
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
shell_class = Type(ZMQInteractiveShell)
user_module = Any()
def _user_module_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_module = new
user_ns = Instance(dict, args=None, allow_none=True)
def _user_ns_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_ns = new
self.shell.init_user_ns()
# A reference to the Python builtin 'raw_input' function.
# (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
_sys_raw_input = Any()
_sys_eval_input = Any()
def __init__(self, **kwargs):
super(IPythonKernel, self).__init__(**kwargs)
# Initialize the InteractiveShell subclass
self.shell = self.shell_class.instance(parent=self,
profile_dir = self.profile_dir,
user_module = self.user_module,
user_ns = self.user_ns,
kernel = self,
)
self.shell.displayhook.session = self.session
self.shell.displayhook.pub_socket = self.iopub_socket
self.shell.displayhook.topic = self._topic('execute_result')
self.shell.display_pub.session = self.session
self.shell.display_pub.pub_socket = self.iopub_socket
self.shell.data_pub.session = self.session
self.shell.data_pub.pub_socket = self.iopub_socket
# TMP - hack while developing
self.shell._reply_content = None
self.comm_manager = CommManager(shell=self.shell, parent=self,
kernel=self)
self.comm_manager.register_target('ipython.widget', lazy_import_handle_comm_opened)
self.shell.configurables.append(self.comm_manager)
comm_msg_types = [ 'comm_open', 'comm_msg', 'comm_close' ]
for msg_type in comm_msg_types:
self.shell_handlers[msg_type] = getattr(self.comm_manager, msg_type)
help_links = List([
{
'text': "Python",
'url': "http://docs.python.org/%i.%i" % sys.version_info[:2],
},
{
'text': "IPython",
'url': "http://ipython.org/documentation.html",
},
{
'text': "NumPy",
'url': "http://docs.scipy.org/doc/numpy/reference/",
},
{
'text': "SciPy",
'url': "http://docs.scipy.org/doc/scipy/reference/",
},
{
'text': "Matplotlib",
'url': "http://matplotlib.org/contents.html",
},
{
'text': "SymPy",
'url': "http://docs.sympy.org/latest/index.html",
},
{
'text': "pandas",
'url': "http://pandas.pydata.org/pandas-docs/stable/",
},
])
# Kernel info fields
implementation = 'ipython'
implementation_version = release.version
language_info = {
'name': 'python',
'version': sys.version.split()[0],
'mimetype': 'text/x-python',
'codemirror_mode': {'name': 'ipython',
'version': sys.version_info[0]},
'pygments_lexer': 'ipython%d' % (3 if PY3 else 2),
'nbconvert_exporter': 'python',
'file_extension': '.py'
}
@property
def banner(self):
return self.shell.banner
def start(self):
self.shell.exit_now = False
super(IPythonKernel, self).start()
def set_parent(self, ident, parent):
"""Overridden from parent to tell the display hook and output streams
about the parent message.
"""
super(IPythonKernel, self).set_parent(ident, parent)
self.shell.set_parent(parent)
def _forward_input(self, allow_stdin=False):
"""Forward raw_input and getpass to the current frontend.
via input_request
"""
self._allow_stdin = allow_stdin
if PY3:
self._sys_raw_input = builtin_mod.input
builtin_mod.input = self.raw_input
else:
self._sys_raw_input = builtin_mod.raw_input
self._sys_eval_input = builtin_mod.input
builtin_mod.raw_input = self.raw_input
builtin_mod.input = lambda prompt='': eval(self.raw_input(prompt))
self._save_getpass = getpass.getpass
getpass.getpass = self.getpass
def _restore_input(self):
"""Restore raw_input, getpass"""
if PY3:
builtin_mod.input = self._sys_raw_input
else:
builtin_mod.raw_input = self._sys_raw_input
builtin_mod.input = self._sys_eval_input
getpass.getpass = self._save_getpass
@property
def execution_count(self):
return self.shell.execution_count
@execution_count.setter
def execution_count(self, value):
# Ignore the incrementing done by KernelBase, in favour of our shell's
# execution counter.
pass
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
shell = self.shell # we'll need this a lot here
self._forward_input(allow_stdin)
reply_content = {}
# FIXME: the shell calls the exception handler itself.
shell._reply_content = None
try:
shell.run_cell(code, store_history=store_history, silent=silent)
except:
status = u'error'
# FIXME: this code right now isn't being used yet by default,
# because the run_cell() call above directly fires off exception
# reporting. This code, therefore, is only active in the scenario
# where runlines itself has an unhandled exception. We need to
# uniformize this, for all exception construction to come from a
# single location in the codebase.
etype, evalue, tb = sys.exc_info()
tb_list = traceback.format_exception(etype, evalue, tb)
reply_content.update(shell._showtraceback(etype, evalue, tb_list))
else:
status = u'ok'
finally:
self._restore_input()
reply_content[u'status'] = status
# Return the execution counter so clients can display prompts
reply_content['execution_count'] = shell.execution_count - 1
# FIXME - fish exception info out of shell, possibly left there by
# runlines. We'll need to clean up this logic later.
if shell._reply_content is not None:
reply_content.update(shell._reply_content)
e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='execute')
reply_content['engine_info'] = e_info
# reset after use
shell._reply_content = None
if 'traceback' in reply_content:
self.log.info("Exception in execute request:\n%s", '\n'.join(reply_content['traceback']))
# At this point, we can tell whether the main code execution succeeded
# or not. If it did, we proceed to evaluate user_expressions
if reply_content['status'] == 'ok':
reply_content[u'user_expressions'] = \
shell.user_expressions(user_expressions or {})
else:
# If there was an error, don't even try to compute expressions
reply_content[u'user_expressions'] = {}
# Payloads should be retrieved regardless of outcome, so we can both
# recover partial output (that could have been generated early in a
# block, before an error) and clear the payload system always.
reply_content[u'payload'] = shell.payload_manager.read_payload()
# Be aggressive about clearing the payload because we don't want
# it to sit in memory until the next execute_request comes in.
shell.payload_manager.clear_payload()
return reply_content
def do_complete(self, code, cursor_pos):
# FIXME: IPython completers currently assume single line,
# but completion messages give multi-line context
# For now, extract line from cell, based on cursor_pos:
if cursor_pos is None:
cursor_pos = len(code)
line, offset = line_at_cursor(code, cursor_pos)
line_cursor = cursor_pos - offset
txt, matches = self.shell.complete('', line, line_cursor)
return {'matches' : matches,
'cursor_end' : cursor_pos,
'cursor_start' : cursor_pos - len(txt),
'metadata' : {},
'status' : 'ok'}
def do_inspect(self, code, cursor_pos, detail_level=0):
name = token_at_cursor(code, cursor_pos)
info = self.shell.object_inspect(name)
reply_content = {'status' : 'ok'}
reply_content['data'] = data = {}
reply_content['metadata'] = {}
reply_content['found'] = info['found']
if info['found']:
info_text = self.shell.object_inspect_text(
name,
detail_level=detail_level,
)
data['text/plain'] = info_text
return reply_content
def do_history(self, hist_access_type, output, raw, session=None, start=None,
stop=None, n=None, pattern=None, unique=False):
if hist_access_type == 'tail':
hist = self.shell.history_manager.get_tail(n, raw=raw, output=output,
include_latest=True)
elif hist_access_type == 'range':
hist = self.shell.history_manager.get_range(session, start, stop,
raw=raw, output=output)
elif hist_access_type == 'search':
hist = self.shell.history_manager.search(
pattern, raw=raw, output=output, n=n, unique=unique)
else:
hist = []
return {'history' : list(hist)}
def do_shutdown(self, restart):
self.shell.exit_now = True
return dict(status='ok', restart=restart)
def do_is_complete(self, code):
status, indent_spaces = self.shell.input_transformer_manager.check_complete(code)
r = {'status': status}
if status == 'incomplete':
r['indent'] = ' ' * indent_spaces
return r
def do_apply(self, content, bufs, msg_id, reply_metadata):
shell = self.shell
try:
working = shell.user_ns
prefix = "_"+str(msg_id).replace("-","")+"_"
f,args,kwargs = unpack_apply_message(bufs, working, copy=False)
fname = getattr(f, '__name__', 'f')
fname = prefix+"f"
argname = prefix+"args"
kwargname = prefix+"kwargs"
resultname = prefix+"result"
ns = { fname : f, argname : args, kwargname : kwargs , resultname : None }
# print ns
working.update(ns)
code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname)
try:
exec(code, shell.user_global_ns, shell.user_ns)
result = working.get(resultname)
finally:
for key in ns:
working.pop(key)
result_buf = serialize_object(result,
buffer_threshold=self.session.buffer_threshold,
item_threshold=self.session.item_threshold,
)
except:
# invoke IPython traceback formatting
shell.showtraceback()
# FIXME - fish exception info out of shell, possibly left there by
# run_code. We'll need to clean up this logic later.
reply_content = {}
if shell._reply_content is not None:
reply_content.update(shell._reply_content)
e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='apply')
reply_content['engine_info'] = e_info
# reset after use
shell._reply_content = None
self.send_response(self.iopub_socket, u'error', reply_content,
ident=self._topic('error'))
self.log.info("Exception in apply request:\n%s", '\n'.join(reply_content['traceback']))
result_buf = []
if reply_content['ename'] == 'UnmetDependency':
reply_metadata['dependencies_met'] = False
else:
reply_content = {'status' : 'ok'}
return reply_content, result_buf
def do_clear(self):
self.shell.reset(False)
return dict(status='ok')
# This exists only for backwards compatibility - use IPythonKernel instead
@undoc
class Kernel(IPythonKernel):
def __init__(self, *args, **kwargs):
import warnings
warnings.warn('Kernel is a deprecated alias of IPython.kernel.zmq.ipkernel.IPythonKernel',
DeprecationWarning)
super(Kernel, self).__init__(*args, **kwargs) | gpl-3.0 |
rbooth200/DiscEvolution | scripts/plot_planet_M-R.py | 1 | 2774 | import os
import numpy as np
from scipy.interpolate import interp2d
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib import rcParams
rcParams['image.cmap'] = 'magma'
from snap_reader import DiscReader, PlanetReader
from chemistry import SimpleCOMolAbund, SimpleCOAtomAbund
class Formatter(object):
def __init__(self, x,y,z):
self._x, self._y, self._z = x,y,z
self._zi = interp2d(np.log(self._x), self._y, np.log(self._z),
bounds_error=True)
def __call__(self, x, y):
try:
z = np.exp(self._zi(np.log(x), y)[0])
return 'x={:5g}, y={:5g}, z={:5g}'.format(x, y, z)
except ValueError:
return 'x={:5g}, y={:5g}'.format(x, y)
def make_plot_planets(planets):
for p in planets:
plt.subplot(211)
plt.loglog(p.R, p.M / 317.8)
plt.subplot(212)
plt.loglog(p.R, p.M_core)
plt.subplot(211)
plt.title('t_0 = {:g}yr'.format(p.t_form[0]))
plt.ylabel('$M\,[M_J]$')
plt.plot([0.1, 300], [1,1], 'k--')
plt.xlim(0.1, 300)
plt.subplot(212)
plt.ylabel('$M_c\,[M_\oplus]$')
plt.xlabel('$R\,[\mathrm{au}]$')
plt.xlim(0.1, 300)
if __name__ == "__main__":
import sys
DIR = os.path.join('../planets/pb_gas_acc_f_0.0/TimeDep/'
'irradiated/Rc_200/Mdot_1e-09/')
try:
DIR = sys.argv[1]
except IndexError:
pass
planets = PlanetReader(DIR, 'planets').compute_planet_evo()
# Collect the planet data
tf = {}
for p in planets:
if p.t_form[0] not in tf:
tf[p.t_form[0]] = []
tf[p.t_form[0]].append(p)
t_form = np.array(sorted(tf.keys()), dtype='f8')
R_form = np.array([p.R[0] for p in tf[t_form[0]]], dtype='f8')
M_final = np.empty([t_form.shape[0], R_form.shape[0]], dtype='f8')
R_final = np.empty([t_form.shape[0], R_form.shape[0]], dtype='f8')
for i, ti in enumerate(t_form):
for j, p in enumerate(tf[ti]):
M_final[i,j] = p[-1].M
R_final[i,j] = p[-1].R
t_form /= 1e6
MJ = 317.8
im = plt.pcolormesh(R_form, t_form, M_final, shading='gouraud',
norm=LogNorm(), vmin=0.5, vmax=10*MJ)
plt.gca().format_coord = Formatter(R_form, t_form, M_final)
CS = plt.contour(R_form, t_form, R_final, colors='b',
levels=[0.5, 1, 2, 5, 10, 20, 50, 100])
plt.colorbar(im, label='$M\,[\mathrm{M}_\oplus]$')
plt.clabel(CS, CS.levels[1::2], inline=True, fmt='%.1f')
#manual=[(21, 0.2), (20, 0.8), (63, 0.5)])
plt.xscale('log')
plt.xlabel('$R\,[\mathrm{au}]$')
plt.ylabel('$t_\mathrm{form}\,[\mathrm{Myr}]$')
plt.show()
| gpl-3.0 |
JeanKossaifi/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 83 | 17276 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LogisticRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
| bsd-3-clause |
tschaume/pymatgen | pymatgen/analysis/defects/utils.py | 4 | 57308 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Utilities for defects module.
"""
import math
from monty.json import MSONable
import itertools
import pandas as pd
import numpy as np
from numpy.linalg import norm
import logging
from collections import defaultdict
from scipy.spatial import Voronoi
from scipy.spatial.distance import squareform
from scipy.cluster.hierarchy import linkage, fcluster
from pymatgen.analysis.local_env import LocalStructOrderParams, \
MinimumDistanceNN, cn_opt_params
from pymatgen.core.periodic_table import Element, get_el_sp
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.outputs import Chgcar
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.phase_diagram import get_facets
from pymatgen.util.coord import pbc_diff
from pymatgen.vis.structure_vtk import StructureVis
from monty.dev import requires
from copy import deepcopy
try:
from skimage.feature import peak_local_max
peak_local_max_found = True
except ImportError:
peak_local_max_found = False
__author__ = "Danny Broberg, Shyam Dwaraknath, Bharat Medasani, Nils Zimmermann, Geoffroy Hautier"
__copyright__ = "Copyright 2014, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Danny Broberg, Shyam Dwaraknath"
__email__ = "[email protected], [email protected]"
__status__ = "Development"
__date__ = "January 11, 2018"
logger = logging.getLogger(__name__)
hart_to_ev = 27.2114
ang_to_bohr = 1.8897
invang_to_ev = 3.80986
kb = 8.6173324e-5 # eV / K
kumagai_to_V = 1.809512739e2 # = Electron charge * 1e10 / VacuumPermittivity Constant
motif_cn_op = {}
for cn, di in cn_opt_params.items(): # type: ignore
for mot, li in di.items():
motif_cn_op[mot] = {'cn': int(cn), 'optype': li[0]}
motif_cn_op[mot]['params'] = deepcopy(li[1]) if len(li) > 1 else None
class QModel(MSONable):
"""
Model for the defect charge distribution.
A combination of exponential tail and gaussian distribution is used
(see Freysoldt (2011), DOI: 10.1002/pssb.201046289 )
q_model(r) = q [x exp(-r/gamma) + (1-x) exp(-r^2/beta^2)]
without normalization constants
By default, gaussian distribution with 1 Bohr width is assumed.
If defect charge is more delocalized, exponential tail is suggested.
"""
def __init__(self, beta=1.0, expnorm=0.0, gamma=1.0):
"""
Args:
beta: Gaussian decay constant. Default value is 1 Bohr.
When delocalized (eg. diamond), 2 Bohr is more appropriate.
expnorm: Weight for the exponential tail in the range of [0-1].
Default is 0.0 indicating no tail .
For delocalized charges ideal value is around 0.54-0.6.
gamma: Exponential decay constant
"""
self.beta = beta
self.expnorm = expnorm
self.gamma = gamma
self.beta2 = beta * beta
self.gamma2 = gamma * gamma
if expnorm and not gamma:
raise ValueError("Please supply exponential decay constant.")
def rho_rec(self, g2):
"""
Reciprocal space model charge value
for input squared reciprocal vector.
Args:
g2: Square of reciprocal vector
Returns:
Charge density at the reciprocal vector magnitude
"""
return (self.expnorm / np.sqrt(1 + self.gamma2 * g2) + (
1 - self.expnorm) * np.exp(-0.25 * self.beta2 * g2))
@property
def rho_rec_limit0(self):
"""
Reciprocal space model charge value
close to reciprocal vector 0 .
rho_rec(g->0) -> 1 + rho_rec_limit0 * g^2
"""
return -2 * self.gamma2 * self.expnorm - 0.25 * self.beta2 * (
1 - self.expnorm)
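# Illustrative usage sketch (not part of the original module); the parameter
# values below are made up purely for demonstration:
#
#     qm = QModel(beta=2.0, expnorm=0.55, gamma=1.5)
#     rho = qm.rho_rec(0.25)     # model charge density at |g|^2 = 0.25 (1/Bohr^2)
#     coeff = qm.rho_rec_limit0  # coefficient of the small-g expansion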
def eV_to_k(energy):
"""
Convert energy to reciprocal vector magnitude k via hbar*k^2/2m
Args:
        energy: Energy in eV.
Returns:
(double) Reciprocal vector magnitude (units of 1/Bohr).
"""
return math.sqrt(energy / invang_to_ev) * ang_to_bohr
def genrecip(a1, a2, a3, encut):
"""
Args:
a1, a2, a3: lattice vectors in bohr
encut: energy cut off in eV
Returns:
reciprocal lattice vectors with energy less than encut
"""
vol = np.dot(a1, np.cross(a2, a3)) # 1/bohr^3
b1 = (2 * np.pi / vol) * np.cross(a2, a3) # units 1/bohr
b2 = (2 * np.pi / vol) * np.cross(a3, a1)
b3 = (2 * np.pi / vol) * np.cross(a1, a2)
# create list of recip space vectors that satisfy |i*b1+j*b2+k*b3|<=encut
G_cut = eV_to_k(encut)
    # Figure out max in all reciprocal lattice directions
i_max = int(math.ceil(G_cut / norm(b1)))
j_max = int(math.ceil(G_cut / norm(b2)))
k_max = int(math.ceil(G_cut / norm(b3)))
# Build index list
i = np.arange(-i_max, i_max)
j = np.arange(-j_max, j_max)
k = np.arange(-k_max, k_max)
# Convert index to vectors using meshgrid
    indices = np.array(np.meshgrid(i, j, k)).T.reshape(-1, 3)
    # Multiply integer index vectors by the basis to get reciprocal space vectors
    vecs = np.dot(indices, [b1, b2, b3])
# Calculate radii of all vectors
radii = np.sqrt(np.einsum('ij,ij->i', vecs, vecs))
# Yield based on radii
for vec, r in zip(vecs, radii):
if r < G_cut and r != 0:
yield vec
def generate_reciprocal_vectors_squared(a1, a2, a3, encut):
"""
    Generate reciprocal vector magnitudes within the cutoff along the specified
lattice vectors.
Args:
a1: Lattice vector a (in Bohrs)
a2: Lattice vector b (in Bohrs)
a3: Lattice vector c (in Bohrs)
encut: Reciprocal vector energy cutoff
Returns:
[[g1^2], [g2^2], ...] Square of reciprocal vectors (1/Bohr)^2
        determined by a1, a2, a3 and whose magnitude is less than gcut^2.
"""
for vec in genrecip(a1, a2, a3, encut):
yield np.dot(vec, vec)
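# Illustrative usage sketch (not part of the original module). The lattice
# vectors are assumed to be given in Bohr, as the functions above expect; the
# 8 Bohr cubic cell and 50 eV cutoff are made-up example values:
#
#     a1, a2, a3 = np.eye(3) * 8.0  # rows of an 8 Bohr cubic cell
#     g2_values = list(generate_reciprocal_vectors_squared(a1, a2, a3, encut=50))
#     # each entry is |G|^2 (in 1/Bohr^2) for a reciprocal vector inside the cutoff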
def closestsites(struct_blk, struct_def, pos):
"""
Returns closest site to the input position
for both bulk and defect structures
Args:
struct_blk: Bulk structure
struct_def: Defect structure
pos: Position
Return: (site object, dist, index)
"""
blk_close_sites = struct_blk.get_sites_in_sphere(pos, 5, include_index=True)
blk_close_sites.sort(key=lambda x: x[1])
def_close_sites = struct_def.get_sites_in_sphere(pos, 5, include_index=True)
def_close_sites.sort(key=lambda x: x[1])
return blk_close_sites[0], def_close_sites[0]
class StructureMotifInterstitial:
"""
Generate interstitial sites at positions
where the interstitialcy is coordinated by nearest neighbors
in a way that resembles basic structure motifs
(e.g., tetrahedra, octahedra). The algorithm is called InFiT
    (Interstitialcy Finding Tool), it was introduced by
Nils E. R. Zimmermann, Matthew K. Horton, Anubhav Jain,
and Maciej Haranczyk (Front. Mater., 4, 34, 2017),
and it is used by the Python Charged Defect Toolkit
(PyCDT: D. Broberg et al., Comput. Phys. Commun., in press, 2018).
"""
def __init__(self,
struct,
inter_elem,
motif_types=("tetrahedral", "octahedral"),
op_threshs=(0.3, 0.5),
dl=0.2,
doverlap=1,
facmaxdl=1.01,
verbose=False):
"""
Generates symmetrically distinct interstitial sites at positions
where the interstitial is coordinated by nearest neighbors
in a pattern that resembles a supported structure motif
(e.g., tetrahedra, octahedra).
Args:
struct (Structure): input structure for which symmetrically
distinct interstitial sites are to be found.
inter_elem (string): element symbol of desired interstitial.
motif_types ([string]): list of structure motif types that are
to be considered. Permissible types are:
tet (tetrahedron), oct (octahedron).
op_threshs ([float]): threshold values for the underlying order
parameters to still recognize a given structural motif
(i.e., for an OP value >= threshold the coordination pattern
match is positive, for OP < threshold the match is
                    negative).
dl (float): grid fineness in Angstrom. The input
structure is divided into a grid of dimension
a/dl x b/dl x c/dl along the three crystallographic
directions, with a, b, and c being the lengths of
the three lattice vectors of the input unit cell.
doverlap (float): distance that is considered
to flag an overlap between any trial interstitial site
and a host atom.
facmaxdl (float): factor to be multiplied with the maximum grid
width that is then used as a cutoff distance for the
clustering prune step.
verbose (bool): flag indicating whether (True) or not (False;
default) to print additional information to screen.
"""
# Initialize interstitial finding.
self._structure = struct.copy()
self._motif_types = motif_types[:]
if len(self._motif_types) == 0:
raise RuntimeError("no motif types provided.")
self._op_threshs = op_threshs[:]
self.cn_motif_lostop = {}
self.target_cns = []
for motif in self._motif_types:
if motif not in list(motif_cn_op.keys()):
raise RuntimeError("unsupported motif type: {}.".format(motif))
cn = int(motif_cn_op[motif]['cn'])
if cn not in self.target_cns:
self.target_cns.append(cn)
if cn not in list(self.cn_motif_lostop.keys()):
self.cn_motif_lostop[cn] = {}
tmp_optype = motif_cn_op[motif]['optype']
if tmp_optype == 'tet_max':
tmp_optype = 'tet'
if tmp_optype == 'oct_max':
tmp_optype = 'oct'
self.cn_motif_lostop[cn][motif] = LocalStructOrderParams(
[tmp_optype], parameters=[motif_cn_op[motif]['params']],
cutoff=-10.0)
self._dl = dl
self._defect_sites = []
self._defect_types = []
self._defect_site_multiplicity = []
self._defect_cns = []
self._defect_opvals = []
rots, trans = SpacegroupAnalyzer(struct)._get_symmetry()
nbins = [int(struct.lattice.a / dl), int(struct.lattice.b / dl),
int(struct.lattice.c / dl)]
dls = [
struct.lattice.a / float(nbins[0]),
struct.lattice.b / float(nbins[1]),
struct.lattice.c / float(nbins[2])
]
maxdl = max(dls)
if verbose:
print("Grid size: {} {} {}".format(nbins[0], nbins[1], nbins[2]))
print("dls: {} {} {}".format(dls[0], dls[1], dls[2]))
struct_w_inter = struct.copy()
struct_w_inter.append(inter_elem, [0, 0, 0])
natoms = len(list(struct_w_inter.sites))
trialsites = []
# Build index list
i = np.arange(0, nbins[0]) + 0.5
j = np.arange(0, nbins[1]) + 0.5
k = np.arange(0, nbins[2]) + 0.5
# Convert index to vectors using meshgrid
        indices = np.array(np.meshgrid(i, j, k)).T.reshape(-1, 3)
        # Scale the integer grid indices by the grid spacing to get fractional coordinates
        vecs = np.multiply(indices, np.divide(1, nbins))
# Loop over trial positions that are based on a regular
# grid in fractional coordinate space
# within the unit cell.
for vec in vecs:
struct_w_inter.replace(natoms - 1, inter_elem, coords=vec,
coords_are_cartesian=False)
if len(struct_w_inter.get_sites_in_sphere(
struct_w_inter.sites[natoms - 1].coords, doverlap)) == 1:
neighs_images_weigths = MinimumDistanceNN(tol=0.8,
cutoff=6).get_nn_info(
struct_w_inter, natoms - 1)
neighs_images_weigths_sorted = sorted(neighs_images_weigths,
key=lambda x: x['weight'],
reverse=True)
for nsite in range(1, len(neighs_images_weigths_sorted) + 1):
if nsite not in self.target_cns:
continue
allsites = [neighs_images_weigths_sorted[i]['site'] for i in
range(nsite)]
indices_neighs = [i for i in range(len(allsites))]
allsites.append(struct_w_inter.sites[natoms - 1])
for mot, ops in self.cn_motif_lostop[nsite].items():
opvals = ops.get_order_parameters(
allsites, len(allsites) - 1,
indices_neighs=indices_neighs)
if opvals[0] > op_threshs[motif_types.index(mot)]:
cns = {}
for isite in range(nsite):
site = neighs_images_weigths_sorted[isite][
'site']
if isinstance(site.specie, Element):
elem = site.specie.symbol
else:
elem = site.specie.element.symbol
if elem in list(cns.keys()):
cns[elem] = cns[elem] + 1
else:
cns[elem] = 1
trialsites.append({
"mtype": mot,
"opval": opvals[0],
"coords": struct_w_inter.sites[
natoms - 1].coords[:],
"fracs": vec,
"cns": dict(cns)
})
break
# Prune list of trial sites by clustering and find the site
# with the largest order parameter value in each cluster.
nintersites = len(trialsites)
unique_motifs = []
for ts in trialsites:
if ts["mtype"] not in unique_motifs:
unique_motifs.append(ts["mtype"])
labels = {}
connected = []
for i in range(nintersites):
connected.append([])
for j in range(nintersites):
dist, image = struct_w_inter.lattice.get_distance_and_image(
trialsites[i]["fracs"],
trialsites[j]["fracs"])
connected[i].append(
True if dist < (maxdl * facmaxdl) else False)
include = []
for motif in unique_motifs:
labels[motif] = []
for i, ts in enumerate(trialsites):
labels[motif].append(i if ts["mtype"] == motif else -1)
change = True
while change:
change = False
for i in range(nintersites - 1):
if change:
break
if labels[motif][i] == -1:
continue
for j in range(i + 1, nintersites):
if labels[motif][j] == -1:
continue
if connected[i][j] and labels[motif][i] != \
labels[motif][j]:
if labels[motif][i] < labels[motif][j]:
labels[motif][j] = labels[motif][i]
else:
labels[motif][i] = labels[motif][j]
change = True
break
unique_ids = []
for l in labels[motif]:
if l != -1 and l not in unique_ids:
unique_ids.append(l)
if verbose:
print("unique_ids {} {}".format(motif, unique_ids))
for uid in unique_ids:
maxq = 0.0
imaxq = -1
for i in range(nintersites):
if labels[motif][i] == uid:
if imaxq < 0 or trialsites[i]["opval"] > maxq:
imaxq = i
maxq = trialsites[i]["opval"]
include.append(imaxq)
# Prune by symmetry.
multiplicity = {}
discard = []
for motif in unique_motifs:
discard_motif = []
for indi, i in enumerate(include):
if trialsites[i]["mtype"] != motif or \
i in discard_motif:
continue
multiplicity[i] = 1
symposlist = [
trialsites[i]["fracs"].dot(np.array(m, dtype=float)) for m
in rots]
for t in trans:
symposlist.append(trialsites[i]["fracs"] + np.array(t))
for indj in range(indi + 1, len(include)):
j = include[indj]
if trialsites[j]["mtype"] != motif or \
j in discard_motif:
continue
for sympos in symposlist:
dist, image = struct.lattice.get_distance_and_image(
sympos, trialsites[j]["fracs"])
if dist < maxdl * facmaxdl:
discard_motif.append(j)
multiplicity[i] += 1
break
for i in discard_motif:
if i not in discard:
discard.append(i)
if verbose:
print("Initial trial sites: {}\nAfter clustering: {}\n"
"After symmetry pruning: {}".format(len(trialsites),
len(include),
len(include) - len(
discard)))
for i in include:
if i not in discard:
self._defect_sites.append(
PeriodicSite(
Element(inter_elem),
trialsites[i]["fracs"],
self._structure.lattice,
to_unit_cell=False,
coords_are_cartesian=False,
properties=None))
self._defect_types.append(trialsites[i]["mtype"])
self._defect_cns.append(trialsites[i]["cns"])
self._defect_site_multiplicity.append(multiplicity[i])
self._defect_opvals.append(trialsites[i]["opval"])
def enumerate_defectsites(self):
"""
Get all defect sites.
Returns:
defect_sites ([PeriodicSite]): list of periodic sites
representing the interstitials.
"""
return self._defect_sites
def get_motif_type(self, i):
"""
Get the motif type of defect with index i (e.g., "tet").
Returns:
motif (string): motif type.
"""
return self._defect_types[i]
def get_defectsite_multiplicity(self, n):
"""
        Returns the symmetric multiplicity of the defect site at the index.
"""
return self._defect_site_multiplicity[n]
def get_coordinating_elements_cns(self, i):
"""
Get element-specific coordination numbers of defect with index i.
Returns:
elem_cn (dict): dictionary storing the coordination numbers (int)
with string representation of elements as keys.
(i.e., {elem1 (string): cn1 (int), ...}).
"""
return self._defect_cns[i]
def get_op_value(self, i):
"""
Get order-parameter value of defect with index i.
Returns:
opval (float): OP value.
"""
return self._defect_opvals[i]
def make_supercells_with_defects(self, scaling_matrix):
"""
Generate a sequence of supercells
in which each supercell contains a single interstitial,
except for the first supercell in the sequence
which is a copy of the defect-free input structure.
Args:
scaling_matrix (3x3 integer array): scaling matrix
to transform the lattice vectors.
Returns:
scs ([Structure]): sequence of supercells.
"""
scs = []
sc = self._structure.copy()
sc.make_supercell(scaling_matrix)
scs.append(sc)
for ids, defect_site in enumerate(self._defect_sites):
sc_with_inter = sc.copy()
sc_with_inter.append(
defect_site.species_string,
defect_site.frac_coords,
coords_are_cartesian=False,
validate_proximity=False,
properties=None)
if not sc_with_inter:
raise RuntimeError(
"could not generate supercell with" " interstitial {}".format(
ids + 1))
scs.append(sc_with_inter.copy())
return scs
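# Illustrative usage sketch (not part of the original module); `bulk_structure`
# is a hypothetical pymatgen Structure of the host material and "Li" a made-up
# choice of interstitial element:
#
#     smi = StructureMotifInterstitial(bulk_structure, "Li",
#                                      motif_types=("tetrahedral", "octahedral"),
#                                      op_threshs=(0.3, 0.5))
#     inter_sites = smi.enumerate_defectsites()
#     supercells = smi.make_supercells_with_defects([[2, 0, 0], [0, 2, 0], [0, 0, 2]])
#     # supercells[0] is the defect-free cell; each later entry contains one interstitial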
class TopographyAnalyzer:
"""
This is a generalized module to perform topological analyses of a crystal
structure using Voronoi tessellations. It can be used for finding potential
    interstitial sites. Applications include using these sites for
inserting additional atoms or for analyzing diffusion pathways.
Note that you typically want to do some preliminary postprocessing after
the initial construction. The initial construction will create a lot of
points, especially for determining potential insertion sites. Some helper
methods are available to perform aggregation and elimination of nodes. A
typical use is something like::
a = TopographyAnalyzer(structure, ["O"], ["P"])
a.cluster_nodes()
a.remove_collisions()
"""
def __init__(self,
structure,
framework_ions,
cations,
tol=0.0001,
max_cell_range=1,
check_volume=True,
constrained_c_frac=0.5,
thickness=0.5):
"""
Init.
Args:
structure (Structure): An initial structure.
framework_ions ([str]): A list of ions to be considered as a
framework. Typically, this would be all anion species. E.g.,
["O", "S"].
cations ([str]): A list of ions to be considered as non-migrating
cations. E.g., if you are looking at Li3PS4 as a Li
conductor, Li is a mobile species. Your cations should be [
"P"]. The cations are used to exclude polyhedra from
diffusion analysis since those polyhedra are already occupied.
tol (float): A tolerance distance for the analysis, used to
                determine whether two sites are actually periodic boundary images of
each other. Default is usually fine.
max_cell_range (int): This is the range of periodic images to
construct the Voronoi tesselation. A value of 1 means that we
include all points from (x +- 1, y +- 1, z+- 1) in the
voronoi construction. This is because the Voronoi poly
extends beyond the standard unit cell because of PBC.
Typically, the default value of 1 works fine for most
structures and is fast. But for really small unit
cells with high symmetry, you may need to increase this to 2
or higher.
check_volume (bool): Set False when ValueError always happen after
tuning tolerance.
            constrained_c_frac (float): Constrain the region where users want
                to do topology analysis. The default value is 0.5, which is the
                fractional c-coordinate of the cell.
            thickness (float): Along with constrained_c_frac, limits the
                thickness of the region we want to explore. The default is
                0.5, which maps all the sites of the unit cell.
"""
self.structure = structure
self.framework_ions = set([get_el_sp(sp) for sp in framework_ions])
self.cations = set([get_el_sp(sp) for sp in cations])
# Let us first map all sites to the standard unit cell, i.e.,
# 0 ≤ coordinates < 1.
# structure = Structure.from_sites(structure, to_unit_cell=True)
# lattice = structure.lattice
# We could constrain the region where we want to dope/explore by setting
# the value of constrained_c_frac and thickness. The default mode is
# mapping all sites to the standard unit cell
s = structure.copy()
constrained_sites = []
for i, site in enumerate(s):
if site.frac_coords[2] >= constrained_c_frac - thickness and \
site.frac_coords[
2] <= constrained_c_frac + thickness:
constrained_sites.append(site)
structure = Structure.from_sites(sites=constrained_sites)
lattice = structure.lattice
# Divide the sites into framework and non-framework sites.
framework = []
non_framework = []
for site in structure:
if self.framework_ions.intersection(site.species.keys()):
framework.append(site)
else:
non_framework.append(site)
# We construct a supercell series of coords. This is because the
# Voronoi polyhedra can extend beyond the standard unit cell. Using a
# range of -2, -1, 0, 1 should be fine.
coords = []
cell_range = list(range(-max_cell_range, max_cell_range + 1))
for shift in itertools.product(cell_range, cell_range, cell_range):
for site in framework:
shifted = site.frac_coords + shift
coords.append(lattice.get_cartesian_coords(shifted))
# Perform the voronoi tessellation.
voro = Voronoi(coords)
# Store a mapping of each voronoi node to a set of points.
node_points_map = defaultdict(set)
for pts, vs in voro.ridge_dict.items():
for v in vs:
node_points_map[v].update(pts)
logger.debug("%d total Voronoi vertices" % len(voro.vertices))
# Vnodes store all the valid voronoi polyhedra. Cation vnodes store
# the voronoi polyhedra that are already occupied by existing cations.
vnodes = []
cation_vnodes = []
def get_mapping(poly):
"""
            Helper function to check if a voronoi poly is a periodic image
of one of the existing voronoi polys.
"""
for v in vnodes:
if v.is_image(poly, tol):
return v
return None
# Filter all the voronoi polyhedra so that we only consider those
# which are within the unit cell.
for i, vertex in enumerate(voro.vertices):
if i == 0:
continue
fcoord = lattice.get_fractional_coords(vertex)
poly = VoronoiPolyhedron(lattice, fcoord, node_points_map[i],
coords, i)
if np.all([-tol <= c < 1 + tol for c in fcoord]):
if len(vnodes) == 0:
vnodes.append(poly)
else:
ref = get_mapping(poly)
if ref is None:
vnodes.append(poly)
logger.debug("%d voronoi vertices in cell." % len(vnodes))
# Eliminate all voronoi nodes which are closest to existing cations.
if len(cations) > 0:
cation_coords = [
site.frac_coords for site in non_framework if
self.cations.intersection(site.species.keys())
]
vertex_fcoords = [v.frac_coords for v in vnodes]
dist_matrix = lattice.get_all_distances(cation_coords,
vertex_fcoords)
indices = \
np.where(dist_matrix == np.min(dist_matrix, axis=1)[:, None])[1]
cation_vnodes = [v for i, v in enumerate(vnodes) if i in indices]
vnodes = [v for i, v in enumerate(vnodes) if i not in indices]
logger.debug("%d vertices in cell not with cation." % len(vnodes))
self.coords = coords
self.vnodes = vnodes
self.cation_vnodes = cation_vnodes
self.framework = framework
self.non_framework = non_framework
if check_volume:
self.check_volume()
def check_volume(self):
"""
Basic check for volume of all voronoi poly sum to unit cell volume
Note that this does not apply after poly combination.
"""
vol = sum((v.volume for v in self.vnodes)) + sum(
(v.volume for v in self.cation_vnodes))
if abs(vol - self.structure.volume) > 1e-8:
raise ValueError(
"Sum of voronoi volumes is not equal to original volume of "
"structure! This may lead to inaccurate results. You need to "
"tweak the tolerance and max_cell_range until you get a "
"correct mapping.")
def cluster_nodes(self, tol=0.2):
"""
Cluster nodes that are too close together using a tol.
Args:
tol (float): A distance tolerance. PBC is taken into account.
"""
lattice = self.structure.lattice
vfcoords = [v.frac_coords for v in self.vnodes]
# Manually generate the distance matrix (which needs to take into
        # account PBC).
dist_matrix = np.array(lattice.get_all_distances(vfcoords, vfcoords))
dist_matrix = (dist_matrix + dist_matrix.T) / 2
for i in range(len(dist_matrix)):
dist_matrix[i, i] = 0
condensed_m = squareform(dist_matrix)
z = linkage(condensed_m)
cn = fcluster(z, tol, criterion="distance")
merged_vnodes = []
for n in set(cn):
poly_indices = set()
frac_coords = []
for i, j in enumerate(np.where(cn == n)[0]):
poly_indices.update(self.vnodes[j].polyhedron_indices)
if i == 0:
frac_coords.append(self.vnodes[j].frac_coords)
else:
fcoords = self.vnodes[j].frac_coords
# We need the image to combine the frac_coords properly.
d, image = lattice.get_distance_and_image(frac_coords[0],
fcoords)
frac_coords.append(fcoords + image)
merged_vnodes.append(
VoronoiPolyhedron(lattice, np.average(frac_coords, axis=0),
poly_indices, self.coords))
self.vnodes = merged_vnodes
logger.debug("%d vertices after combination." % len(self.vnodes))
def remove_collisions(self, min_dist=0.5):
"""
Remove vnodes that are too close to existing atoms in the structure
Args:
min_dist(float): The minimum distance that a vertex needs to be
from existing atoms.
"""
vfcoords = [v.frac_coords for v in self.vnodes]
sfcoords = self.structure.frac_coords
dist_matrix = self.structure.lattice.get_all_distances(vfcoords,
sfcoords)
all_dist = np.min(dist_matrix, axis=1)
new_vnodes = []
for i, v in enumerate(self.vnodes):
if all_dist[i] > min_dist:
new_vnodes.append(v)
self.vnodes = new_vnodes
def get_structure_with_nodes(self):
"""
Get the modified structure with the voronoi nodes inserted. The
species is set as a DummySpecie X.
"""
new_s = Structure.from_sites(self.structure)
for v in self.vnodes:
new_s.append("X", v.frac_coords)
return new_s
def print_stats(self):
"""
Print stats such as the MSE dist.
"""
latt = self.structure.lattice
def get_min_dist(fcoords):
n = len(fcoords)
dist = latt.get_all_distances(fcoords, fcoords)
all_dist = [dist[i, j] for i in range(n) for j in range(i + 1, n)]
return min(all_dist)
        voro = [v.frac_coords for v in self.vnodes]
print("Min dist between voronoi vertices centers = %.4f" % get_min_dist(
voro))
def get_non_framework_dist(fcoords):
cations = [site.frac_coords for site in self.non_framework]
dist_matrix = latt.get_all_distances(cations, fcoords)
min_dist = np.min(dist_matrix, axis=1)
if len(cations) != len(min_dist):
raise Exception("Could not calculate distance to all cations")
return np.linalg.norm(min_dist), min(min_dist), max(min_dist)
print(len(self.non_framework))
print("MSE dist voro = %s" % str(get_non_framework_dist(voro)))
def write_topology(self, fname="Topo.cif"):
"""
Write topology to a file.
:param fname: Filename
"""
new_s = Structure.from_sites(self.structure)
for v in self.vnodes:
new_s.append("Mg", v.frac_coords)
new_s.to(filename=fname)
def analyze_symmetry(self, tol):
"""
:param tol: Tolerance for SpaceGroupAnalyzer
        :return: List of lists of Voronoi-node indices, grouped by symmetry equivalence.
"""
s = Structure.from_sites(self.framework)
site_to_vindex = {}
for i, v in enumerate(self.vnodes):
s.append("Li", v.frac_coords)
site_to_vindex[s[-1]] = i
print(len(s))
finder = SpacegroupAnalyzer(s, tol)
print(finder.get_space_group_operations())
symm_structure = finder.get_symmetrized_structure()
print(len(symm_structure.equivalent_sites))
return [[site_to_vindex[site]
for site in sites]
for sites in symm_structure.equivalent_sites
if sites[0].specie.symbol == "Li"]
def vtk(self):
"""
Show VTK visualization.
"""
if StructureVis is None:
raise NotImplementedError("vtk must be present to view.")
lattice = self.structure.lattice
vis = StructureVis()
vis.set_structure(Structure.from_sites(self.structure))
for v in self.vnodes:
vis.add_site(PeriodicSite("K", v.frac_coords, lattice))
vis.add_polyhedron(
[PeriodicSite("S", c, lattice, coords_are_cartesian=True) for c
in v.polyhedron_coords],
PeriodicSite("Na", v.frac_coords, lattice),
color="element",
draw_edges=True,
edges_color=(0, 0, 0))
vis.show()
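# Illustrative continuation of the usage example given in the class docstring
# (not part of the original module); `structure` is a hypothetical Structure
# with O as the framework anion and P as the non-migrating cation:
#
#     a = TopographyAnalyzer(structure, ["O"], ["P"])
#     a.cluster_nodes(tol=0.2)
#     a.remove_collisions(min_dist=0.5)
#     s_with_nodes = a.get_structure_with_nodes()  # candidate sites appear as DummySpecie X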
class VoronoiPolyhedron:
"""
Convenience container for a voronoi point in PBC and its associated polyhedron.
"""
def __init__(self, lattice, frac_coords, polyhedron_indices, all_coords,
name=None):
"""
        :param lattice: Lattice of the structure the node belongs to.
        :param frac_coords: Fractional coordinates of the Voronoi node.
        :param polyhedron_indices: Indices into all_coords of the points
            defining this polyhedron.
        :param all_coords: Cartesian coordinates of all points used in the
            Voronoi tessellation.
        :param name: Optional name/index for the polyhedron.
"""
self.lattice = lattice
self.frac_coords = frac_coords
self.polyhedron_indices = polyhedron_indices
self.polyhedron_coords = np.array(all_coords)[list(polyhedron_indices), :]
self.name = name
def is_image(self, poly, tol):
"""
:param poly: VoronoiPolyhedron
:param tol: Coordinate tolerance.
:return: Whether a poly is an image of the current one.
"""
frac_diff = pbc_diff(poly.frac_coords, self.frac_coords)
if not np.allclose(frac_diff, [0, 0, 0], atol=tol):
return False
to_frac = self.lattice.get_fractional_coords
for c1 in self.polyhedron_coords:
found = False
for c2 in poly.polyhedron_coords:
d = pbc_diff(to_frac(c1), to_frac(c2))
                if np.allclose(d, [0, 0, 0], atol=tol):
found = True
break
if not found:
return False
return True
@property
def coordination(self):
"""
:return: Coordination number
"""
return len(self.polyhedron_indices)
@property
def volume(self):
"""
:return: Volume
"""
return calculate_vol(self.polyhedron_coords)
def __str__(self):
return "Voronoi polyhedron %s" % self.name
class ChargeDensityAnalyzer:
"""
Analyzer to find potential interstitial sites based on charge density. The
`total` charge density is used.
"""
def __init__(self, chgcar):
"""
Initialization.
Args:
chgcar (pmg.Chgcar): input Chgcar object.
"""
self.chgcar = chgcar
self.structure = chgcar.structure
self.extrema_coords = [] # list of frac_coords of local extrema
self.extrema_type = None # "local maxima" or "local minima"
self._extrema_df = None # extrema frac_coords - chg density table
self._charge_distribution_df = None # frac_coords - chg density table
@classmethod
def from_file(cls, chgcar_filename):
"""
Init from a CHGCAR.
        :param chgcar_filename: Path to a CHGCAR file.
        :return: ChargeDensityAnalyzer
"""
chgcar = Chgcar.from_file(chgcar_filename)
return cls(chgcar=chgcar)
@property
def charge_distribution_df(self):
"""
:return: Charge distribution.
"""
if self._charge_distribution_df is None:
return self._get_charge_distribution_df()
else:
return self._charge_distribution_df
@property
def extrema_df(self):
"""
:return: The extrema in charge density.
"""
if self.extrema_type is None:
logger.warning(
"Please run ChargeDensityAnalyzer.get_local_extrema first!")
return self._extrema_df
def _get_charge_distribution_df(self):
"""
Return a complete table of fractional coordinates - charge density.
"""
# Fraction coordinates and corresponding indices
axis_grid = np.array([np.array(self.chgcar.get_axis_grid(i)) /
self.structure.lattice.abc[i] for i in range(3)])
axis_index = np.array([range(len(axis_grid[i])) for i in range(3)])
data = {}
for index in itertools.product(*axis_index):
a, b, c = index
f_coords = (axis_grid[0][a], axis_grid[1][b], axis_grid[2][c])
data[f_coords] = self.chgcar.data["total"][a][b][c]
# Fraction coordinates - charge density table
df = pd.Series(data).reset_index()
df.columns = ['a', 'b', 'c', 'Charge Density']
self._charge_distribution_df = df
return df
def _update_extrema(self, f_coords, extrema_type, threshold_frac=None,
threshold_abs=None):
"""Update _extrema_df, extrema_type and extrema_coords"""
if threshold_frac is not None:
if threshold_abs is not None:
                logger.warning(  # Exit if both filters are set
"Filter can be either threshold_frac or threshold_abs!")
return
if threshold_frac > 1 or threshold_frac < 0:
raise Exception("threshold_frac range is [0, 1]!")
# Return empty result if coords list is empty
if len(f_coords) == 0:
df = pd.DataFrame({}, columns=['A', 'B', 'C', "Chgcar"])
self._extrema_df = df
self.extrema_coords = []
logger.info("Find {} {}.".format(len(df), extrema_type))
return
data = {}
unit = 1 / np.array(self.chgcar.dim) # pixel along a, b, c
for fc in f_coords:
a, b, c = tuple(map(int, fc / unit))
data[tuple(fc)] = self.chgcar.data["total"][a][b][c]
df = pd.Series(data).reset_index()
df.columns = ['a', 'b', 'c', 'Charge Density']
ascending = (extrema_type == "local minima")
if threshold_abs is None:
threshold_frac = threshold_frac \
if threshold_frac is not None else 1.0
num_extrema = int(threshold_frac * len(f_coords))
df = df.sort_values(by="Charge Density", ascending=ascending)[
0:num_extrema]
df.reset_index(drop=True, inplace=True) # reset major index
else: # threshold_abs is set
df = df.sort_values(by="Charge Density", ascending=ascending)
df = df[df["Charge Density"] <= threshold_abs] if ascending \
else df[df["Charge Density"] >= threshold_abs]
extrema_coords = []
for row in df.iterrows():
fc = np.array(row[1]["a":"c"])
extrema_coords.append(fc)
self._extrema_df = df
self.extrema_type = extrema_type
self.extrema_coords = extrema_coords
logger.info("Find {} {}.".format(len(df), extrema_type))
@requires(peak_local_max_found,
"get_local_extrema requires skimage.feature.peak_local_max module"
" to be installed. Please confirm your skimage installation.")
def get_local_extrema(self, find_min=True, threshold_frac=None,
threshold_abs=None):
"""
Get all local extrema fractional coordinates in charge density,
searching for local minimum by default. Note that sites are NOT grouped
symmetrically.
Args:
            find_min (bool): True to find local minima, otherwise find
                local maxima.
threshold_frac (float): optional fraction of extrema shown, which
returns `threshold_frac * tot_num_extrema` extrema fractional
coordinates based on highest/lowest intensity.
E.g. set 0.2 to show the extrema with 20% highest or lowest
intensity. Value range: 0 <= threshold_frac <= 1
                Note that threshold_abs and threshold_frac should not be set
                at the same time.
threshold_abs (float): optional filter. When searching for local
minima, intensity <= threshold_abs returns; when searching for
local maxima, intensity >= threshold_abs returns.
                Note that threshold_abs and threshold_frac should not be set
                at the same time.
Returns:
extrema_coords (list): list of fractional coordinates corresponding
to local extrema.
"""
sign, extrema_type = 1, "local maxima"
if find_min:
sign, extrema_type = -1, "local minima"
# Make 3x3x3 supercell
        # This is a trick to resolve the periodic boundary issue.
total_chg = sign * self.chgcar.data["total"]
total_chg = np.tile(total_chg, reps=(3, 3, 3))
coordinates = peak_local_max(total_chg, min_distance=1)
# Remove duplicated sites introduced by supercell.
f_coords = [coord / total_chg.shape * 3 for coord in coordinates]
f_coords = [f - 1 for f in f_coords if
all(np.array(f) < 2) and all(np.array(f) >= 1)]
# Update information
self._update_extrema(f_coords, extrema_type,
threshold_frac=threshold_frac,
threshold_abs=threshold_abs)
return self.extrema_coords
def cluster_nodes(self, tol=0.2):
"""
Cluster nodes that are too close together using a tol.
Args:
tol (float): A distance tolerance. PBC is taken into account.
"""
lattice = self.structure.lattice
vf_coords = self.extrema_coords
if len(vf_coords) == 0:
if self.extrema_type is None:
logger.warning(
"Please run ChargeDensityAnalyzer.get_local_extrema first!")
return
new_f_coords = []
self._update_extrema(new_f_coords, self.extrema_type)
return new_f_coords
# Manually generate the distance matrix (which needs to take into
        # account PBC).
dist_matrix = np.array(lattice.get_all_distances(vf_coords, vf_coords))
dist_matrix = (dist_matrix + dist_matrix.T) / 2
for i in range(len(dist_matrix)):
dist_matrix[i, i] = 0
condensed_m = squareform(dist_matrix)
z = linkage(condensed_m)
cn = fcluster(z, tol, criterion="distance")
merged_fcoords = []
for n in set(cn):
frac_coords = []
for i, j in enumerate(np.where(cn == n)[0]):
if i == 0:
frac_coords.append(self.extrema_coords[j])
else:
f_coords = self.extrema_coords[j]
# We need the image to combine the frac_coords properly.
d, image = lattice.get_distance_and_image(frac_coords[0],
f_coords)
frac_coords.append(f_coords + image)
merged_fcoords.append(np.average(frac_coords, axis=0))
merged_fcoords = [f - np.floor(f) for f in merged_fcoords]
merged_fcoords = [f * (np.abs(f - 1) > 1E-15) for f in merged_fcoords]
# the second line for fringe cases like
# np.array([ 5.0000000e-01 -4.4408921e-17 5.0000000e-01])
# where the shift to [0,1) does not work due to float precision
self._update_extrema(merged_fcoords, extrema_type=self.extrema_type)
logger.debug(
"{} vertices after combination.".format(len(self.extrema_coords)))
def remove_collisions(self, min_dist=0.5):
"""
Remove predicted sites that are too close to existing atoms in the
structure.
Args:
min_dist (float): The minimum distance (in Angstrom) that
a predicted site needs to be from existing atoms. A min_dist
with value <= 0 returns all sites without distance checking.
"""
s_f_coords = self.structure.frac_coords
f_coords = self.extrema_coords
if len(f_coords) == 0:
if self.extrema_type is None:
logger.warning(
"Please run ChargeDensityAnalyzer.get_local_extrema first!")
return
new_f_coords = []
self._update_extrema(new_f_coords, self.extrema_type)
return new_f_coords
dist_matrix = self.structure.lattice.get_all_distances(f_coords,
s_f_coords)
all_dist = np.min(dist_matrix, axis=1)
new_f_coords = []
for i, f in enumerate(f_coords):
if all_dist[i] > min_dist:
new_f_coords.append(f)
self._update_extrema(new_f_coords, self.extrema_type)
return new_f_coords
def get_structure_with_nodes(self, find_min=True, min_dist=0.5, tol=0.2,
threshold_frac=None, threshold_abs=None):
"""
Get the modified structure with the possible interstitial sites added.
The species is set as a DummySpecie X.
Args:
            find_min (bool): True to find local minima, otherwise find
                local maxima.
min_dist (float): The minimum distance (in Angstrom) that
a predicted site needs to be from existing atoms. A min_dist
with value <= 0 returns all sites without distance checking.
            tol (float): A distance tolerance for node clustering; sites too
                close to other predicted sites will be merged. PBC is taken
                into account.
threshold_frac (float): optional fraction of extrema, which returns
`threshold_frac * tot_num_extrema` extrema fractional
coordinates based on highest/lowest intensity.
E.g. set 0.2 to insert DummySpecie atom at the extrema with 20%
highest or lowest intensity.
Value range: 0 <= threshold_frac <= 1
                Note that threshold_abs and threshold_frac should not be set
                at the same time.
threshold_abs (float): optional filter. When searching for local
minima, intensity <= threshold_abs returns; when searching for
local maxima, intensity >= threshold_abs returns.
                Note that threshold_abs and threshold_frac should not be set
                at the same time.
Returns:
structure (Structure)
"""
structure = self.structure.copy()
self.get_local_extrema(find_min=find_min, threshold_frac=threshold_frac,
threshold_abs=threshold_abs)
self.remove_collisions(min_dist)
self.cluster_nodes(tol=tol)
for fc in self.extrema_coords:
structure.append("X", fc)
return structure
def sort_sites_by_integrated_chg(self, r=0.4):
"""
        Get the average charge density around each local minimum in the charge density
and store the result in _extrema_df
Args:
r (float): radius of sphere around each site to evaluate the average
"""
if self.extrema_type is None:
self.get_local_extrema()
int_den = []
for isite in self.extrema_coords:
mask = self._dist_mat(isite) < r
vol_sphere = self.chgcar.structure.volume * (mask.sum() / self.chgcar.ngridpts)
chg_in_sphere = np.sum(self.chgcar.data['total'] * mask) / mask.size / vol_sphere
int_den.append(chg_in_sphere)
self._extrema_df['avg_charge_den'] = int_den
self._extrema_df.sort_values(by=['avg_charge_den'], inplace=True)
self._extrema_df.reset_index(drop=True, inplace=True)
def _dist_mat(self, pos_frac):
# return a matrix that contains the distances
aa = np.linspace(0, 1, len(self.chgcar.get_axis_grid(0)),
endpoint=False)
bb = np.linspace(0, 1, len(self.chgcar.get_axis_grid(1)),
endpoint=False)
cc = np.linspace(0, 1, len(self.chgcar.get_axis_grid(2)),
endpoint=False)
AA, BB, CC = np.meshgrid(aa, bb, cc, indexing='ij')
dist_from_pos = self.chgcar.structure.lattice.get_all_distances(
fcoords1=np.vstack([AA.flatten(), BB.flatten(), CC.flatten()]).T,
fcoords2=pos_frac)
return dist_from_pos.reshape(AA.shape)
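# Illustrative usage sketch (not part of the original module); "CHGCAR" is a
# hypothetical path to a VASP charge-density file:
#
#     cda = ChargeDensityAnalyzer.from_file("CHGCAR")
#     cda.get_local_extrema(find_min=True, threshold_frac=0.2)
#     cda.sort_sites_by_integrated_chg(r=0.4)
#     candidates = cda.get_structure_with_nodes(find_min=True, min_dist=0.5, tol=0.2)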
def calculate_vol(coords):
"""
Calculate volume given a set of coords.
:param coords: List of coords.
:return: Volume
"""
if len(coords) == 4:
coords_affine = np.ones((4, 4))
coords_affine[:, 0:3] = np.array(coords)
return abs(np.linalg.det(coords_affine)) / 6
else:
simplices = get_facets(coords, joggle=True)
center = np.average(coords, axis=0)
vol = 0
for s in simplices:
c = list(coords[i] for i in s)
c.append(center)
vol += calculate_vol(c)
return vol
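# For example (illustrative, not part of the original module), the volume of
# the unit right tetrahedron evaluates to 1/6:
#
#     calculate_vol([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]])  # -> 0.1666...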
def converge(f, step, tol, max_h):
"""
    Simple fixed-step convergence function: h is increased by `step` until
    successive values of f differ by less than `tol` (or max_h is exceeded).
"""
g = f(0)
dx = 10000
h = step
while (dx > tol):
g2 = f(h)
dx = abs(g - g2)
g = g2
h += step
if h > max_h:
raise Exception("Did not converge before {}".format(h))
return g
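# Illustrative usage (not part of the original module): f is sampled at
# h = step, 2*step, ... until two successive values agree to within tol.
#
#     val = converge(lambda h: math.exp(-h), step=0.5, tol=1e-4, max_h=50.0)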
def tune_for_gamma(lattice, epsilon):
"""
This tunes the gamma parameter for Kumagai anisotropic
    Ewald calculation. The method finds a gamma parameter which generates a
    similar number of reciprocal and real lattice vectors,
    given the cutoff radii suggested by Kumagai and Oba.
"""
logger.debug("Converging for ewald parameter...")
prec = 25 # a reasonable precision to tune gamma for
gamma = (2 * np.average(lattice.abc)) ** (-1 / 2.)
recip_set, _, real_set, _ = generate_R_and_G_vecs(gamma, prec, lattice, epsilon)
recip_set = recip_set[0]
real_set = real_set[0]
logger.debug("First approach with gamma ={}\nProduced {} real vecs and {} recip "
"vecs.".format(gamma, len(real_set), len(recip_set)))
while float(len(real_set)) / len(recip_set) > 1.05 or \
float(len(recip_set)) / len(real_set) > 1.05:
gamma *= (float(len(real_set)) / float(len(recip_set))) ** 0.17
logger.debug("\tNot converged...Try modifying gamma to {}.".format(gamma))
recip_set, _, real_set, _ = generate_R_and_G_vecs(gamma, prec, lattice, epsilon)
recip_set = recip_set[0]
real_set = real_set[0]
logger.debug("Now have {} real vecs and {} recip vecs.".format(len(real_set), len(recip_set)))
logger.debug("Converged with gamma = {}".format(gamma))
return gamma
def generate_R_and_G_vecs(gamma, prec_set, lattice, epsilon):
"""
This returns a set of real and reciprocal lattice vectors
(and real/recip summation values)
based on a list of precision values (prec_set)
gamma (float): Ewald parameter
prec_set (list or number): for prec values to consider (20, 25, 30 are sensible numbers)
    lattice: Lattice object of supercell in question
    epsilon (3x3 array): dielectric tensor of the material
"""
    if not isinstance(prec_set, list):
prec_set = [prec_set]
[a1, a2, a3] = lattice.matrix # Angstrom
volume = lattice.volume
[b1, b2, b3] = lattice.reciprocal_lattice.matrix # 1/ Angstrom
invepsilon = np.linalg.inv(epsilon)
rd_epsilon = np.sqrt(np.linalg.det(epsilon))
# generate reciprocal vector set (for each prec_set)
recip_set = [[] for prec in prec_set]
recip_summation_values = [0. for prec in prec_set]
recip_cut_set = [(2 * gamma * prec) for prec in prec_set]
i_max = int(math.ceil(max(recip_cut_set) / np.linalg.norm(b1)))
j_max = int(math.ceil(max(recip_cut_set) / np.linalg.norm(b2)))
k_max = int(math.ceil(max(recip_cut_set) / np.linalg.norm(b3)))
for i in np.arange(-i_max, i_max + 1):
for j in np.arange(-j_max, j_max + 1):
for k in np.arange(-k_max, k_max + 1):
if not i and not j and not k:
continue
gvec = i * b1 + j * b2 + k * b3
normgvec = np.linalg.norm(gvec)
for recip_cut_ind, recip_cut in enumerate(recip_cut_set):
if normgvec <= recip_cut:
recip_set[recip_cut_ind].append(gvec)
Gdotdiel = np.dot(gvec, np.dot(epsilon, gvec))
summand = math.exp(-Gdotdiel / (4 * (gamma ** 2))) / Gdotdiel
recip_summation_values[recip_cut_ind] += summand
recip_summation_values = np.array(recip_summation_values)
recip_summation_values /= volume
# generate real vector set (for each prec_set)
real_set = [[] for prec in prec_set]
real_summation_values = [0. for prec in prec_set]
real_cut_set = [(prec / gamma) for prec in prec_set]
i_max = int(math.ceil(max(real_cut_set) / np.linalg.norm(a1)))
j_max = int(math.ceil(max(real_cut_set) / np.linalg.norm(a2)))
k_max = int(math.ceil(max(real_cut_set) / np.linalg.norm(a3)))
for i in np.arange(-i_max, i_max + 1):
for j in np.arange(-j_max, j_max + 1):
for k in np.arange(-k_max, k_max + 1):
rvec = i * a1 + j * a2 + k * a3
normrvec = np.linalg.norm(rvec)
for real_cut_ind, real_cut in enumerate(real_cut_set):
if normrvec <= real_cut:
real_set[real_cut_ind].append(rvec)
if normrvec > 1e-8:
sqrt_loc_res = np.sqrt(np.dot(rvec, np.dot(invepsilon, rvec)))
nmr = math.erfc(gamma * sqrt_loc_res)
real_summation_values[real_cut_ind] += nmr / sqrt_loc_res
real_summation_values = np.array(real_summation_values)
real_summation_values /= (4 * np.pi * rd_epsilon)
return recip_set, recip_summation_values, real_set, real_summation_values
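# Illustrative usage sketch (not part of the original module). Lattice is not
# imported in this file, so the import below is an assumption made only for
# the example, and the isotropic dielectric tensor is a made-up value:
#
#     from pymatgen.core.lattice import Lattice
#     lattice = Lattice.cubic(10.0)
#     epsilon = 10.0 * np.identity(3)
#     gamma = tune_for_gamma(lattice, epsilon)
#     recips, recip_sums, reals, real_sums = generate_R_and_G_vecs(
#         gamma, [20, 25, 30], lattice, epsilon)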
| mit |
psychopy/versions | psychopy/data/base.py | 2 | 22096 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
# from future import standard_library
# standard_library.install_aliases()
from builtins import str
from past.builtins import basestring
from builtins import object
import weakref
import pickle
import os
import sys
import copy
import inspect
import codecs
import numpy as np
import pandas as pd
import json_tricks
from pkg_resources import parse_version
import psychopy
from psychopy import logging
from psychopy.tools.filetools import (openOutputFile, genDelimiter,
genFilenameFromDelimiter, pathToString)
from psychopy.tools.fileerrortools import handleFileCollision
from psychopy.tools.arraytools import extendArr
from .utils import _getExcelCellName
try:
import openpyxl
if parse_version(openpyxl.__version__) >= parse_version('2.4.0'):
# openpyxl moved get_column_letter to utils.cell
from openpyxl.utils.cell import get_column_letter
else:
from openpyxl.cell import get_column_letter
from openpyxl import load_workbook, Workbook
haveOpenpyxl = True
except ImportError:
haveOpenpyxl = False
_experiments = weakref.WeakValueDictionary()
class _ComparisonMixin(object):
def __eq__(self, other):
# NoneType and booleans, for example, don't have a .__dict__ attribute.
try:
getattr(other, '__dict__')
except AttributeError:
return False
# Check if the dictionary keys are the same before proceeding.
if set(self.__dict__.keys()) != set(other.__dict__.keys()):
return False
# Loop over all keys, implementing special handling for certain
# data types.
for key, val in self.__dict__.items():
if isinstance(val, np.ma.core.MaskedArray):
if not np.ma.allclose(val, getattr(other, key)):
return False
elif isinstance(val, np.ndarray):
if not np.allclose(val, getattr(other, key)):
return False
elif isinstance(val, (pd.DataFrame, pd.Series)):
if not val.equals(getattr(other, key)):
return False
else:
if val != getattr(other, key):
return False
return True
def __ne__(self, other):
return not self == other
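# Minimal sketch (not part of the original module) of what the mixin provides
# to a subclass: equality is decided attribute by attribute, with numpy arrays
# and pandas objects compared by value rather than by identity.
#
#     class _Dummy(_ComparisonMixin):
#         def __init__(self, vals):
#             self.vals = np.asarray(vals)
#
#     _Dummy([1, 2, 3]) == _Dummy([1, 2, 3])   # True
#     _Dummy([1, 2, 3]) == _Dummy([1, 2, 4])   # False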
class _BaseTrialHandler(_ComparisonMixin):
def setExp(self, exp):
"""Sets the ExperimentHandler that this handler is attached to
Do NOT attempt to set the experiment using::
trials._exp = myExperiment
because it needs to be performed using the `weakref` module.
"""
# need to use a weakref to avoid creating a circular reference that
# prevents effective object deletion
expId = id(exp)
_experiments[expId] = exp
self._exp = expId
# origin will have been stored by the exp so don't store again:
self.origin = None
def getExp(self):
"""Return the ExperimentHandler that this handler is attached to,
if any. Returns None if not attached
"""
if self._exp is None or self._exp not in _experiments:
return None
else:
return _experiments[self._exp]
def _terminate(self):
"""Remove references to ourself in experiments and terminate the loop
"""
# remove ourself from the list of unfinished loops in the experiment
exp = self.getExp()
        if exp is not None:
exp.loopEnded(self)
# and halt the loop
raise StopIteration
def saveAsPickle(self, fileName, fileCollisionMethod='rename'):
"""Basically just saves a copy of the handler (with data) to a
pickle file.
This can be reloaded if necessary and further analyses carried out.
:Parameters:
fileCollisionMethod: Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`
"""
fileName = pathToString(fileName)
if self.thisTrialN < 1 and self.thisRepN < 1:
# if both are < 1 we haven't started
if self.autoLog:
logging.info('.saveAsPickle() called but no trials completed.'
' Nothing saved')
return -1
if not fileName.endswith('.psydat'):
fileName += '.psydat'
with openOutputFile(fileName=fileName, append=False,
fileCollisionMethod=fileCollisionMethod) as f:
pickle.dump(self, f)
logging.info('saved data to %s' % f.name)
def saveAsText(self, fileName,
stimOut=None,
dataOut=('n', 'all_mean', 'all_std', 'all_raw'),
delim=None,
matrixOnly=False,
appendFile=True,
summarised=True,
fileCollisionMethod='rename',
encoding='utf-8-sig'):
"""
Write a text file with the data and various chosen stimulus attributes
:Parameters:
fileName:
will have .tsv appended and can include path info.
stimOut:
the stimulus attributes to be output. To use this you need to
use a list of dictionaries and give here the names of dictionary
keys that you want as strings
dataOut:
a list of strings specifying the dataType and the analysis to
                be performed, in the form `dataType_analysis`. The data can be
any of the types that you added using trialHandler.data.add()
and the analysis can be either 'raw' or most things in the
                numpy library, including: 'mean', 'std', 'median', 'max', 'min'...
The default values will output the raw, mean and std of all
datatypes found
delim:
allows the user to use a delimiter other than tab
("," is popular with file extension ".csv")
matrixOnly:
outputs the data with no header row or extraInfo attached
appendFile:
will add this output to the end of the specified file if
it already exists
fileCollisionMethod:
Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`
encoding:
                The encoding to use when saving the file. Defaults to `utf-8-sig`.
"""
fileName = pathToString(fileName)
if stimOut is None:
stimOut = []
if self.thisTrialN < 1 and self.thisRepN < 1:
# if both are < 1 we haven't started
if self.autoLog:
logging.info('TrialHandler.saveAsText called but no trials'
' completed. Nothing saved')
return -1
dataArray = self._createOutputArray(stimOut=stimOut,
dataOut=dataOut,
matrixOnly=matrixOnly)
# set default delimiter if none given
if delim is None:
delim = genDelimiter(fileName)
# create the file or send to stdout
fileName = genFilenameFromDelimiter(fileName, delim)
with openOutputFile(fileName=fileName, append=appendFile,
fileCollisionMethod=fileCollisionMethod,
encoding=encoding) as f:
# loop through lines in the data matrix
for line in dataArray:
for cellN, entry in enumerate(line):
# surround in quotes to prevent effect of delimiter
if delim in str(entry):
f.write(u'"%s"' % str(entry))
else:
f.write(str(entry))
if cellN < (len(line) - 1):
f.write(delim)
f.write("\n") # add an EOL at end of each line
if (fileName is not None) and (fileName != 'stdout') and self.autoLog:
logging.info('saved data to %s' % f.name)
def printAsText(self, stimOut=None,
dataOut=('all_mean', 'all_std', 'all_raw'),
delim='\t',
matrixOnly=False):
"""Exactly like saveAsText() except that the output goes
to the screen instead of a file
"""
if stimOut is None:
stimOut = []
self.saveAsText('stdout', stimOut, dataOut, delim, matrixOnly)
def saveAsExcel(self, fileName, sheetName='rawData',
stimOut=None,
dataOut=('n', 'all_mean', 'all_std', 'all_raw'),
matrixOnly=False,
appendFile=True,
fileCollisionMethod='rename'):
"""
Save a summary data file in Excel OpenXML format workbook
(:term:`xlsx`) for processing in most spreadsheet packages.
This format is compatible with versions of Excel (2007 or greater)
        and with OpenOffice (>=3.0).
It has the advantage over the simpler text files (see
:func:`TrialHandler.saveAsText()` )
that data can be stored in multiple named sheets within the file.
So you could have a single file named after your experiment and
then have one worksheet for each participant. Or you could have
one file for each participant and then multiple sheets for
repeated sessions etc.
The file extension `.xlsx` will be added if not given already.
:Parameters:
fileName: string
the name of the file to create or append. Can include
relative or absolute path
sheetName: string
the name of the worksheet within the file
stimOut: list of strings
the attributes of the trial characteristics to be output.
To use this you need to have provided a list of dictionaries
                to the trialList parameter of the TrialHandler, and give
                here the names of the entries in those dictionaries that
                you want output
dataOut: list of strings
specifying the dataType and the analysis to
be performed, in the form `dataType_analysis`. The data
can be any of the types that you added using
trialHandler.data.add() and the analysis can be either
'raw' or most things in the numpy library, including
'mean','std','median','max','min'. e.g. `rt_max` will give
a column of max reaction times across the trials assuming
that `rt` values have been stored. The default values will
output the raw, mean and std of all datatypes found.
appendFile: True or False
If False any existing file with this name will be
overwritten. If True then a new worksheet will be appended.
If a worksheet already exists with that name a number will
be added to make it unique.
fileCollisionMethod: string
Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`
                This is ignored if ``appendFile`` is ``True``.
"""
fileName = pathToString(fileName)
if stimOut is None:
stimOut = []
if self.thisTrialN < 1 and self.thisRepN < 1:
# if both are < 1 we haven't started
if self.autoLog:
logging.info('TrialHandler.saveAsExcel called but no '
'trials completed. Nothing saved')
return -1
# NB this was based on the limited documentation (1 page wiki) for
# openpyxl v1.0
if not haveOpenpyxl:
raise ImportError('openpyxl is required for saving files in'
' Excel (xlsx) format, but was not found.')
# return -1
# create the data array to be sent to the Excel file
dataArray = self._createOutputArray(stimOut=stimOut,
dataOut=dataOut,
matrixOnly=matrixOnly)
if not fileName.endswith('.xlsx'):
fileName += '.xlsx'
# create or load the file
if appendFile and os.path.isfile(fileName):
wb = load_workbook(fileName)
newWorkbook = False
else:
if not appendFile:
# the file exists but we're not appending, will be overwritten
fileName = handleFileCollision(fileName,
fileCollisionMethod)
wb = Workbook() # create new workbook
wb.properties.creator = 'PsychoPy' + psychopy.__version__
newWorkbook = True
if newWorkbook:
ws = wb.worksheets[0]
ws.title = sheetName
else:
ws = wb.create_sheet()
ws.title = sheetName
# loop through lines in the data matrix
for lineN, line in enumerate(dataArray):
if line is None:
continue
for colN, entry in enumerate(line):
if entry is None:
entry = ''
try:
# if it can convert to a number (from numpy) then do it
val = float(entry)
except Exception:
val = u"{}".format(entry)
ws.cell(column=colN+1, row=lineN+1, value=val)
wb.save(filename=fileName)
def saveAsJson(self,
fileName=None,
encoding='utf-8-sig',
fileCollisionMethod='rename'):
"""
Serialize the object to the JSON format.
Parameters
----------
fileName: string, or None
the name of the file to create or append. Can include a relative or
absolute path. If `None`, will not write to a file, but return an
in-memory JSON object.
encoding : string, optional
The encoding to use when writing the file.
fileCollisionMethod : string
Collision method passed to
:func:`~psychopy.tools.fileerrortools.handleFileCollision`. Can be
            one of `'rename'`, `'overwrite'`, or `'fail'`.
Notes
-----
Currently, a copy of the object is created, and the copy's .origin
attribute is set to an empty string before serializing
because loading the created JSON file would sometimes fail otherwise.
"""
fileName = pathToString(fileName)
self_copy = copy.deepcopy(self)
self_copy.origin = ''
msg = ('Setting attribute .origin to empty string during JSON '
'serialization.')
logging.warn(msg)
if (fileName is None) or (fileName == 'stdout'):
return json_tricks.dumps(self_copy)
else:
with openOutputFile(fileName=fileName,
fileCollisionMethod=fileCollisionMethod,
encoding=encoding) as f:
json_tricks.dump(self_copy, f)
logging.info('Saved JSON data to %s' % f.name)
def getOriginPathAndFile(self, originPath=None):
"""Attempts to determine the path of the script that created this
data file and returns both the path to that script and its contents.
Useful to store the entire experiment with the data.
If originPath is provided (e.g. from Builder) then this is used
otherwise the calling script is the originPath (fine from a
standard python script).
"""
# self.originPath and self.origin (the contents of the origin file)
if originPath == -1:
return -1, None # the user wants to avoid storing this
elif originPath is None or not os.path.isfile(originPath):
try:
originPath = inspect.getouterframes(
inspect.currentframe())[2][1]
if self.autoLog:
logging.debug("Using %s as origin file" % originPath)
except Exception:
if self.autoLog:
logging.debug("Failed to find origin file using "
"inspect.getouterframes")
return '', ''
if os.path.isfile(originPath): # do we NOW have a path?
with codecs.open(originPath, "r", encoding="utf-8-sig") as f:
origin = f.read()
else:
origin = None
return originPath, origin
class DataHandler(_ComparisonMixin, dict):
"""For handling data (used by TrialHandler, principally, rather than
by users directly)
Numeric data are stored as numpy masked arrays where the mask is set
True for missing entries. When any non-numeric data (string, list or
array) get inserted using DataHandler.add(val) the array is converted
to a standard (not masked) numpy array with dtype='O' and where missing
entries have value = "--".
Attributes:
- ['key']=data arrays containing values for that key
(e.g. data['accuracy']=...)
- dataShape=shape of data (x,y,...z,nReps)
- dataTypes=list of keys as strings
"""
def __init__(self, dataTypes=None, trials=None, dataShape=None):
self.trials = trials
self.dataTypes = [] # names will be added during addDataType
self.isNumeric = {}
# if given dataShape use it - otherwise guess!
if dataShape:
self.dataShape = dataShape
elif self.trials:
self.dataShape = list(np.asarray(trials.trialList, 'O').shape)
self.dataShape.append(trials.nReps)
# initialise arrays now if poss
if dataTypes and self.dataShape:
for thisType in dataTypes:
self.addDataType(thisType)
def __eq__(self, other):
# We ignore an attached TrialHandler object, otherwise we will end up
# in an infinite loop, as this DataHandler is attached to the
# TrialHandler!
from psychopy.data import TrialHandler
if isinstance(self.trials, TrialHandler):
self_copy = copy.deepcopy(self)
other_copy = copy.deepcopy(other)
del self_copy.trials, other_copy.trials
result = super(DataHandler, self_copy).__eq__(other_copy)
msg = ('TrialHandler object detected in .trials. Excluding it from '
'comparison.')
logging.warning(msg)
else:
result = super(DataHandler, self).__eq__(other)
return result
def addDataType(self, names, shape=None):
"""Add a new key to the data dictionary of particular shape if
        specified (otherwise the shape of the trial matrix in the trial
        handler is used). Data are initialised to be zero everywhere. Not
        needed by the user: appropriate types will be added during
        initialisation and as each extra type is needed.
"""
if not shape:
shape = self.dataShape
if not isinstance(names, basestring):
# recursively call this function until we have a string
for thisName in names:
self.addDataType(thisName)
else:
# create the appropriate array in the dict
# initially use numpy masked array of floats with mask=True
# for missing vals. convert to a numpy array with dtype='O'
# if non-numeric data given. NB don't use masked array with
            # dtype='O' together - they don't unpickle
self[names] = np.ma.zeros(shape, 'f') # masked array of floats
self[names].mask = True
# add the name to the list
self.dataTypes.append(names)
self.isNumeric[names] = True # until we need otherwise
def add(self, thisType, value, position=None):
"""Add data to an existing data type (and add a new one if necess)
"""
        if thisType not in self:
self.addDataType(thisType)
if position is None:
# 'ran' is always the first thing to update
repN = sum(self['ran'][self.trials.thisIndex])
if thisType != 'ran':
# because it has already been updated
repN -= 1
# make a list where 1st digit is trial number
position = [self.trials.thisIndex]
position.append(repN)
# check whether data falls within bounds
posArr = np.asarray(position)
shapeArr = np.asarray(self.dataShape)
if not np.alltrue(posArr < shapeArr):
# array isn't big enough
logging.warning('need a bigger array for: ' + thisType)
# not implemented yet!
self[thisType] = extendArr(self[thisType], posArr)
# check for ndarrays with more than one value and for non-numeric data
if (self.isNumeric[thisType] and
((type(value) == np.ndarray and len(value) > 1) or
(type(value) not in [float, int]))):
self._convertToObjectArray(thisType)
# insert the value
self[thisType][position[0], int(position[1])] = value
def _convertToObjectArray(self, thisType):
"""Convert this datatype from masked numeric array to unmasked
object array
"""
dat = self[thisType]
# create an array of Object type
self[thisType] = np.array(dat.data, dtype='O')
# masked vals should be "--", others keep data
        # we have to repeat forcing to 'O' or text gets truncated to 4 chars
self[thisType] = np.where(dat.mask, '--', dat).astype('O')
self.isNumeric[thisType] = False
| gpl-3.0 |
zuku1985/scikit-learn | examples/cluster/plot_color_quantization.py | 61 | 3444 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0-1])
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
sugartom/tensorflow-alien | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 2 | 36442 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testModelFnArgs(self):
expected_param = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
def _argument_checker(features, labels, mode, params, config):
_, _ = features, labels
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertTrue(config.i_am_test)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(
model_fn=_argument_checker,
params=expected_param,
config=expected_config)
est.fit(input_fn=boston_input_fn, steps=1)
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing training_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffold(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config)
self.assertEqual('test_dir', est.config.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaises(ValueError):
estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='different_dir')
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    # Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
# We pass empty array and expect it to remain empty after calling
    # fit and evaluate. This requires the implementation to copy the array
    # internally if any hooks were added.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool),
None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
| apache-2.0 |
nvoron23/statsmodels | statsmodels/sandbox/tsa/example_arma.py | 27 | 11572 | '''trying to verify theoretical acf of arma
explicit functions for autocovariance functions of ARMA(1,1), MA(1), MA(2)
plus 3 functions from nitime.utils
'''
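# Reference check (standard textbook result, added for orientation; not part of
# the original script): an MA(1) process x_t = e_t + b*e_{t-1} with unit
# innovation variance has
#     gamma_0 = 1 + b**2,   gamma_1 = b,   gamma_k = 0 for k >= 2,
# so ma = [1., 0.4] should give autocovariances close to [1.16, 0.4, 0., ...],
# which is what the 'ma1' case in the comparison loop below checks.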
from __future__ import print_function
from statsmodels.compat.python import range
import numpy as np
from numpy.testing import assert_array_almost_equal
import matplotlib.mlab as mlab
from statsmodels.tsa.arima_process import arma_generate_sample, arma_impulse_response
from statsmodels.tsa.arima_process import arma_acovf, arma_acf, ARIMA
#from movstat import acf, acovf
#from statsmodels.sandbox.tsa import acf, acovf, pacf
from statsmodels.tsa.stattools import acf, acovf, pacf
ar = [1., -0.6]
#ar = [1., 0.]
ma = [1., 0.4]
#ma = [1., 0.4, 0.6]
#ma = [1., 0.]
mod = ''#'ma2'
x = arma_generate_sample(ar, ma, 5000)
x_acf = acf(x)[:10]
x_ir = arma_impulse_response(ar, ma)
#print x_acf[:10]
#print x_ir[:10]
#irc2 = np.correlate(x_ir,x_ir,'full')[len(x_ir)-1:]
#print irc2[:10]
#print irc2[:10]/irc2[0]
#print irc2[:10-1] / irc2[1:10]
#print x_acf[:10-1] / x_acf[1:10]
# detrend helper from matplotlib.mlab
def detrend(x, key=None):
if key is None or key=='constant':
return detrend_mean(x)
elif key=='linear':
return detrend_linear(x)
def demean(x, axis=0):
"Return x minus its mean along the specified axis"
x = np.asarray(x)
if axis:
ind = [slice(None)] * axis
ind.append(np.newaxis)
return x - x.mean(axis)[ind]
return x - x.mean(axis)
def detrend_mean(x):
"Return x minus the mean(x)"
return x - x.mean()
def detrend_none(x):
"Return x: no detrending"
return x
def detrend_linear(y):
"Return y minus best fit line; 'linear' detrending "
# This is faster than an algorithm based on linalg.lstsq.
x = np.arange(len(y), dtype=np.float_)
C = np.cov(x, y, bias=1)
b = C[0,1]/C[0,0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
def acovf_explicit(ar, ma, nobs):
    '''compute autocovariances explicitly from the MA (impulse response) representation
'''
ir = arma_impulse_response(ar, ma)
acovfexpl = [np.dot(ir[:nobs-t], ir[t:nobs]) for t in range(10)]
return acovfexpl
def acovf_arma11(ar, ma):
# ARMA(1,1)
# Florens et al page 278
# wrong result ?
# new calculation bigJudge p 311, now the same
a = -ar[1]
b = ma[1]
#rho = [1.]
#rho.append((1-a*b)*(a-b)/(1.+a**2-2*a*b))
rho = [(1.+b**2+2*a*b)/(1.-a**2)]
rho.append((1+a*b)*(a+b)/(1.-a**2))
for _ in range(8):
last = rho[-1]
rho.append(a*last)
return np.array(rho)
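# For reference (standard result, added for orientation): with x_t = a*x_{t-1}
# + e_t + b*e_{t-1} and unit innovation variance,
#     gamma_0 = (1 + b**2 + 2*a*b) / (1 - a**2)
#     gamma_1 = (1 + a*b) * (a + b) / (1 - a**2)
#     gamma_k = a * gamma_{k-1}          for k >= 2,
# which is the recursion implemented above (with a = -ar[1], b = ma[1]).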
# print acf11[:10]
# print acf11[:10] /acf11[0]
def acovf_ma2(ma):
# MA(2)
# from Greene p616 (with typo), Florens p280
b1 = -ma[1]
b2 = -ma[2]
rho = np.zeros(10)
rho[0] = (1 + b1**2 + b2**2)
rho[1] = (-b1 + b1*b2)
rho[2] = -b2
return rho
# rho2 = rho/rho[0]
# print rho2
# print irc2[:10]/irc2[0]
def acovf_ma1(ma):
# MA(1)
# from Greene p616 (with typo), Florens p280
b = -ma[1]
rho = np.zeros(10)
rho[0] = (1 + b**2)
rho[1] = -b
return rho
# rho2 = rho/rho[0]
# print rho2
# print irc2[:10]/irc2[0]
ar1 = [1., -0.8]
ar0 = [1., 0.]
ma1 = [1., 0.4]
ma2 = [1., 0.4, 0.6]
ma0 = [1., 0.]
comparefn = dict(
[('ma1', acovf_ma1),
('ma2', acovf_ma2),
('arma11', acovf_arma11),
('ar1', acovf_arma11)])
cases = [('ma1', (ar0, ma1)),
('ma2', (ar0, ma2)),
('arma11', (ar1, ma1)),
('ar1', (ar1, ma0))]
for c, args in cases:
ar, ma = args
print('')
print(c, ar, ma)
myacovf = arma_acovf(ar, ma, nobs=10)
myacf = arma_acf(ar, ma, nobs=10)
if c[:2]=='ma':
othacovf = comparefn[c](ma)
else:
othacovf = comparefn[c](ar, ma)
print(myacovf[:5])
print(othacovf[:5])
#something broke again,
#for high persistence case eg ar=0.99, nobs of IR has to be large
#made changes to arma_acovf
assert_array_almost_equal(myacovf, othacovf,10)
assert_array_almost_equal(myacf, othacovf/othacovf[0],10)
#from nitime.utils
def ar_generator(N=512, sigma=1.):
# this generates a signal u(n) = a1*u(n-1) + a2*u(n-2) + ... + v(n)
# where v(n) is a stationary stochastic process with zero mean
# and variance = sigma
    # this sequence is shown to be estimated well by a low-order AR system
taps = np.array([2.7607, -3.8106, 2.6535, -0.9238])
v = np.random.normal(size=N, scale=sigma**0.5)
u = np.zeros(N)
P = len(taps)
for l in range(P):
u[l] = v[l] + np.dot(u[:l][::-1], taps[:l])
for l in range(P,N):
u[l] = v[l] + np.dot(u[l-P:l][::-1], taps)
return u, v, taps
#JP: small differences to using np.correlate, because assumes mean(s)=0
# denominator is N, not N-k, biased estimator
# misnomer: (biased) autocovariance not autocorrelation
#from nitime.utils
def autocorr(s, axis=-1):
"""Returns the autocorrelation of signal s at all lags. Adheres to the
definition r(k) = E{s(n)s*(n-k)} where E{} is the expectation operator.
"""
N = s.shape[axis]
S = np.fft.fft(s, n=2*N-1, axis=axis)
sxx = np.fft.ifft(S*S.conjugate(), axis=axis).real[:N]
return sxx/N
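# Hedged sanity check (illustrative, not in the original module): because the
# FFT above zero-pads to length 2*N-1, the result equals the linear correlation
#     np.correlate(s, s, 'full')[len(s) - 1:] / len(s)
# up to floating-point round-off; the N (rather than N-k) denominator is what
# makes this the biased estimator mentioned above.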
#JP: with valid this returns a single value, if x and y have same length
# e.g. norm_corr(x, x)
# using std subtracts mean, but correlate doesn't, requires means are exactly 0
# biased, no n-k correction for laglength
#from nitime.utils
def norm_corr(x,y,mode = 'valid'):
"""Returns the correlation between two ndarrays, by calling np.correlate in
'same' mode and normalizing the result by the std of the arrays and by
their lengths. This results in a correlation = 1 for an auto-correlation"""
return ( np.correlate(x,y,mode) /
(np.std(x)*np.std(y)*(x.shape[-1])) )
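# Hedged sanity check (illustrative, not in the original module): because
# np.std removes the mean while np.correlate does not, norm_corr(x, x) in
# 'valid' mode returns approximately 1 only when x has (near) zero mean;
# for a nonzero mean it returns roughly 1 + x.mean()**2 / x.var().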
# from matplotlib axes.py
# note: self is axis
def pltacorr(self, x, **kwargs):
"""
call signature::
acorr(x, normed=True, detrend=detrend_none, usevlines=True,
maxlags=10, **kwargs)
Plot the autocorrelation of *x*. If *normed* = *True*,
normalize the data by the autocorrelation at 0-th lag. *x* is
    detrended by the *detrend* callable (default no detrending).
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length 2*maxlags+1 lag vector
- *c* is the 2*maxlags+1 auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :meth:`plot`
The default *linestyle* is None and the default *marker* is
``'o'``, though these can be overridden with keyword args.
The cross correlation is performed with
:func:`numpy.correlate` with *mode* = 2.
If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`
rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw
vertical lines from the origin to the acorr. Otherwise, the
plot style is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
*maxlags* is a positive integer detailing the number of lags
to show. The default value of *None* will return all
:math:`2 \mathrm{len}(x) - 1` lags.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where
- *linecol* is the
:class:`~matplotlib.collections.LineCollection`
- *b* is the *x*-axis.
.. seealso::
:meth:`~matplotlib.axes.Axes.plot` or
:meth:`~matplotlib.axes.Axes.vlines`
For documentation on valid kwargs.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
return self.xcorr(x, x, **kwargs)
def pltxcorr(self, x, y, normed=True, detrend=detrend_none,
usevlines=True, maxlags=10, **kwargs):
"""
call signature::
def xcorr(self, x, y, normed=True, detrend=detrend_none,
usevlines=True, maxlags=10, **kwargs):
Plot the cross correlation between *x* and *y*. If *normed* =
*True*, normalize the data by the cross correlation at 0-th
lag. *x* and y are detrended by the *detrend* callable
(default no normalization). *x* and *y* must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
- *c* is the ``2*maxlags+1`` auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
*maxlags* is a positive integer detailing the number of lags to show.
The default value of *None* will return all ``(2*len(x)-1)`` lags.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y))
if maxlags is None: maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
        raise ValueError('maxlags must be None or strictly '
                         'positive < %d' % Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
d = self.plot(lags, c, **kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
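# Note on the values returned above (illustrative): with the np.correlate
# convention used in pltxcorr, the unnormalized c at lag k = lags[i] equals
# sum_n x[n + k] * y[n]; when normed=True it is divided by
# sqrt(dot(x, x) * dot(y, y)), so the zero-lag autocorrelation equals 1.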
arrvs = ar_generator()
##arma = ARIMA()
##res = arma.fit(arrvs[0], 4, 0)
arma = ARIMA(arrvs[0])
res = arma.fit((4, 0, 0))
print(res[0])
acf1 = acf(arrvs[0])
acovf1b = acovf(arrvs[0], unbiased=False)
acf2 = autocorr(arrvs[0])
acf2m = autocorr(arrvs[0]-arrvs[0].mean())
print(acf1[:10])
print(acovf1b[:10])
print(acf2[:10])
print(acf2m[:10])
x = arma_generate_sample([1.0, -0.8], [1.0], 500)
print(acf(x)[:20])
import statsmodels.api as sm
print(sm.regression.yule_walker(x, 10))
import matplotlib.pyplot as plt
#ax = plt.axes()
plt.plot(x)
#plt.show()
plt.figure()
pltxcorr(plt, x, x)
plt.figure()
pltxcorr(plt, x, x, usevlines=False)
plt.figure()
#FIXME: plotacf was moved to graphics/tsaplots.py, and interface changed
plotacf(plt, acf1[:20], np.arange(len(acf1[:20])), usevlines=True)
plt.figure()
ax = plt.subplot(211)
plotacf(ax, acf1[:20], usevlines=True)
ax = plt.subplot(212)
plotacf(ax, acf1[:20], np.arange(len(acf1[:20])), usevlines=False)
#plt.show()
| bsd-3-clause |
tgquintela/NetSymulationTools | setup.py | 2 | 2050 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup script for the installation of NetTools.
It is possible to install this package with
python setup.py install
"""
from glob import glob
import sys
import os
import warnings
from NetTools import release
## Temporally commented
#if os.path.exists('MANIFEST'):
# os.remove('MANIFEST')
## Definition of useful functions
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
## Check problems with the setuptools
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'setup.py':
print("To install, run 'python setup.py install'")
print()
version = release.write_versionfile()
packages = ['NetTools',
'NetTools.NetSimulation', 'NetTools.NetStructure',
'NetTools.tests'
]
docdirbase = 'share/doc/NetTools-%s' % version
# add basic documentation
data = [(docdirbase, glob("*.txt"))]
# add examples
for d in ['advanced',
'algorithms']:
dd = os.path.join(docdirbase, 'examples', d)
pp = os.path.join('examples', d)
data.append((dd, glob(os.path.join(pp, "*.py"))))
data.append((dd, glob(os.path.join(pp, "*.bz2"))))
data.append((dd, glob(os.path.join(pp, "*.gz"))))
data.append((dd, glob(os.path.join(pp, "*.mbox"))))
data.append((dd, glob(os.path.join(pp, "*.edgelist"))))
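# Each entry appended to `data` is a (target_dir, [source_files]) pair in the
# distutils data_files format, e.g. (hypothetical paths)
# ('share/doc/NetTools-<version>/examples/advanced', ['examples/advanced/demo.py']);
# note that `data` is not currently passed to setup() below.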
# add the tests
#package_data = {'NetTools': ['tests/*.py']}
package_data = {}
install_requires = ['numpy', 'scipy', 'pandas', 'matplotlib']
## Setup
setup(name=release.name,
version=version,
description=release.description,
license=release.license,
platforms=release.platforms,
maintainer=release.maintainer,
maintainer_email=release.maintainer_email,
author=release.author,
author_email=release.author_email,
url=release.url,
classifiers=release.classifiers,
long_description=read('README.md'),
packages=packages,
install_requires=install_requires,
)
| mit |
saromanov/scikit-json | scikit_json/scikit_json.py | 1 | 8294 |
#from sklearn.datasets import load_digits
import os
import json
import csv
# Dirty, but it needs to load all models
from sklearn import *
from sklearn.externals import joblib
import sklearn
import argparse
import functools
import numpy as np
import logging
class Scikitjson:
def __init__(self):
self.jsonmodel = None
self.path = None
def loadFile(self, path):
self.jsonmodel = self._innerLoad(path)
self.path = path
def loadJSONModel(self, model):
""" Load model without reading from file"""
self.jsonmodel = json.loads(model)
return self.jsonmodel
def _innerLoad(self, path):
if not os.path.exists(path):
raise Exception("file {0} not found".format(path))
fs = open(path, 'r')
raw_data = fs.read()
fs.close()
return self.loadJSONModel(raw_data)
def run(self):
if self.jsonmodel == None:
raise Exception("Model was not loaded")
model = ConstructModel(self.jsonmodel, title=self.path)
return model.run()
class ConstructModel:
def __init__(self, jsonmodel, title=None):
self.jsonmodel = jsonmodel
self.title = title
def _construct_dataset(self, title):
alldatasets = dir(datasets)
if title in alldatasets:
ds = getattr(datasets, title)()
return ds.data, ds.target
def _construct_user_dataset(self, userdataset):
''' Load data from file '''
logging.info("Start to construct user dataset")
filetype = 'default'
if 'path' not in userdataset:
raise Exception("path param is not found")
path = userdataset['path']
if 'data' not in userdataset:
raise Exception(
'data param (start and indexes on training) not found')
else:
dataidx = userdataset['data']
if 'labels' not in userdataset:
print(
'Labels param not found. Default label index will be last index on file')
            labelsidx = [-1, None]  # default: treat the last column as the label
else:
labelsidx = userdataset['labels']
if 'split' not in userdataset:
splitter = ' '
else:
splitter = userdataset['split']
if not os.path.exists(path):
raise Exception("Dataset file not found")
if 'type' in userdataset:
filetype = userdataset['type']
        if filetype == 'default':
            return self._parse_dataset_by_default(path, splitter,
                                                   dataidx, labelsidx)
        elif filetype == 'csv':
            return self._parse_as_csv(path)
        else:
            raise Exception("This type of dataset format is not supported")
    def _parse_dataset_by_default(self, path, splitter, dataidx, labelsidx):
        fs = open(path, 'r')
        lines = fs.readlines()
        fs.close()
        X = []
        y = []
        for line in lines:
            res = line.split(splitter)
            X.append(res[dataidx[0]: dataidx[1]])
            y.extend(res[labelsidx[0]: labelsidx[1]])
        logging.info("Finished constructing the user dataset")
        return np.array(X), np.array(y)
def _parse_as_csv(self, path):
if not os.path.exists(path):
raise Exception("Path for loading dataset is not found")
        fs = open(path, 'r')
        lines = fs.read().splitlines()
        fs.close()
        return csv.reader(lines)
def _split_dataset(self, X, y):
''' Split current dataset on training and testing '''
pass
def _construct_method(self, title):
return self._find_method(title)
def _find_method(self, title):
args = {}
if isinstance(title, dict):
candsplit = title['name'].split('.')
args = title['params']
else:
candsplit = title.split('.')
allmethods = dir(sklearn)
if len(candsplit) > 1:
name = candsplit[0]
#model = sklearn
return functools.reduce(lambda x, a: getattr(x, a), candsplit[1:], getattr(sklearn, name))(**args)
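    # Resolution sketch (hypothetical input): a model entry such as
    #   {"name": "ensemble.RandomForestClassifier", "params": {"n_estimators": 10}}
    # is split on '.', walked attribute by attribute starting from the sklearn
    # package and instantiated with the params, i.e. roughly
    #   sklearn.ensemble.RandomForestClassifier(n_estimators=10)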
def _random_forest(self):
from sklearn.ensemble import RandomForestClassifier
return RandomForestClassifier(n_estimators=100)
def _construct_default_model(self, typetitle):
""" This comes from 'type'"""
logging.info("Start to construct deafault model")
typetitle = typetitle.lower()
if typetitle == 'classification':
return self._random_forest()
if typetitle == 'regression':
from sklearn.linear_model import LogisticRegression
return LogisticRegression(penalty='l2')
if typetitle == 'clustering':
from sklearn.cluster import KMeans
return KMeans()
def try_to_save(self, model, path):
''' In the case if parameter save in on '''
if path == None:
return
joblib.dump(model, path, compress=9)
def try_to_load(self, path):
return joblib.load(path)
def _predict_and_show(self, method, methodname, data):
result = method.predict(data)
print("Result: {0} ({1})".format(result, methodname))
return result
def run(self):
if self.title != None:
print("Model from {0}\n".format(self.title))
modelnames = list(self.jsonmodel.keys())
if len(list(modelnames)) == 0:
return []
for key in list(modelnames):
yield self.run_inner(key)
def run_inner(self, name):
'''
return predicted value
'''
logging.info("Start to prepare model {0}".format(name))
print("Model name: {0} ".format(name))
typeparams = self.jsonmodel[name]
if typeparams == {}:
return []
items = {key.lower(): value for (key, value) in typeparams.items()}
''' In the case if exists some model.pkl
Example of usage:
loading.json
{
"class1" :{
load:"model.pkl",
predict: [1,2,3]
}
}
'''
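        ''' A dataset-driven model is also possible (hypothetical example):
        {
            "iris_forest": {
                "dataset": "load_iris",
                "method": "ensemble.RandomForestClassifier",
                "predict": [[5.1, 3.5, 1.4, 0.2]],
                "save": "iris.pkl"
            }
        }
        '''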
if 'load' in items:
method = self.try_to_load(items['load'])
if 'predict' not in items:
return
            return self._predict_and_show(method, items['load'],
                                          items['predict'])
''' In the case if you want experimenting with datasets in sklearn'''
if 'dataset' in items:
X, y = self._construct_dataset(items['dataset'])
if 'dataset_file' in items:
X, y = self._construct_user_dataset(items['dataset_file'])
methodname = items['method'] if 'method' in items else 'RandomForest'
method = self._construct_method(
items['method']) if 'method' in items else self._random_forest()
if 'method' in items:
method = self._construct_method(items['method'])
elif 'type' in items:
# Now supported is 'classification' and 'regression'
thistype = items['type']
method = self._construct_default_model(thistype)
'''else:
raise Exception("Model not found")'''
method.fit(X, y)
self.try_to_save(method, items['save'] if 'save' in items else None)
if 'predict' not in items:
print("Predict not contains in your model")
return
return self._predict_and_show(method, methodname, items['predict'])
def configure_logging(level):
if level == None:
return
level = level.lower()
title = logging.NOTSET
if level == 'debug':
title = logging.DEBUG
if level == 'info':
title = logging.INFO
    if level == 'warning':
        title = logging.WARNING
if level == 'critical':
title = logging.CRITICAL
if level == 'error':
title = logging.ERROR
logging.basicConfig(level=title)
def main(path):
sj = Scikitjson()
if path == None:
log.error("Path to JSON model not found")
return
sj.loadFile(path)
print(list(sj.run()))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--json', help='path to json model')
parser.add_argument(
'--loglevel', help='DEBUG level to show all info messages')
args = parser.parse_args()
configure_logging(args.loglevel)
main(args.json)
| mit |
achon22/cs231nLung | luna/preprocess.py | 1 | 9510 | # Luna preprocessing code from https://www.kaggle.com/arnavkj95/candidate-generation-and-luna16-preprocessing
import numpy as np # linear algebra
import os
import skimage, os
from skimage.morphology import ball, disk, dilation, binary_erosion, remove_small_objects, erosion, closing, reconstruction, binary_closing
from skimage.measure import label,regionprops, perimeter
from skimage.morphology import binary_dilation, binary_opening
from skimage.filters import roberts, sobel
from skimage import measure, feature
from skimage.segmentation import clear_border
from skimage import data
from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import dicom
import scipy.misc
import numpy as np
import SimpleITK as sitk
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
def get_annotations(filename):
lines = [line.strip().split(',') for line in open(filename)][1:]
result = {}
for i in range(len(lines)):
key = lines[i][0]
for j in range(1,5):
lines[i][j] = float(lines[i][j])
if key in result:
result[key].append(lines[i])
else:
result[key] = [lines[i]]
return result
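# The returned dict maps each seriesuid to a list of rows of the form
# [seriesuid, coordX, coordY, coordZ, diameter_mm] with columns 1-4 cast to
# float, matching the layout of the LUNA16 annotations.csv used below.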
def segment_lung_from_ct_scan(ct_scan):
return np.asarray([get_segmented_lungs(slice) for slice in ct_scan])
def get_segmented_lungs(im, plot=False):
'''
    This function segments the lungs from the given 2D slice.
'''
if plot == True:
f, plots = plt.subplots(8, 1, figsize=(5, 40))
'''
Step 1: Convert into a binary image.
'''
binary = im < 604
if plot == True:
plots[0].axis('off')
plots[0].imshow(binary, cmap=plt.cm.bone)
'''
Step 2: Remove the blobs connected to the border of the image.
'''
cleared = clear_border(binary)
if plot == True:
plots[1].axis('off')
plots[1].imshow(cleared, cmap=plt.cm.bone)
'''
Step 3: Label the image.
'''
label_image = label(cleared)
if plot == True:
plots[2].axis('off')
plots[2].imshow(label_image, cmap=plt.cm.bone)
'''
Step 4: Keep the labels with 2 largest areas.
'''
areas = [r.area for r in regionprops(label_image)]
areas.sort()
if len(areas) > 2:
for region in regionprops(label_image):
if region.area < areas[-2]:
for coordinates in region.coords:
label_image[coordinates[0], coordinates[1]] = 0
binary = label_image > 0
if plot == True:
plots[3].axis('off')
plots[3].imshow(binary, cmap=plt.cm.bone)
'''
    Step 5: Erosion operation with a disk of radius 2. This operation
    separates the lung nodules attached to the blood vessels.
'''
selem = disk(2)
binary = binary_erosion(binary, selem)
if plot == True:
plots[4].axis('off')
plots[4].imshow(binary, cmap=plt.cm.bone)
'''
    Step 6: Closure operation with a disk of radius 10. This operation keeps
    nodules attached to the lung wall.
'''
selem = disk(10)
binary = binary_closing(binary, selem)
if plot == True:
plots[5].axis('off')
plots[5].imshow(binary, cmap=plt.cm.bone)
'''
Step 7: Fill in the small holes inside the binary mask of lungs.
'''
edges = roberts(binary)
binary = ndi.binary_fill_holes(edges)
if plot == True:
plots[6].axis('off')
plots[6].imshow(binary, cmap=plt.cm.bone)
'''
Step 8: Superimpose the binary mask on the input image.
'''
get_high_vals = binary == 0
im[get_high_vals] = 0
if plot == True:
plots[7].axis('off')
plots[7].imshow(im, cmap=plt.cm.bone)
return im
def load_itk(filename):
# Reads the image using SimpleITK
itkimage = sitk.ReadImage(filename)
# Convert the image to a numpy array first and then shuffle the dimensions to get axis in the order z,y,x
ct_scan = sitk.GetArrayFromImage(itkimage)
# Read the origin of the ct_scan, will be used to convert the coordinates from world to voxel and vice versa.
origin = np.array(list(reversed(itkimage.GetOrigin())))
# Read the spacing along each dimension
spacing = np.array(list(reversed(itkimage.GetSpacing())))
return ct_scan, origin, spacing
'''
This function is used to convert the world coordinates to voxel coordinates using
the origin and spacing of the ct_scan
'''
def world_2_voxel(world_coordinates, origin, spacing):
stretched_voxel_coordinates = np.absolute(world_coordinates - origin)
voxel_coordinates = stretched_voxel_coordinates / spacing
return voxel_coordinates
'''
This function is used to convert the voxel coordinates to world coordinates using
the origin and spacing of the ct_scan.
'''
def voxel_2_world(voxel_coordinates, origin, spacing):
stretched_voxel_coordinates = voxel_coordinates * spacing
world_coordinates = stretched_voxel_coordinates + origin
return world_coordinates
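# Round-trip sketch with made-up values: for origin = np.array([-100., -200., -300.])
# and spacing = np.array([2.5, 0.7, 0.7]), the world point w = np.array([-50., -150., -250.])
# maps to voxel coordinates [20., ~71.43, ~71.43] and back to w. Because
# world_2_voxel takes np.absolute, the round trip only holds for points lying on
# the positive side of the origin along each axis.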
def seq(start, stop, step=1):
n = int(round((stop - start)/float(step)))
if n > 1:
return([start + step*i for i in range(n+1)])
else:
return([])
'''
This function is used to create spherical regions in binary masks
at the given locations and radius.
'''
def draw_circles(image,cands,origin,spacing):
#make empty matrix, which will be filled with the mask
RESIZE_SPACING = [1, 1, 1]
image_mask = np.zeros(image.shape)
#run over all the nodules in the lungs
for ca in cands:
#get middel x-,y-, and z-worldcoordinate of the nodule
radius = np.ceil(ca[4])/2
coord_x = ca[1]
coord_y = ca[2]
coord_z = ca[3]
image_coord = np.array((coord_z,coord_y,coord_x))
#determine voxel coordinate given the worldcoordinate
image_coord = world_2_voxel(image_coord,origin,spacing)
#determine the range of the nodule
noduleRange = seq(-radius, radius, RESIZE_SPACING[0])
#create the mask
for x in noduleRange:
for y in noduleRange:
for z in noduleRange:
coords = world_2_voxel(np.array((coord_z+z,coord_y+y,coord_x+x)),origin,spacing)
if (np.linalg.norm(image_coord-coords) * RESIZE_SPACING[0]) < radius:
image_mask[int(np.round(coords[0])),int(np.round(coords[1])),int(np.round(coords[2]))] = int(1)
return image_mask
def create_nodule_mask(imagePath, maskPath, cands, exId):
#if os.path.isfile(imagePath.replace('original',SAVE_FOLDER_image)) == False:
img, origin, spacing = load_itk(imagePath)
#calculate resize factor
RESIZE_SPACING = [1, 1, 1]
resize_factor = spacing / RESIZE_SPACING
new_real_shape = img.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize = new_shape / img.shape
new_spacing = spacing / real_resize
#resize image
lung_img = scipy.ndimage.interpolation.zoom(img, real_resize)
# Segment the lung structure
lung_img = lung_img + 1024
lung_mask = segment_lung_from_ct_scan(lung_img)
lung_img = lung_img - 1024
#create nodule mask
nodule_mask = draw_circles(lung_img,cands,origin,new_spacing)
lung_img_512, lung_mask_512, nodule_mask_512 = np.zeros((lung_img.shape[0], 512, 512)), np.zeros((lung_mask.shape[0], 512, 512)), np.zeros((nodule_mask.shape[0], 512, 512))
original_shape = lung_img.shape
for z in range(lung_img.shape[0]):
        offset = (512 - original_shape[1])
        upper_offset = int(np.round(offset / 2))
        lower_offset = int(offset - upper_offset)
new_origin = voxel_2_world([-upper_offset,-lower_offset,0],origin,new_spacing)
lung_img_512[z, upper_offset:-lower_offset,upper_offset:-lower_offset] = lung_img[z,:,:]
lung_mask_512[z, upper_offset:-lower_offset,upper_offset:-lower_offset] = lung_mask[z,:,:]
nodule_mask_512[z, upper_offset:-lower_offset,upper_offset:-lower_offset] = nodule_mask[z,:,:]
# save images.
#np.save(maskPath + exId + '_lung_img.npz', lung_img_512)
'''
np.save(maskPath + exId + '_lung_mask.npz', lung_mask_512)
np.save(maskPath + exId + '_nodule_mask.npz', nodule_mask_512)
'''
included_slices = [z for z in range(lung_img_512.shape[0]) if np.sum(nodule_mask_512[z]) > 0.001]
np.save(maskPath + exId + '_lung_mask', lung_mask_512[included_slices].astype(np.float16))
np.save(maskPath + exId + '_nodule_mask', nodule_mask_512[included_slices].astype(np.bool))
def main():
# ct_scan, origin, spacing = load_itk("lunaFiles/subset5/1.3.6.1.4.1.14519.5.2.1.6279.6001.255999614855292116767517149228.mhd")
# print(ct_scan.shape)
# print(origin.shape)
# print(spacing.shape)
cands = get_annotations("CSVFILES/annotations.csv")
# exId = "1.3.6.1.4.1.14519.5.2.1.6279.6001.323408652979949774528873200770"
path = "lunaFiles/subset9/"
patients = os.listdir(path)
allIds = set([patient[:-4] for patient in patients])
for exId in allIds:
if exId in cands:
create_nodule_mask(path + exId + ".mhd", "masks/", cands[exId], exId)
print("finished " + exId)
else:
print("no nodules in " + exId)
if __name__ == '__main__':
main()
| mit |
zihua/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 311 | 5431 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom. The criteria are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regards
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta parameter. In contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large amount. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested-cross validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
| bsd-3-clause |
trankmichael/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 202 | 3757 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
msg = "Estimator type doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(LogisticRegression)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
intuition-io/intuition | tests/api/test_datafeed.py | 1 | 9643 | '''
Tests for intuition.api.datafeed
'''
import unittest
from nose.tools import raises, ok_, eq_, nottest
import random
import pytz
import datetime as dt
import pandas as pd
import intuition.api.datafeed as datafeed
from intuition.data.universe import Market
from intuition.errors import InvalidDatafeed
import dna.test_utils
class FakeBacktestDatasource(object):
def __init__(self, sids, properties):
pass
@property
def mapping(self):
return {
'backtest': (lambda x: True, 'sid'),
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
def get_data(self, sids, start, end):
index = pd.date_range(start, end, tz=pytz.utc)
return pd.DataFrame({sid: [random.random()] * len(index)
for sid in sids}, index=index)
class FakePanelBacktestDatasource(object):
def __init__(self, sids, properties):
pass
@property
def mapping(self):
return {
'backtest': (lambda x: True, 'sid'),
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'low': (float, 'low'),
'high': (float, 'high'),
'volume': (int, 'volume'),
}
def get_data(self, sids, start, end):
index = pd.date_range(start, end, tz=pytz.utc)
fake_data = {}
for sid in sids:
fake_data[sid] = pd.DataFrame(
{field: [random.random()] * len(index)
for field in ['price', 'low', 'high', 'volume']}, index=index)
return pd.Panel(fake_data)
class FakePanelWithoutVolumeBacktestDatasource(object):
def __init__(self, sids, properties):
pass
def get_data(self, sids, start, end):
index = pd.date_range(start, end, tz=pytz.utc)
fake_data = {}
for sid in sids:
fake_data[sid] = pd.DataFrame(
{field: [random.random()] * len(index)
for field in ['price', 'low', 'high']}, index=index)
return pd.Panel(fake_data)
class FakeLiveDatasource(object):
def __init__(self, sids, properties):
pass
@property
def mapping(self):
return {
'live': True
}
def get_data(self, sids, start, end):
return pd.DataFrame()
class DatafeedUtilsTestCase(unittest.TestCase):
def setUp(self):
dna.test_utils.setup_logger(self)
self.fake_sid = 'fake_sid'
self.fake_one_sid_series = pd.Series(
{key: random.random() for key in ['low', 'close']})
self.fake_multiple_sids_series = pd.Series(
{key: random.random() for key in ['goog', 'fake_sid']})
self.fake_multiple_sids_df = pd.DataFrame(
{key: {'price': random.random(), 'close': 0.3}
for key in ['goog', 'fake_sid']})
self.fake_date = dt.datetime(2013, 1, 1)
def tearDown(self):
dna.test_utils.teardown_logger(self)
@nottest
def _check_event(self, event):
self.assertIsInstance(event, dict)
self.assertIn('volume', event)
self.assertIn('dt', event)
eq_(event['dt'], self.fake_date)
eq_(event['sid'], self.fake_sid)
def test_build_safe_event_without_volume(self):
partial_event = self.fake_one_sid_series.to_dict()
event = datafeed._build_safe_event(
partial_event, self.fake_date, self.fake_sid)
self._check_event(event)
for field in self.fake_one_sid_series.index:
self.assertIn(field, event.keys())
def test_build_safe_event_with_volume(self):
partial_event = self.fake_one_sid_series.to_dict()
partial_event.update({'volume': 12034})
event = datafeed._build_safe_event(
partial_event, self.fake_date, self.fake_sid)
self._check_event(event)
for field in self.fake_one_sid_series.index:
self.assertIn(field, event.keys())
@raises(AttributeError)
def test_wrong_data_type(self):
wrong_type = bool
datafeed._build_safe_event(wrong_type, self.fake_date, self.fake_sid)
def test_check_data_modules(self):
end = self.fake_date + pd.datetools.MonthBegin(6)
ok_(datafeed._check_data_modules(
'backtest.module', None, self.fake_date, end))
@raises(InvalidDatafeed)
def test_check_data_modules_all_nones(self):
end = self.fake_date + pd.datetools.MonthBegin(6)
datafeed._check_data_modules(None, None, self.fake_date, end)
class HybridDataFactoryTestCase(unittest.TestCase):
def setUp(self):
dna.test_utils.setup_logger(self)
self.test_index = pd.date_range(
'2012/01/01', '2012/01/7', tz=pytz.utc)
self.test_universe = 'forex,5'
self.market = Market()
self.market.parse_universe_description(self.test_universe)
self.test_sids = self.market.sids
def tearDown(self):
dna.test_utils.teardown_logger(self)
@nottest
def _check_datasource(self, source):
ok_((source.index == self.test_index).all())
eq_(source.start, self.test_index[0])
eq_(source.end, self.test_index[-1])
eq_(source.sids, self.test_sids)
self.assertIsNone(source._raw_data)
eq_(source.arg_string, source.instance_hash)
eq_(source.event_type, 4)
ok_(hasattr(source, 'log'))
self.assertFalse(source._is_live)
@raises(InvalidDatafeed)
def test_data_source_without_modules(self):
config = {
'sids': self.test_sids,
'index': self.test_index
}
datafeed.HybridDataFactory(**config)
@raises(InvalidDatafeed)
def test_data_source_invalid_index(self):
config = {
'sids': self.test_sids,
'index': bool
}
datafeed.HybridDataFactory(**config)
def test_minimal_data_source(self):
source = datafeed.HybridDataFactory(
universe=self.market,
index=self.test_index,
backtest=FakeBacktestDatasource)
self._check_datasource(source)
def test_hybrid_mapping(self):
source = datafeed.HybridDataFactory(
universe=self.market,
index=self.test_index,
backtest=FakeBacktestDatasource,
live=FakeLiveDatasource)
self.assertIn('backtest', source.mapping)
source._is_live = True
self.assertIn('live', source.mapping)
# TODO Test Live data sources
class SpecificMarketDataFactoryTestCase(unittest.TestCase):
def setUp(self):
dna.test_utils.setup_logger(self)
self.test_index = pd.date_range(
'2012/01/01', '2012/01/7', tz=pytz.utc)
def tearDown(self):
dna.test_utils.teardown_logger(self)
def test_dataframe_forex_backtest_data_generation(self):
test_universe = 'forex,5'
market = Market()
market.parse_universe_description(test_universe)
source = datafeed.HybridDataFactory(
universe=market,
index=self.test_index,
backtest=FakeBacktestDatasource)
total_rows = 0
for row in source.raw_data:
if not total_rows:
self.assertListEqual(
sorted(row.keys()),
sorted(['dt', 'price', 'sid', 'volume']))
total_rows += 1
eq_(total_rows, 2 * len(self.test_index) * len(market.sids))
def test_dataframe_cac40_backtest_data_generation(self):
test_universe = 'stocks:paris:cac40'
market = Market()
market.parse_universe_description(test_universe)
source = datafeed.HybridDataFactory(
universe=market,
index=self.test_index,
backtest=FakeBacktestDatasource)
total_rows = 0
for row in source.raw_data:
if not total_rows:
self.assertListEqual(
sorted(row.keys()),
sorted(['dt', 'price', 'sid', 'volume']))
total_rows += 1
eq_(total_rows, len(self.test_index) * len(market.sids))
def test_panel_cac40_backtest_data_generation(self):
test_universe = 'stocks:paris:cac40'
market = Market()
market.parse_universe_description(test_universe)
source = datafeed.HybridDataFactory(
universe=market,
index=self.test_index,
backtest=FakePanelBacktestDatasource)
total_rows = 0
for row in source.raw_data:
if not total_rows:
self.assertListEqual(
sorted(row.keys()),
sorted(['dt', 'price', 'low', 'high', 'sid', 'volume']))
total_rows += 1
eq_(total_rows, len(self.test_index) * len(market.sids))
def test_panel_without_volume_cac40_backtest_data_generation(self):
test_universe = 'stocks:paris:cac40,5'
market = Market()
market.parse_universe_description(test_universe)
source = datafeed.HybridDataFactory(
universe=market,
index=self.test_index,
backtest=FakePanelWithoutVolumeBacktestDatasource)
total_rows = 0
for row in source.raw_data:
if not total_rows:
self.assertListEqual(
sorted(row.keys()),
sorted(['dt', 'price', 'low', 'high', 'sid', 'volume']))
total_rows += 1
eq_(total_rows, len(self.test_index) * len(market.sids))
| apache-2.0 |
HolgerPeters/scikit-learn | examples/tree/plot_iris.py | 86 | 1965 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
fzalkow/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, default None
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Notes
-----
The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates and random subsets before
pooling them into a larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
        except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal): reduce the number of retained candidates
            # before allocating again.
            n_best_tot = 10
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
            n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
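# Usage sketch (illustrative only): for a (n_samples, n_features) array X,
#     location, covariance, support, dist = fast_mcd(X, random_state=0)
# returns the raw MCD location and covariance, the boolean support mask and the
# squared Mahalanobis distances of all samples; the correction and reweighting
# steps mentioned above are performed by MinCovDet, not by fast_mcd itself.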
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful to work with data whose mean is approximately (but not
        exactly) zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
        by Rousseeuw and Van Driessen in [Rouseeuw1999]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
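# ----------------------------------------------------------------------------
# Illustrative usage sketch -- not part of the original scikit-learn module.
# It only assumes NumPy and exercises the MinCovDet class defined above on
# Gaussian data contaminated with a few gross outliers.
def _min_cov_det_usage_example(random_state=0):
    """Fit MinCovDet on contaminated 2-d Gaussian data (sketch only)."""
    rng = np.random.RandomState(random_state)
    X = rng.multivariate_normal([0., 0.], [[1., .7], [.7, 1.]], size=200)
    X[:10] += 8.  # shift 5% of the samples far away from the bulk
    mcd = MinCovDet(random_state=random_state).fit(X)
    # location_ stays close to [0, 0] and the shifted points receive large
    # robust Mahalanobis distances, so they are easy to flag as outliers.
    return mcd.location_, mcd.covariance_, mcd.mahalanobis(X[:10])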
| bsd-3-clause |
mblondel/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
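# Follow-up sketch (not part of the original example): after the loop, ``clf``
# is the shrunken-centroid model (shrink_threshold=0.1); its per-class
# centroids and predictions for new points are available directly.
print(clf.centroids_)             # shape (3, 2): one centroid per iris class
print(clf.predict([[5.0, 3.5]]))  # class of the nearest (shrunken) centroid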
| bsd-3-clause |
chenhh/PySPPortfolio | PySPPortfolio/example/portfolio_maxRet/MultiStagePortfolio.py | 1 | 4920 | # -*- coding: utf-8 -*-
'''
@author: Hung-Hsin Chen
@mail: [email protected]
'''
import time
import os
from datetime import date
import numpy as np
import pandas as pd
from coopr.pyomo import (Set, RangeSet, Param, Var, Objective, Constraint,
ConcreteModel, Reals, NonNegativeReals, maximize, display)
from coopr.opt import SolverFactory
PklBasicFeaturesDir = os.path.join(os.getcwd(),'pkl', 'BasicFeatures')
def maxRetPortfolio(riskyRetMtx, riskFreeRetVec,
buyTransFeeMtx, sellTransFeeMtx,
allocatedWealth):
'''
    -Assume there are T periods of data, invested in M risky assets plus 1 risk-free asset
    -Positions are settled at the beginning of period (T+1)
    goal: maximize wealth at the beginning of period T+1
@param riskyRetMtx, numpy.array, size: M * T+1
@param riskFreeRetVec, numpy.array, size: T+1
@param buyTransFeeMtx, numpy.array, size: M * T
@param sellTransFeeMtx, numpy.array, size: M * T
    @param allocatedWealth, numpy.array, size: (M+1) (the last element is cash)
@return (buyMtx, sellMtx), numpy.array, each size: M*T
'''
assert buyTransFeeMtx.shape == sellTransFeeMtx.shape
assert riskyRetMtx.shape[1] == riskFreeRetVec.size
M, T = buyTransFeeMtx.shape
t1 = time.time()
#create model
model = ConcreteModel()
#number of asset and number of periods
model.symbols = range(M)
model.T = range(T)
model.Tp1 = range(T+1)
model.Mp1 = range(M+1)
#decision variables
model.buys = Var(model.symbols, model.T, within=NonNegativeReals)
model.sells = Var(model.symbols, model.T, within=NonNegativeReals)
model.riskyWealth = Var(model.symbols, model.T, within=NonNegativeReals)
model.riskFreeWealth = Var(model.T, within=NonNegativeReals)
#objective
def objective_rule(model):
wealth =sum( (1. + riskyRetMtx[m, T]) * model.riskyWealth[m, T-1]
for m in xrange(M))
wealth += (1.+ riskFreeRetVec[T]) * model.riskFreeWealth[T-1]
return wealth
model.objective = Objective(rule=objective_rule, sense=maximize)
#constraint
def riskyWealth_constraint_rule(model, m, t):
if t>=1:
preWealth = model.riskyWealth[m, t-1]
else:
preWealth = allocatedWealth[m]
return (model.riskyWealth[m, t] ==
(1. + riskyRetMtx[m,t])*preWealth +
model.buys[m,t] - model.sells[m,t])
def riskyFreeWealth_constraint_rule(model, t):
totalSell = sum((1-sellTransFeeMtx[mdx, t])*model.sells[mdx, t]
for mdx in xrange(M))
totalBuy = sum((1+buyTransFeeMtx[mdx, t])*model.buys[mdx, t]
for mdx in xrange(M))
if t >=1:
preWealth = model.riskFreeWealth[t-1]
else:
preWealth = allocatedWealth[-1]
return( model.riskFreeWealth[t] ==
(1. + riskFreeRetVec[t])*preWealth +
totalSell - totalBuy)
model.riskyWeathConstraint = Constraint(model.symbols, model.T,
rule=riskyWealth_constraint_rule)
model.riskFreeWealthConstraint = Constraint(model.T,
rule=riskyFreeWealth_constraint_rule)
#optimizer
opt = SolverFactory('cplex')
# opt.options["threads"] = 4
instance = model.create()
results = opt.solve(instance)
print results
# instance.load(results)
# display(instance)
# instance.load(results)
# for var in instance.active_components(Var):
# varobj = getattr(instance, var)
# for idx in varobj:
# print varobj[idx], varobj[idx].value
#
print "elapsed %.3f secs"%(time.time()-t1)
def constructModelData():
# symbols = ('1101', '1102', '1103')
symbols = ('1101', '1102', '1103', '1104' )
startDate, endDate = date(2005,1,1), date(2012, 12, 31)
dfs = []
for symbol in symbols:
df = pd.read_pickle(os.path.join(PklBasicFeaturesDir,
'BasicFeatures_%s_00-12.pkl'%symbol))
dfs.append(df[startDate: endDate])
M, T = len(symbols), dfs[0].index.size - 1
money = 1e5
riskyRetMtx = np.empty((M, T+1))
for idx, df in enumerate(dfs):
riskyRetMtx[idx, :] = df['adjROI'].values/100
riskFreeRetVec = np.zeros(T+1)
    buyTransFeeMtx = np.ones((M,T))* 0.003      # buy: 0.3% transaction fee
    sellTransFeeMtx = np.ones((M,T))* 0.004425  # sell: 0.3% fee + 0.1425% transaction tax
allocated = np.zeros(M+1)
allocated[-1] = money
    maxRetPortfolio(riskyRetMtx, riskFreeRetVec,
                    buyTransFeeMtx, sellTransFeeMtx, allocated)
print riskyRetMtx
if __name__ == '__main__':
# testMSPortfolio()
constructModelData()
| gpl-3.0 |
chrisburr/scikit-learn | examples/feature_selection/plot_feature_selection.py | 95 | 2847 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='darkorange')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight',
color='navy')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='c')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
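# Follow-up sketch (not part of the original example): the indices of the
# features retained by the univariate selector can be listed directly; they
# are usually drawn from the four informative iris features (indices 0-3).
print(selector.get_support(indices=True))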
| bsd-3-clause |
ChristosChristofidis/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_weights_var_impGBM.py | 1 | 6163 | import sys
sys.path.insert(1, "../../../")
import h2o
import random
def weights_var_imp(ip,port):
# Connect to h2o
h2o.init(ip,port)
def check_same(data1, data2, min_rows_scale):
gbm1_regression = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["economy"],
min_rows=5,
ntrees=5,
max_depth=2)
gbm2_regression = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year"]],
y=data2["economy"],
training_frame=data2,
min_rows=5*min_rows_scale,
weights_column="weights",
ntrees=5,
max_depth=2)
gbm1_binomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["economy_20mpg"],
min_rows=5,
distribution="bernoulli",
ntrees=5,
max_depth=2)
gbm2_binomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year"]],
y=data2["economy_20mpg"],
training_frame=data2,
weights_column="weights",
min_rows=5*min_rows_scale,
distribution="bernoulli",
ntrees=5,
max_depth=2)
gbm1_multinomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["cylinders"],
min_rows=5,
distribution="multinomial",
ntrees=5,
max_depth=2)
gbm2_multinomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year"]],
y=data2["cylinders"],
training_frame=data2,
weights_column="weights",
min_rows=5*min_rows_scale,
distribution="multinomial",
ntrees=5,
max_depth=2)
reg1_vi = gbm1_regression.varimp(return_list=True)
reg2_vi = gbm2_regression.varimp(return_list=True)
bin1_vi = gbm1_binomial.varimp(return_list=True)
bin2_vi = gbm2_binomial.varimp(return_list=True)
mul1_vi = gbm1_multinomial.varimp(return_list=True)
mul2_vi = gbm2_multinomial.varimp(return_list=True)
print "Varimp (regresson) no weights vs. weights: {0}, {1}".format(reg1_vi, reg2_vi)
print "Varimp (binomial) no weights vs. weights: {0}, {1}".format(bin1_vi, bin2_vi)
print "Varimp (multinomial) no weights vs. weights: {0}, {1}".format(mul1_vi, mul2_vi)
        for rvi1, rvi2 in zip(reg1_vi, reg2_vi): assert rvi1 == rvi2, "Expected vi's (regression) to be the same, but got {0}, and {1}".format(rvi1, rvi2)
        for bvi1, bvi2 in zip(bin1_vi, bin2_vi): assert bvi1 == bvi2, "Expected vi's (binomial) to be the same, but got {0}, and {1}".format(bvi1, bvi2)
        for mvi1, mvi2 in zip(mul1_vi, mul2_vi): assert mvi1 == mvi2, "Expected vi's (multinomial) to be the same, but got {0}, and {1}".format(mvi1, mvi2)
h2o_cars_data = h2o.import_frame(h2o.locate("smalldata/junit/cars_20mpg.csv"))
h2o_cars_data["economy_20mpg"] = h2o_cars_data["economy_20mpg"].asfactor()
h2o_cars_data["cylinders"] = h2o_cars_data["cylinders"].asfactor()
# uniform weights same as no weights
weight = random.randint(1,10)
uniform_weights = [[weight] for r in range(406)]
h2o_uniform_weights = h2o.H2OFrame(python_obj=uniform_weights)
h2o_uniform_weights.setNames(["weights"])
h2o_data_uniform_weights = h2o_cars_data.cbind(h2o_uniform_weights)
print "\n\nChecking that using uniform weights is equivalent to no weights:"
check_same(h2o_cars_data, h2o_data_uniform_weights, weight)
# zero weights same as removed observations
zero_weights = [[0] if random.randint(0,1) else [1] for r in range(406)]
h2o_zero_weights = h2o.H2OFrame(python_obj=zero_weights)
h2o_zero_weights.setNames(["weights"])
h2o_data_zero_weights = h2o_cars_data.cbind(h2o_zero_weights)
h2o_data_zeros_removed = h2o_cars_data[h2o_zero_weights["weights"] == 1]
print "\n\nChecking that using some zero weights is equivalent to removing those observations:"
check_same(h2o_data_zeros_removed, h2o_data_zero_weights, 1)
# doubled weights same as doubled observations
doubled_weights = [[1] if random.randint(0,1) else [2] for r in range(406)]
h2o_doubled_weights = h2o.H2OFrame(python_obj=doubled_weights)
h2o_doubled_weights.setNames(["weights"])
h2o_data_doubled_weights = h2o_cars_data.cbind(h2o_doubled_weights)
doubled_data = h2o.as_list(h2o_cars_data, use_pandas=False)
colnames = doubled_data.pop(0)
for idx, w in enumerate(doubled_weights):
if w[0] == 2: doubled_data.append(doubled_data[idx])
h2o_data_doubled = h2o.H2OFrame(python_obj=doubled_data)
h2o_data_doubled.setNames(colnames)
h2o_data_doubled["economy_20mpg"] = h2o_data_doubled["economy_20mpg"].asfactor()
h2o_data_doubled["cylinders"] = h2o_data_doubled["cylinders"].asfactor()
h2o_data_doubled_weights["economy_20mpg"] = h2o_data_doubled_weights["economy_20mpg"].asfactor()
h2o_data_doubled_weights["cylinders"] = h2o_data_doubled_weights["cylinders"].asfactor()
print "\n\nChecking that doubling some weights is equivalent to doubling those observations:"
check_same(h2o_data_doubled, h2o_data_doubled_weights, 1)
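# Self-contained sketch (not part of the original H2O test) of the weighting
# identities that check_same relies on, using plain NumPy: uniform weights are
# a no-op, a zero weight drops a row, and a weight of 2 duplicates a row.
def _numpy_weight_identities():
    import numpy as np
    vals = np.array([1.0, 2.0, 3.0, 4.0])
    assert np.isclose(np.average(vals, weights=[3, 3, 3, 3]), vals.mean())
    assert np.isclose(np.average(vals, weights=[1, 0, 1, 1]),
                      vals[[0, 2, 3]].mean())
    assert np.isclose(np.average(vals, weights=[2, 1, 1, 1]),
                      np.append(vals, vals[0]).mean())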
if __name__ == "__main__":
h2o.run_test(sys.argv, weights_var_imp)
| apache-2.0 |
ilyes14/scikit-learn | sklearn/tests/test_grid_search.py | 53 | 28730 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
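# Side note (not part of the original test module): the ``tuple + chain``
# idiom used above flattens one parameter dict into a single hashable tuple,
# which is what makes the set comparison possible. A minimal self-check:
def _flatten_params_example():
    flattened = tuple(chain(*sorted({"a": 1, "b": 2}.items())))
    assert flattened == ("a", 1, "b", 2)
    return flattened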
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
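# Worked arithmetic for the iid cases above (comment only, not original code):
# the first fold holds 1/4 of the samples and scores 1.0, the second holds 3/4
# and scores 1/3, so the sample-weighted mean is 1*(1/4) + (1/3)*(3/4) = 0.5,
# while iid=False reports the plain fold average (1 + 1/3) / 2 = 2/3.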
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
rseubert/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
    W, H = alt_nnmf(V, r, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
    R : None or "svd", optional
        initialization scheme: random non-negative matrices (None, the
        default) or NNDSVD-based initialization ("svd")
Returns
-------
    W : 2-ndarray, [n_samples, r]
        Component part of the factorization
    H : 2-ndarray, [r, n_features]
        Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
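# Usage sketch (illustrative, not part of the original benchmark): factor a
# small random non-negative matrix with the multiplicative updates above and
# report the relative Frobenius reconstruction error; only NumPy/SciPy needed.
def _alt_nnmf_example(seed=0):
    rng = np.random.RandomState(seed)
    V = np.abs(rng.standard_normal((30, 20)))
    W, H = alt_nnmf(V, r=5, max_iter=200, tol=1e-3, R=None)
    return norm(V - np.dot(W, H)) / norm(V)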
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
lucashtnguyen/pybmpdb | pybmpdb/summary.py | 1 | 29178 | import os
import sys
from pkg_resources import resource_filename
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import pandas
import openpyxl
from wqio import utils
from wqio import Parameter
from . import dataAccess, info
from statsmodels.tools.decorators import (
resettable_cache, cache_readonly, cache_writable
)
def filterlocation(location, count=5, column='bmp'):
location.filtered_data = (
location.filtered_data
.groupby(level=column)
.filter(lambda g: g.count() >= count)
)
location.include = (
location.filtered_data
.index
.get_level_values(column)
.unique()
.shape[0]
) >= count
def getPFCs(db):
# get BMP Name of pervious friction course (PFC) BMPs
bmpnamecol = 'BMPNAME'
bmptable = 'BMP INFO S02'
bmptypecol = 'TBMPT 2009'
query = """
select [{0}]
FROM [{1}]
WHERE [{2}] = 'PF';
""".format(bmpnamecol, bmptable, bmptypecol)
with db.connect() as cnn:
pfc_names = pandas.read_sql(query, cnn)[bmpnamecol].tolist()
return pfc_names
def _pick_best_station(dataframe):
def best_col(row, mainstation, backupstation, valcol):
try:
if pandas.isnull(row[(mainstation, valcol)]):
return row[(backupstation, valcol)]
else:
return row[(mainstation, valcol)]
except KeyError:
return np.nan
xtab = dataframe.unstack(level='station')
xtab.columns = xtab.columns.swaplevel(0, 1)
xtab[('final_outflow', 'res')] = xtab.apply(
best_col, axis=1, args=('outflow', 'subsurface', 'res')
)
xtab[('final_outflow', 'qual')] = xtab.apply(
best_col, axis=1, args=('outflow', 'subsurface', 'qual')
)
xtab[('final_inflow', 'qual')] = xtab.apply(
best_col, axis=1, args=('inflow', 'reference outflow', 'qual')
)
xtab[('final_inflow', 'res')] = xtab.apply(
best_col, axis=1, args=('inflow', 'reference outflow', 'res')
)
data = (
xtab.select(lambda c: 'final_' in c[0], axis=1)
.rename(columns=lambda col: col.replace('final_', ''))
.stack(level='station')
)
return data
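# Conceptual sketch (not part of the original module) of the "use the main
# station, fall back to the backup when it is missing" rule that best_col
# implements above, shown with ``combine_first`` on a toy pair of Series; the
# real function applies it across the unstacked station level of the dataframe.
def _prefer_main_station_example():
    main = pandas.Series([1.0, np.nan, 3.0], name='outflow')
    backup = pandas.Series([10.0, 20.0, 30.0], name='subsurface')
    return main.combine_first(backup)  # -> 1.0, 20.0, 3.0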
def _pick_best_sampletype(dataframe):
def best_col(row):
if pandas.isnull(row['composite']):
return row[badval]
else:
return np.nan
pivotlevel='sampletype'
badval='grab'
orig_cols = dataframe.columns
xtab = dataframe.unstack(level=pivotlevel)
for col in orig_cols:
xtab[(col, badval)] = xtab[col].apply(best_col, axis=1)
data = (
xtab.select(lambda c: c[1] != 'unknown', axis=1)
.stack(level=['sampletype'])
)
return data
def _filter_onesided_BMPs(dataframe):
grouplevels = ['site', 'bmp', 'parameter', 'category']
pivotlevel = 'station'
xtab = dataframe.unstack(level=pivotlevel)
xgrp = xtab.groupby(level=grouplevels)
data = xgrp.filter(
lambda g: np.all(g['res'].describe().loc['count'] > 0)
)
return data.stack(level=pivotlevel)
def _filter_by_storm_count(dataframe, minstorms):
# filter out all monitoring stations with less than /N/ storms
grouplevels = ['site', 'bmp', 'parameter', 'station']
data = dataframe.groupby(level=grouplevels).filter(
lambda g: g.count()['res'] >= minstorms
)
return data
def _filter_by_BMP_count(dataframe, minbmps):
grouplevels = ['category', 'parameter', 'station']
data = dataframe.groupby(level=grouplevels).filter(
lambda g: g.index.get_level_values('bmp').unique().shape[0] >= minbmps
)
return data
def getSummaryData(dbpath=None, catanalysis=False, astable=False,
minstorms=3, minbmps=3, name=None, useTex=False,
excludedbmps=None, excludedparams=None,
**selection):
    '''Select official data from the database.
Parameters
----------
dbpath : string
File path to the BMP Database Access file.
catanalysis : optional bool (default = False)
Filters for data approved for BMP Category-level analysis.
wqanalysis : optional bool (default = False)
        Filters for data approved for individual BMP analysis.
    minstorms : optional int (default = 3)
The minimum number of storms each group defined by BMP, station,
and parameter should have. Groups with too few storms will be
filtered out.
    minbmps : optional int (default = 3)
The minimum number of BMPs each group defined
by category, station, and parameter should have.
Groups with too few BMPs will be filtered out.
astable : optional bool (default = False)
Toggles whether the database will be returned
as a pandas.DataFrame (default) or a bmp.Table
object.
excludedbmps : optional list or None (default)
List of BMP Names to exclude form the result.
    name : optional string or None (default)
        Passed directly to the Table constructor.
    useTex : optional bool (default = False)
        Passed directly to the Table constructor.
**selection : optional keyword arguments
        Selection criteria passed directly to Database.selectData.
Returns
-------
subset : pandas.DataFrame or bmpTable
'''
# main dataset
if dbpath is None:
dbpath = resource_filename("pybmpdb.data", 'bmpdata.csv')
db = dataAccess.Database(dbpath, catanalysis=catanalysis)
# astable must be true here. The input value is respected later
table = db.selectData(astable=True, useTex=useTex, **selection)
# combine NO3+NO2 and NO3 into NOx
nitro_components = [
'Nitrogen, Nitrite (NO2) + Nitrate (NO3) as N',
'Nitrogen, Nitrate (NO3) as N'
]
nitros_exist = table._check_for_parameters(nitro_components)
if nitros_exist:
nitro_combined = 'Nitrogen, NOx as N'
table.unionParamsWithPreference(nitro_components, nitro_combined,
'mg/L')
grab_BMPs = ['Retention Pond', 'Wetland Basin']
if catanalysis:
        # merge Wetland Basins and Retention Ponds, keeping
# the original records
WBRP_combo = 'Wetland Basin/Retention Pond'
table.redefineBMPCategory(
bmpname=WBRP_combo,
criteria=lambda r: r[0] in grab_BMPs,
dropold=False
)
grab_BMPs.append(WBRP_combo)
    # all data should be composite data, but grabs are allowed
# for bacteria at all BMPs, and all parameter groups at
# retention ponds and wetland basins. Samples of an unknown
# type are excluded
querytxt = (
"(sampletype == 'composite') | ("
"(category in {}) | "
"(paramgroup == 'Biological') "
") & (sampletype != 'unknown')"
).format(grab_BMPs)
subset = table.data.query(querytxt)
if excludedbmps is not None:
# remove all of the PFCs from the dataset
exclude_bmp_query = "bmp not in {}".format(excludedbmps)
subset = subset.query(exclude_bmp_query)
if excludedparams is not None:
exclude_params_query = "parameter not in {}".format(excludedparams)
subset = subset.query(exclude_params_query)
subset = _pick_best_sampletype(subset)
subset = _pick_best_station(subset)
subset = _filter_by_storm_count(subset, minstorms)
subset = _filter_by_BMP_count(subset, minbmps)
subset = _filter_onesided_BMPs(subset)
if astable:
table = dataAccess.Table(subset, name=name, useTex=useTex)
return table, db
else:
return subset, db
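# Hypothetical usage sketch (not part of the original module): it assumes the
# packaged ``bmpdata.csv`` resource is available, and the excluded BMP name is
# only a placeholder, not a real record.
def _example_summary_query():
    table, db = getSummaryData(catanalysis=True, astable=True,
                               minstorms=3, minbmps=3,
                               excludedbmps=['Example BMP'])
    return table, db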
def setMPLStyle(serif=False):
if serif:
fontfamily = 'serif'
preamble = [
r'\usepackage{siunitx}',
r'\sisetup{detect-all}',
r'\usepackage{fourier}'
]
else:
fontfamily = 'sans-serif'
preamble = [
r'\usepackage{siunitx}',
r'\sisetup{detect-all}',
r'\usepackage{helvet}',
r'\usepackage{sansmath}',
r'\sansmath'
]
style_dict = {
'text.usetex': True,
'font.family': [fontfamily],
'font.serif': ['Utopia', 'Palantino'],
'font.sans-serif': ['Helvetica', 'Arial'],
'lines.linewidth': 0.5,
'patch.linewidth': 0.5,
'text.latex.preamble': preamble,
'axes.linewidth': 0.5,
'axes.grid': True,
'axes.titlesize': 12,
'axes.labelsize': 10,
'xtick.labelsize': 10,
'xtick.direction': 'out',
'ytick.labelsize': 10,
'ytick.direction': 'out',
'grid.linewidth': 0.5,
'legend.fancybox': True,
'legend.numpoints': 1,
'legend.fontsize': 8,
'figure.figsize': (6.5, 3.5),
'savefig.dpi': 300
}
matplotlib.rcParams.update(style_dict)
class DatasetSummary(object):
def __init__(self, dataset, paramgroup, figpath, forcepaths=False):
self.forcepaths = forcepaths
self.figpath = figpath
self.paramgroup = paramgroup
self.ds = dataset
self.parameter = self.ds.definition['parameter']
self.parameter.usingTex = True
self.bmp = self.ds.definition['category']
# properties
self._latex_file_name = None
self._scatter_fig_path = None
self._scatter_fig_name = None
self._stat_fig_path = None
self._stat_fig_name = None
@property
def latex_file_name(self):
if self._latex_file_name is None:
self._latex_file_name = utils.processFilename('{}_{}_{}'.format(
self.paramgroup, self.bmp, self.parameter.name
)).lower()
return self._latex_file_name
@latex_file_name.setter
def latex_file_name(self, value):
self._latex_file_name = value
@property
def scatter_fig_path(self):
if self._scatter_fig_path is None:
self._scatter_fig_path = self.figpath + '/scatterplot'
if not os.path.exists(self._scatter_fig_path) and self.forcepaths:
os.mkdir(self._scatter_fig_path)
return self._scatter_fig_path
@property
def scatter_fig_name(self):
if self._scatter_fig_name is None:
figname = utils.processFilename('{}_scatter.pdf'.format(self.latex_file_name))
self._scatter_fig_name = self.scatter_fig_path + '/' + figname
return self._scatter_fig_name
@scatter_fig_name.setter
def scatter_fig_name(self, value):
self._scatter_fig_name = value
@property
def stat_fig_path(self):
if self._stat_fig_path is None:
self._stat_fig_path = self.figpath + '/statplot'
if not os.path.exists(self._stat_fig_path) and self.forcepaths:
os.mkdir(self._stat_fig_path)
return self._stat_fig_path
@property
def stat_fig_name(self):
if self._stat_fig_name is None:
figname = utils.processFilename('{}_stats.pdf'.format(self.latex_file_name))
self._stat_fig_name = self.stat_fig_path + '/' + figname
return self._stat_fig_name
@stat_fig_name.setter
def stat_fig_name(self, value):
self._stat_fig_name = value
def _tex_table_row(self, name, attribute, rule='mid', twoval=False,
sigfigs=3, ci=False, fromdataset=False, pval=False,
tex=False, forceint=False):
rulemap = {
'top': '\\toprule',
'mid': '\\midrule',
'bottom': '\\bottomrule',
'none': '%%',
}
try:
thisrule = rulemap[rule]
except KeyError:
raise KeyError('top, mid, bottom rules or none allowed')
if fromdataset:
if self.ds.effluent.include and self.ds.influent.include:
val = utils.sigFigs(getattr(self.ds, attribute), sigfigs,
pval=pval, tex=tex, forceint=forceint)
else:
val = 'NA'
formatter = dict(ruler=thisrule, name=name, value=val)
row = r"""
{ruler}
{name} & \multicolumn{{2}}{{c}} {{{value}}} \\"""
else:
valstrings = []
for loc in [self.ds.influent, self.ds.effluent]:
if loc.include:
if hasattr(attribute, 'append'):
val = [getattr(loc, attr)
for attr in attribute]
else:
val = getattr(loc, attribute)
if val is not None:
if twoval:
thisstring = '{}; {}'.format(
utils.sigFigs(val[0], sigfigs, pval=pval,
tex=tex, forceint=forceint),
utils.sigFigs(val[1], sigfigs, pval=pval,
tex=tex, forceint=forceint)
)
if ci:
thisstring = '({})'.format(thisstring)
else:
thisstring = utils.sigFigs(
val, sigfigs, pval=pval,
tex=tex, forceint=forceint
)
else:
thisstring = 'NA'
else:
thisstring = 'NA'
valstrings.append(thisstring)
formatter = dict(
ruler=thisrule,
name=name,
val_in=valstrings[0],
val_out=valstrings[1]
)
row = r"""
{ruler}
{name} & {val_in} & {val_out} \\"""
return row.format(**formatter)
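# Example of the strings _tex_table_row produces (illustrative only; the
# values shown are made up): a two-column row such as
#   \midrule
#   Mean & 1.23 & 0.987 \\
# or, with fromdataset=True, a row spanning both columns:
#   \midrule
#   Number of Pairs & \multicolumn{2}{c} {42} \\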
def _make_tex_table(self, tabletitle):
'''
Generate a LaTeX table comparing the stats of `self.influent`
and `self.effluent`.
Parameters
----------
tabletitle : string
Title of the table as it should appear in a LaTeX document.
Writes
------
None
Returns
-------
stattable : string
The LaTeX commands for the statsummary table.
'''
#tabletitle = 'Summary of {} at {} BMPs'.format(self.parameter.tex, self.bmpName)
stattable = r"""
\begin{table}[h!]
\caption{%s}
\centering
\begin{tabular}{l l l l l}
\toprule
\textbf{Statistic} & \textbf{Inlet} & \textbf{Outlet} \\""" % tabletitle
stats = [
{'name': 'Count', 'attribute': 'N', 'rule': 'top', 'forceint': True},
{'name': 'Number of NDs', 'attribute': 'ND', 'forceint': True},
#{'name': 'Number of Studies', 'attribute': 'JUNK', 'sigfigs': 0},
{'name': 'Min; Max', 'attribute': ['min', 'max'], 'twoval': True},
{'name': 'Mean', 'attribute': 'mean', },
{'name': '(95\% confidence interval)',
'attribute': 'mean_conf_interval',
'twoval': True, 'ci': True, 'rule':'none'
},
{'name': 'Standard Deviation', 'attribute': 'std', },
{'name': 'Log. Mean', 'attribute': 'logmean', },
{'name': '(95\% confidence interval)',
'attribute': 'logmean_conf_interval',
'twoval': True, 'ci': True, 'rule':'none'
},
{'name': 'Log. Standard Deviation', 'attribute': 'logstd', },
{'name': 'Geo. Mean', 'attribute': 'geomean', },
{'name': '(95\% confidence interval)',
'attribute': 'geomean_conf_interval',
'twoval': True, 'ci': True, 'rule':'none'
},
{'name': 'Coeff. of Variation', 'attribute': 'cov', },
{'name': 'Skewness', 'attribute': 'skew', },
{'name': 'Median', 'attribute': 'median', },
{'name': '(95\% confidence interval)',
'attribute': 'median_conf_interval',
'twoval': True, 'ci': True, 'rule':'none'
},
{'name': 'Quartiles',
'attribute': ['pctl25', 'pctl75'],
'twoval': True,
},
{'name': 'Number of Pairs', 'attribute': 'n_pairs',
'rule': 'top', 'fromdataset': True,
'sigfigs': 1, 'forceint': True
},
{'name': 'Wilcoxon p-value', 'attribute': 'wilcoxon_p',
'fromdataset': True, 'pval': True, 'tex': True
},
{'name': 'Mann-Whitney p-value', 'attribute': 'mannwhitney_p',
'fromdataset': True, 'pval': True, 'tex': True
},
]
for s in stats:
stattable += self._tex_table_row(**s)
stattable += r"""
\bottomrule
\end{tabular}
\end{table}"""
return stattable + '\n'
# doesn't need to be a class method yet
def _make_tex_figure(self, filename, caption, position='hb', clearpage=True):
'''
Create the LaTeX for including a figure in a document
Parameters
----------
filename : string
Path to the image you want to include
caption : string
Caption to appear below the figure
position : string (default = 'hb')
Valid LaTeX "float" placement preference
(h=here, b=bottom, t=top, !=priority)
clearpage : bool (default = True)
Toggles the LaTeX command "\clearpage" after the figure
Writes
------
None
Returns
-------
figurestring : string
The LaTeX string to include a figure in a document
'''
if clearpage:
clrpage = ' \\clearpage\n'
else:
clrpage = '\n'
figurestring = r"""
\begin{figure}[%s] %% FIGURE
\centering
\includegraphics[scale=1.00]{%s}
\caption{%s}
\end{figure}%s""" % (position, filename, caption, clrpage)
return figurestring
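# Example of the LaTeX produced by _make_tex_figure (illustrative filename and
# caption, not taken from any dataset):
#   \begin{figure}[hb] % FIGURE
#   \centering
#   \includegraphics[scale=1.00]{figures/example_scatter.pdf}
#   \caption{Influent vs. Effluent}
#   \end{figure} \clearpage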
def makeTexInput(self, tabletitle, subsection=True):
'''
Creates an input file for a dataset including a
summary table, stat plot, and scatter plot.
Parameters
----------
tabletitle : string
Title of the summary table as it should appear in the document
subsection : bool (default = True)
Toggles the data going in its own subsection in the document
Writes
------
None
Returns
-------
tablestring : string
The LaTeX input (summary table and figure commands) for inclusion
in a final or draft template
'''
tablestring = ''
# if there's enough effluent data
if self.ds.effluent.include:
if subsection:
tablestring += r'\subsection{%s}' % (self.bmp,)
# caption for the stats plot
prob_caption = 'Box and Probability Plots of {} at {} BMPs'.format(
self.parameter.name,
self.bmp
)
# caption for the scatter plot
scatter_caption = 'Influent vs. Effluent Plots of {} at {} BMPs'.format(
self.parameter.name,
self.bmp
)
# warning about having a lot of non-detects
warning = '''
Warning: there is a very high percentage of non-detects in
this data set. The hypothesis test results and other
statistics reported in this table may not be valid.
'''
# make the table and write it to the output file
tablestring += self._make_tex_table(tabletitle)
# if no more than 80% of the data are non-detects
if self.ds.effluent.ND / self.ds.effluent.N <= 0.8:
# make the stat plot string
statfig = self._make_tex_figure(
self.stat_fig_name, prob_caption, clearpage=False
)
# make the scatter plot string
scatterfig = self._make_tex_figure(
self.scatter_fig_name, scatter_caption, clearpage=True
)
# write the strings to the file
tablestring += statfig
tablestring += scatterfig
else:
# if there are too many non-detects,
# issue the warning
tablestring += warning
return tablestring
class CategoricalSummary(object):
def __init__(self, datasets, paramgroup, basepath, figpath,
showprogress=False, applyfilters=False,
filtercount=5, filtercolumn='bmp'):
self._cache = resettable_cache()
self._applyfilters = applyfilters
self.filtercount = filtercount
self.filtercolumn = filtercolumn
self._raw_datasets = [ds for ds in filter(
lambda x: x.effluent.include,
datasets
)]
self.basepath = basepath
self.figpath = figpath
self.showprogress = showprogress
self.parameters = [ds.definition['parameter'] for ds in self.datasets]
self.bmps = [ds.definition['category'] for ds in self.datasets]
self.paramgroup = paramgroup
@cache_readonly
def datasets(self):
if self._applyfilters:
filtered_datasets = []
for ds in self._raw_datasets:
filterlocation(ds.effluent, count=self.filtercount,
column=self.filtercolumn)
filterlocation(ds.influent, count=self.filtercount,
column=self.filtercolumn)
#if ds.n_pairs is None or ds.paired_data is None or ds.n_pairs < self.filtercount:
# ds.include = False
#else:
ds.include = ds.effluent.include
if ds.include:
filtered_datasets.append(ds)
else:
filtered_datasets = self._raw_datasets
return filtered_datasets
def _make_input_file_IO(self, inputIO, regenfigs=True):
figoptions = dict(dpi=600, bbox_inches='tight', transparent=True)
if self.showprogress:
pbar = utils.ProgressBar(self.datasets)
old_param = 'pure garbage'
for n, ds in enumerate(self.datasets, 1):
dsum = DatasetSummary(ds, self.paramgroup, self.figpath)
new_param = dsum.parameter.name
tabletitle = 'Statistics for {} at {} BMPs'.format(
dsum.parameter.paramunit(), dsum.bmp
)
latex_input = ''
if old_param != new_param:
latex_input = '\\section{%s}\n' % dsum.parameter.name
latex_input += dsum.makeTexInput(tabletitle, subsection=True)
latex_input += '\\clearpage\n'
if regenfigs:
statfig = ds.statplot(
ylabel=dsum.parameter.paramunit(),
bacteria=(self.paramgroup=='Bacteria'),
axtype='prob'
)
scatterfig = ds.scatterplot(
xlabel='Influent ' + dsum.parameter.paramunit(),
ylabel='Effluent ' + dsum.parameter.paramunit(),
one2one=True
)
statpath = os.path.join(self.basepath, dsum.stat_fig_name)
statfig.savefig(statpath, **figoptions)
scatterpath = os.path.join(self.basepath, dsum.scatter_fig_name)
scatterfig.savefig(scatterpath, **figoptions)
inputIO.write(latex_input)
plt.close('all')
old_param = new_param
if self.showprogress:
pbar.animate(n)
def _make_report_IO(self, templateIO, inputpath, reportIO, report_title):
inputname = os.path.basename(inputpath)
documentstring = templateIO.read().replace('__VARTITLE', report_title)
documentstring += '\n\\input{%s}\n\\end{document}\n' % (inputname,)
reportIO.write(documentstring)
def makeReport(self, templatepath, inputpath, reportpath, report_title,
regenfigs=True):
with open(inputpath, 'w') as inputIO:
self._make_input_file_IO(inputIO, regenfigs=regenfigs)
with open(templatepath, 'r') as templateIO:
with open(reportpath, 'w') as reportIO:
self._make_report_IO(
templateIO,
inputpath,
reportIO,
report_title
)
def _proxy_inflow_outflow(dataset):
from matplotlib.lines import Line2D
infl_color = dataset.influent.color
infl = Line2D([], [], color=infl_color, linestyle='-', linewidth=1.75,
marker='o', markerfacecolor='white',
markeredgewidth=1.25, markeredgecolor=infl_color)
effl_color = dataset.effluent.color
effl = Line2D([], [], color=effl_color, linestyle='-', linewidth=1.75,
marker='s', markerfacecolor='white',
markeredgewidth=1.25, markeredgecolor=effl_color)
return infl, effl
def parameterBoxplots(datacollection, prefix, bacteria=False):
param = None
bmplabels = datacollection.tidy['category'].unique()
# positions of the ticks
bmppositions = np.arange(1, len(bmplabels) + 1) * 2
pos_map = dict(zip(bmplabels, bmppositions))
for parameter in datacollection.tidy['parameter'].unique():
fig, ax = plt.subplots(figsize=(6.5, 4))
datasets = datacollection.selectDatasets(parameter=parameter)
infl_proxy = None
effl_proxy = None
for n, ds in enumerate(datasets):
pos = pos_map[ds.definition['category']]
if ds is not None:
bp = ds.boxplot(ax=ax, yscale='log', width=0.45,
bacteria=bacteria, pos=pos, offset=0.25)
if infl_proxy is None:
infl_proxy, effl_proxy = _proxy_inflow_outflow(ds)
if param is None:
param = ds.definition['parameter_obj']
ax.set_xticks(bmppositions)
ax.set_xticklabels([x.replace('/', '/\n') for x in bmplabels])
ax.set_ylabel(param.paramunit())
ax.set_xlabel('')
utils.figutils.rotateTickLabels(ax, 45, 'x')
ax.set_xlim(left=1, right=bmppositions.max()+1)
if infl_proxy is not None:
ax.legend(
(infl_proxy, effl_proxy),
('Influent', 'Effluent'),
ncol=2,
frameon=False,
bbox_to_anchor=(1.0, 1.1)
)
fig.tight_layout()
figpath = 'figures/{}_{}_boxplots.png'.format(prefix, parameter.replace(', ', ''))
fig.savefig(figpath, dpi=600, bbox_inches='tight', transparent=True)
plt.close(fig)
def bmpStatSummary(datacollection):
stat_dict = {}
def getNStudies(loc):
return loc.filtered_data.index.get_level_values('bmp').unique().shape[0]
for ds in datacollection.datasets:
key = (ds.definition['parameter'], ds.definition['category'])
stat_dict[key] = {}
for n, loc in zip(['In', 'Out'], [ds.influent, ds.effluent]):
stat_dict[key][('N', n)] = loc.N
stat_dict[key][('N-Studies', n)] = getNStudies(loc)
stat_dict[key][('25th', n)] = loc.pctl25
stat_dict[key][('Median', n)] = loc.median
stat_dict[key][('Median CI low', n)] = loc.median_conf_interval[0]
stat_dict[key][('Median CI high', n)] = loc.median_conf_interval[1]
stat_dict[key][('75th', n)] = loc.pctl75
stat_df = pandas.DataFrame(stat_dict).T
full_index = pandas.MultiIndex.from_product([
stat_df.index.get_level_values(0).unique(),
stat_df.index.get_level_values(1).unique(),
], names=['Parameter', 'BMP Type'])
return stat_df.reindex(full_index)
| bsd-3-clause |
camallen/aggregation | experimental/dkMeansPaper/dk1.py | 2 | 3219 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import sys
import cPickle as pickle
import bisect
import csv
import matplotlib.pyplot as plt
import random
import math
import urllib
import matplotlib.cbook as cbook
from scipy.stats.stats import pearsonr
from scipy.stats import beta
def index(a, x):
'Locate the leftmost value exactly equal to x'
i = bisect.bisect_left(a, x)
if i != len(a) and a[i] == x:
return i
raise ValueError
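# Quick illustration of index() above (comment only, not in the original script):
#   index([1, 2, 2, 3], 2) -> 1 (leftmost exact match)
#   index([1, 3], 2) -> raises ValueError (no exact match)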
if os.path.exists("/home/ggdhines"):
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/classifier")
else:
sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
sys.path.append("/home/greg/github/reduction/experimental/classifier")
#from divisiveDBSCAN import DivisiveDBSCAN
from divisiveDBSCAN_multi import DivisiveDBSCAN
from divisiveKmeans import DivisiveKmeans
from divisiveKmeans_2 import DivisiveKmeans_2
from kMeans import KMeans
#from kMedoids import KMedoids
#from agglomerativeClustering import agglomerativeClustering
from quadTree import Node
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
client = pymongo.MongoClient()
client = pymongo.MongoClient()
db = client['penguin_2014-10-22']
collection = db["penguin_classifications"]
collection2 = db["penguin_subjects"]
count = 0
for subject_index,subject in enumerate(collection2.find({"metadata.path":{"$regex" : ".*BAILa2014a.*"}})):
path = subject["metadata"]["path"]
#print path
if not("BAILa2014a" in path):
continue
if count == 100:
break
print count
count += 1
user_markings = []
user_ips = []
big_list = []
zooniverse_id = subject["zooniverse_id"]
for r in collection.find({"subjects" : {"$elemMatch": {"zooniverse_id":zooniverse_id}}}):
ip = r["user_ip"]
n = 0
xy_list = []
try:
if isinstance(r["annotations"][1]["value"],dict):
for marking in r["annotations"][1]["value"].values():
if marking["value"] in ["adult","chick"]:
x,y = (float(marking["x"]),float(marking["y"]))
if (x,y,ip) in big_list:
print "--"
continue
big_list.append((x,y,ip))
user_markings.append((x,y))
user_ips.append(ip)
except KeyError:
print r["annotations"]
user_identified_condors,clusters,users = DivisiveKmeans(1).fit2(user_markings,user_ips,debug=True)
#user_identified_condors,clusters,users = DivisiveKmeans_2(1).fit2(user_markings,user_ips,debug=True)
#user_identified_condors,clusters,users = KMedoids(1).fit2(user_markings,user_ips,debug=True)
#user_identified_condors = agglomerativeClustering(zip(user_markings,user_ips))
quadRoot = Node(0,0,1000,750)
for (m,u) in zip(user_markings,user_ips):
quadRoot.__add_point__((m,u))
quadRoot.__ward_traverse__()
break
| apache-2.0 |
exa-analytics/exatomic | exatomic/qe/psp/jobfile.py | 2 | 3543 | ## -*- coding: utf-8 -*-
## Copyright (c) 2015-2020, Exa Analytics Development Team
## Distributed under the terms of the Apache License 2.0
#"""
#PSLibrary Job File
######################
#Job files (extension '.job') are used inside the `pslibrary`_ package to house
#input data for pseudopotential generation using the atomic sub-package ('ld1.x')
#within the `Quantum ESPRESSO`_ quantum chemistry suite of tools. See for example,
#`this`_ job file.
#
#.. _pslibrary: https://github.com/dalcorso/pslibrary
#.. _Quantum ESPRESSO: https://github.com/QEF/q-e
#.. _this: https://github.com/dalcorso/pslibrary/blob/master/paw_ps_collection.job
#"""
#import re
#import numpy as np
#import pandas as pd
#from exa import isotopes, Sections, Parser, TypedProperty, DataFrame
#
#
#class Element(Parser):
# """A single element's input file in the composite job file."""
# _key_config = "config"
# _key_ae_dct = {}
# _key_mrk = "["
# _key_resplit = re.compile("([1-9]*)([spdfghjklmn])([0-9-.]*)")
# _key_symbol = "title"
# _key_zed = "zed"
# _key_ps = "/"
# _key_ps_cols = ("n", "l_sym", "nps", "l", "occupation",
# "energy", "rcut_nc", "rcut", "misc")
# _key_ps_dtypes = [np.int64, "O", np.int64, np.int64, np.float64,
# np.float64, np.float64, np.float64, np.float64]
# ae = TypedProperty(DataFrame)
# ps = TypedProperty(DataFrame)
# z = TypedProperty(int)
# symbol = TypedProperty(str)
#
# def _parse(self):
# if str(self[0]).startswith("#"):
# return
# found = self.find(self._key_config, self._key_symbol,
# self._key_zed, self._key_ps)
# config = found[self._key_config][-1][1].split("=")[1]
# config = config.replace("'", "").replace(",", "").split(" ")
# nvals = []
# angmoms = []
# occs = []
# for item in config:
# if "[" in item:
# continue
# try:
# nval, angmom, occ = self._key_resplit.match(item.lower()).groups()
# nvals.append(nval)
# angmoms.append(angmom)
# occs.append(occ)
# except AttributeError:
# pass
# self.ae = pd.DataFrame.from_dict({'n': nvals, 'l': angmoms, 'occupation': occs})
# self.symbol = found[self._key_symbol][-1][1].split("=")[1].replace("'", "").replace(",", "").title()
# element = getattr(isotopes, self.symbol)
# self.z = element.Z
# ps = []
# for line in self[found[self._key_ps][-1][0]:]:
# if "#" in line:
# continue
# ls = line.split()
# if len(ls) > 7:
# dat = list(self._key_resplit.match(ls[0].lower()).groups())[:-1]
# dat += ls[1:]
# ps.append(dat)
# self.ps = pd.DataFrame(ps, columns=self._key_ps_cols)
# for i, col in enumerate(self.ps.columns):
# self.ps[col] = self.ps[col].astype(self._key_ps_dtypes[i])
#
#
#class PSLJobFile(Sections):
# """Input 'job' file in the pslibrary"""
# name = "pslibrary job file"
# description = "Parser for pslibrary input files"
# _key_sep = "EOF"
# _key_parser = Element
#
# def _parse(self):
# """Parse input data from pslibrary"""
# delims = self.find(self._key_sep, text=False)[self._key_sep]
# starts = delims[::2]
# ends = delims[1::2]
# names = [self._key_parser]*len(ends)
# self._sections_helper(parser=names, start=starts, end=ends)
| apache-2.0 |
hennersz/pySpace | basemap/examples/contour_demo.py | 4 | 4103 | from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
import sys
# examples of filled contour plots on map projections.
# read in data on lat/lon grid.
hgt = np.loadtxt('500hgtdata.gz')
lons = np.loadtxt('500hgtlons.gz')
lats = np.loadtxt('500hgtlats.gz')
lons, lats = np.meshgrid(lons, lats)
# create new figure
fig=plt.figure()
# setup of sinusoidal basemap
m = Basemap(resolution='c',projection='sinu',lon_0=0)
# make a filled contour plot.
# create contour lines
CS1 = m.contour(lons,lats,hgt,15,linewidths=0.5,colors='k',latlon=True)
# fill between contour lines.
CS2 =\
m.contourf(lons,lats,hgt,CS1.levels,cmap=plt.cm.jet,extend='both',latlon=True)
m.colorbar(CS2) # draw colorbar
# draw coastlines and political boundaries.
m.drawcoastlines()
m.drawmapboundary()
m.fillcontinents()
# draw parallels and meridians.
parallels = np.arange(-60.,90,30.)
m.drawparallels(parallels,labels=[1,0,0,0])
meridians = np.arange(-360.,360.,30.)
m.drawmeridians(meridians)
plt.title('Sinusoidal Filled Contour Demo')
sys.stdout.write('plotting with sinusoidal basemap ...\n')
# create new figure
fig=plt.figure()
# setup of mollweide basemap
m = Basemap(resolution='c',projection='moll',lon_0=0)
# make a filled contour plot.
CS1 = m.contour(lons,lats,hgt,15,linewidths=0.5,colors='k',latlon=True)
CS2 =\
m.contourf(lons,lats,hgt,CS1.levels,cmap=plt.cm.jet,extend='both',latlon=True)
m.colorbar(CS2) # draw colorbar
# draw coastlines and political boundaries.
m.drawcoastlines()
m.drawmapboundary()
m.fillcontinents()
# draw parallels and meridians.
parallels = np.arange(-60.,90,30.)
m.drawparallels(parallels,labels=[1,0,0,0])
meridians = np.arange(-360.,360.,30.)
m.drawmeridians(meridians)
plt.title('Mollweide Filled Contour Demo')
sys.stdout.write('plotting with mollweide basemap ...\n')
# create new figure
fig=plt.figure()
# set up Robinson map projection.
m = Basemap(resolution='c',projection='robin',lon_0=0)
# make a filled contour plot.
CS1 = m.contour(lons,lats,hgt,15,linewidths=0.5,colors='k',latlon=True)
CS2 = m.contourf(lons,lats,hgt,CS1.levels,cmap=plt.cm.jet,extend='both',latlon=True)
m.colorbar(CS2) # draw colorbar
# draw coastlines and political boundaries.
m.drawcoastlines()
m.drawmapboundary()
m.fillcontinents()
# draw parallels and meridians.
parallels = np.arange(-60.,90,30.)
m.drawparallels(parallels,labels=[1,0,0,0])
meridians = np.arange(-360.,360.,60.)
m.drawmeridians(meridians,labels=[0,0,0,1])
plt.title('Robinson Filled Contour Demo')
sys.stdout.write('plotting with robinson basemap ...\n')
# create new figure
fig=plt.figure()
# set up map projection (azimuthal equidistant).
m = Basemap(projection='npaeqd',lon_0=-90,boundinglat=15.,resolution='c')
# make a filled contour plot.
x, y = m(lons, lats)
CS1 = m.contour(x,y,hgt,15,linewidths=0.5,colors='k')
CS2 = m.contourf(x,y,hgt,CS1.levels,cmap=plt.cm.jet,extend='both')
m.colorbar(CS2,pad='12%') # draw colorbar
# draw coastlines and political boundaries.
m.drawcoastlines()
m.drawmapboundary()
m.fillcontinents()
# draw parallels and meridians.
parallels = np.arange(0.,80,20.)
m.drawparallels(parallels,labels=[0,0,1,1])
meridians = np.arange(10.,360.,20.)
m.drawmeridians(meridians,labels=[1,1,1,1])
plt.title('Azimuthal Equidistant Filled Contour Demo',y=1.075)
sys.stdout.write('plotting with azimuthal equidistant basemap ...\n')
# create new figure
fig=plt.figure()
# setup of orthographic basemap
m = Basemap(resolution='c',projection='ortho',\
lat_0=45.,lon_0=-120.)
# make a filled contour plot.
x, y = m(lons, lats)
CS1 = m.contour(x,y,hgt,15,linewidths=0.5,colors='k')
CS2 = m.contourf(x,y,hgt,CS1.levels,cmap=plt.cm.jet,extend='both')
m.colorbar(CS2) # draw colorbar
# draw coastlines and political boundaries.
m.drawcoastlines()
m.fillcontinents()
m.drawmapboundary()
# draw parallels and meridians.
parallels = np.arange(-80.,90,20.)
m.drawparallels(parallels)
meridians = np.arange(-360.,360.,20.)
m.drawmeridians(meridians)
plt.title('Orthographic Filled Contour Demo')
sys.stdout.write('plotting with orthographic basemap ..\n')
plt.show()
| gpl-3.0 |
kiszk/spark | python/pyspark/sql/types.py | 3 | 65904 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import decimal
import time
import datetime
import calendar
import json
import re
import base64
from array import array
import ctypes
if sys.version >= "3":
long = int
basestring = unicode = str
from py4j.protocol import register_input_converter
from py4j.java_gateway import JavaClass
from pyspark import SparkContext
from pyspark.serializers import CloudPickleSerializer
__all__ = [
"DataType", "NullType", "StringType", "BinaryType", "BooleanType", "DateType",
"TimestampType", "DecimalType", "DoubleType", "FloatType", "ByteType", "IntegerType",
"LongType", "ShortType", "ArrayType", "MapType", "StructField", "StructType"]
class DataType(object):
"""Base class for data types."""
def __repr__(self):
return self.__class__.__name__
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def typeName(cls):
return cls.__name__[:-4].lower()
def simpleString(self):
return self.typeName()
def jsonValue(self):
return self.typeName()
def json(self):
return json.dumps(self.jsonValue(),
separators=(',', ':'),
sort_keys=True)
def needConversion(self):
"""
Does this type need to conversion between Python object and internal SQL object.
This is used to avoid the unnecessary conversion for ArrayType/MapType/StructType.
"""
return False
def toInternal(self, obj):
"""
Converts a Python object into an internal SQL object.
"""
return obj
def fromInternal(self, obj):
"""
Converts an internal SQL object into a native Python object.
"""
return obj
# This singleton pattern does not work with pickle, you will get
# another object after pickle and unpickle
class DataTypeSingleton(type):
"""Metaclass for DataType"""
_instances = {}
def __call__(cls):
if cls not in cls._instances:
cls._instances[cls] = super(DataTypeSingleton, cls).__call__()
return cls._instances[cls]
class NullType(DataType):
"""Null type.
The data type representing None, used for the types that cannot be inferred.
"""
__metaclass__ = DataTypeSingleton
class AtomicType(DataType):
"""An internal type used to represent everything that is not
null, UDTs, arrays, structs, and maps."""
class NumericType(AtomicType):
"""Numeric data types.
"""
class IntegralType(NumericType):
"""Integral data types.
"""
__metaclass__ = DataTypeSingleton
class FractionalType(NumericType):
"""Fractional data types.
"""
class StringType(AtomicType):
"""String data type.
"""
__metaclass__ = DataTypeSingleton
class BinaryType(AtomicType):
"""Binary (byte array) data type.
"""
__metaclass__ = DataTypeSingleton
class BooleanType(AtomicType):
"""Boolean data type.
"""
__metaclass__ = DataTypeSingleton
class DateType(AtomicType):
"""Date (datetime.date) data type.
"""
__metaclass__ = DataTypeSingleton
EPOCH_ORDINAL = datetime.datetime(1970, 1, 1).toordinal()
def needConversion(self):
return True
def toInternal(self, d):
if d is not None:
return d.toordinal() - self.EPOCH_ORDINAL
def fromInternal(self, v):
if v is not None:
return datetime.date.fromordinal(v + self.EPOCH_ORDINAL)
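# Illustrative round trip for DateType (comment only, not part of the Spark source):
#   DateType().toInternal(datetime.date(1970, 1, 2)) -> 1 (days since the epoch)
#   DateType().fromInternal(1) -> datetime.date(1970, 1, 2)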
class TimestampType(AtomicType):
"""Timestamp (datetime.datetime) data type.
"""
__metaclass__ = DataTypeSingleton
def needConversion(self):
return True
def toInternal(self, dt):
if dt is not None:
seconds = (calendar.timegm(dt.utctimetuple()) if dt.tzinfo
else time.mktime(dt.timetuple()))
return int(seconds) * 1000000 + dt.microsecond
def fromInternal(self, ts):
if ts is not None:
# using int to avoid precision loss in float
return datetime.datetime.fromtimestamp(ts // 1000000).replace(microsecond=ts % 1000000)
class DecimalType(FractionalType):
"""Decimal (decimal.Decimal) data type.
The DecimalType must have fixed precision (the maximum total number of digits)
and scale (the number of digits on the right of dot). For example, (5, 2) can
support the value from [-999.99 to 999.99].
The precision can be up to 38, and the scale must be less than or equal to the precision.
When creating a DecimalType, the default precision and scale is (10, 0). When inferring
schema from decimal.Decimal objects, it will be DecimalType(38, 18).
:param precision: the maximum total number of digits (default: 10)
:param scale: the number of digits on right side of dot. (default: 0)
"""
def __init__(self, precision=10, scale=0):
self.precision = precision
self.scale = scale
self.hasPrecisionInfo = True # this is public API
def simpleString(self):
return "decimal(%d,%d)" % (self.precision, self.scale)
def jsonValue(self):
return "decimal(%d,%d)" % (self.precision, self.scale)
def __repr__(self):
return "DecimalType(%d,%d)" % (self.precision, self.scale)
class DoubleType(FractionalType):
"""Double data type, representing double precision floats.
"""
__metaclass__ = DataTypeSingleton
class FloatType(FractionalType):
"""Float data type, representing single precision floats.
"""
__metaclass__ = DataTypeSingleton
class ByteType(IntegralType):
"""Byte data type, i.e. a signed integer in a single byte.
"""
def simpleString(self):
return 'tinyint'
class IntegerType(IntegralType):
"""Int data type, i.e. a signed 32-bit integer.
"""
def simpleString(self):
return 'int'
class LongType(IntegralType):
"""Long data type, i.e. a signed 64-bit integer.
If the values are beyond the range of [-9223372036854775808, 9223372036854775807],
please use :class:`DecimalType`.
"""
def simpleString(self):
return 'bigint'
class ShortType(IntegralType):
"""Short data type, i.e. a signed 16-bit integer.
"""
def simpleString(self):
return 'smallint'
class ArrayType(DataType):
"""Array data type.
:param elementType: :class:`DataType` of each element in the array.
:param containsNull: boolean, whether the array can contain null (None) values.
"""
def __init__(self, elementType, containsNull=True):
"""
>>> ArrayType(StringType()) == ArrayType(StringType(), True)
True
>>> ArrayType(StringType(), False) == ArrayType(StringType())
False
"""
assert isinstance(elementType, DataType),\
"elementType %s should be an instance of %s" % (elementType, DataType)
self.elementType = elementType
self.containsNull = containsNull
def simpleString(self):
return 'array<%s>' % self.elementType.simpleString()
def __repr__(self):
return "ArrayType(%s,%s)" % (self.elementType,
str(self.containsNull).lower())
def jsonValue(self):
return {"type": self.typeName(),
"elementType": self.elementType.jsonValue(),
"containsNull": self.containsNull}
@classmethod
def fromJson(cls, json):
return ArrayType(_parse_datatype_json_value(json["elementType"]),
json["containsNull"])
def needConversion(self):
return self.elementType.needConversion()
def toInternal(self, obj):
if not self.needConversion():
return obj
return obj and [self.elementType.toInternal(v) for v in obj]
def fromInternal(self, obj):
if not self.needConversion():
return obj
return obj and [self.elementType.fromInternal(v) for v in obj]
class MapType(DataType):
"""Map data type.
:param keyType: :class:`DataType` of the keys in the map.
:param valueType: :class:`DataType` of the values in the map.
:param valueContainsNull: indicates whether values can contain null (None) values.
Keys in a map data type are not allowed to be null (None).
"""
def __init__(self, keyType, valueType, valueContainsNull=True):
"""
>>> (MapType(StringType(), IntegerType())
... == MapType(StringType(), IntegerType(), True))
True
>>> (MapType(StringType(), IntegerType(), False)
... == MapType(StringType(), FloatType()))
False
"""
assert isinstance(keyType, DataType),\
"keyType %s should be an instance of %s" % (keyType, DataType)
assert isinstance(valueType, DataType),\
"valueType %s should be an instance of %s" % (valueType, DataType)
self.keyType = keyType
self.valueType = valueType
self.valueContainsNull = valueContainsNull
def simpleString(self):
return 'map<%s,%s>' % (self.keyType.simpleString(), self.valueType.simpleString())
def __repr__(self):
return "MapType(%s,%s,%s)" % (self.keyType, self.valueType,
str(self.valueContainsNull).lower())
def jsonValue(self):
return {"type": self.typeName(),
"keyType": self.keyType.jsonValue(),
"valueType": self.valueType.jsonValue(),
"valueContainsNull": self.valueContainsNull}
@classmethod
def fromJson(cls, json):
return MapType(_parse_datatype_json_value(json["keyType"]),
_parse_datatype_json_value(json["valueType"]),
json["valueContainsNull"])
def needConversion(self):
return self.keyType.needConversion() or self.valueType.needConversion()
def toInternal(self, obj):
if not self.needConversion():
return obj
return obj and dict((self.keyType.toInternal(k), self.valueType.toInternal(v))
for k, v in obj.items())
def fromInternal(self, obj):
if not self.needConversion():
return obj
return obj and dict((self.keyType.fromInternal(k), self.valueType.fromInternal(v))
for k, v in obj.items())
class StructField(DataType):
"""A field in :class:`StructType`.
:param name: string, name of the field.
:param dataType: :class:`DataType` of the field.
:param nullable: boolean, whether the field can be null (None) or not.
:param metadata: a dict from string to simple type that can be converted to JSON automatically
"""
def __init__(self, name, dataType, nullable=True, metadata=None):
"""
>>> (StructField("f1", StringType(), True)
... == StructField("f1", StringType(), True))
True
>>> (StructField("f1", StringType(), True)
... == StructField("f2", StringType(), True))
False
"""
assert isinstance(dataType, DataType),\
"dataType %s should be an instance of %s" % (dataType, DataType)
assert isinstance(name, basestring), "field name %s should be string" % (name)
if not isinstance(name, str):
name = name.encode('utf-8')
self.name = name
self.dataType = dataType
self.nullable = nullable
self.metadata = metadata or {}
def simpleString(self):
return '%s:%s' % (self.name, self.dataType.simpleString())
def __repr__(self):
return "StructField(%s,%s,%s)" % (self.name, self.dataType,
str(self.nullable).lower())
def jsonValue(self):
return {"name": self.name,
"type": self.dataType.jsonValue(),
"nullable": self.nullable,
"metadata": self.metadata}
@classmethod
def fromJson(cls, json):
return StructField(json["name"],
_parse_datatype_json_value(json["type"]),
json["nullable"],
json["metadata"])
def needConversion(self):
return self.dataType.needConversion()
def toInternal(self, obj):
return self.dataType.toInternal(obj)
def fromInternal(self, obj):
return self.dataType.fromInternal(obj)
def typeName(self):
raise TypeError(
"StructField does not have typeName. "
"Use typeName on its type explicitly instead.")
class StructType(DataType):
"""Struct type, consisting of a list of :class:`StructField`.
This is the data type representing a :class:`Row`.
Iterating a :class:`StructType` will iterate its :class:`StructField`\\s.
A contained :class:`StructField` can be accessed by name or position.
>>> struct1 = StructType([StructField("f1", StringType(), True)])
>>> struct1["f1"]
StructField(f1,StringType,true)
>>> struct1[0]
StructField(f1,StringType,true)
"""
def __init__(self, fields=None):
"""
>>> struct1 = StructType([StructField("f1", StringType(), True)])
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
>>> struct1 = StructType([StructField("f1", StringType(), True)])
>>> struct2 = StructType([StructField("f1", StringType(), True),
... StructField("f2", IntegerType(), False)])
>>> struct1 == struct2
False
"""
if not fields:
self.fields = []
self.names = []
else:
self.fields = fields
self.names = [f.name for f in fields]
assert all(isinstance(f, StructField) for f in fields),\
"fields should be a list of StructField"
# Precalculated list of fields that need conversion with fromInternal/toInternal functions
self._needConversion = [f.needConversion() for f in self]
self._needSerializeAnyField = any(self._needConversion)
def add(self, field, data_type=None, nullable=True, metadata=None):
"""
Construct a StructType by adding new elements to it to define the schema. The method accepts
either:
a) A single parameter which is a StructField object.
b) Between 2 and 4 parameters as (name, data_type, nullable (optional),
metadata (optional)). The data_type parameter may be either a String or a
DataType object.
>>> struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
>>> struct2 = StructType([StructField("f1", StringType(), True), \\
... StructField("f2", StringType(), True, None)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add(StructField("f1", StringType(), True))
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add("f1", "string", True)
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
:param field: Either the name of the field or a StructField object
:param data_type: If present, the DataType of the StructField to create
:param nullable: Whether the field to add should be nullable (default True)
:param metadata: Any additional metadata (default None)
:return: a new updated StructType
"""
if isinstance(field, StructField):
self.fields.append(field)
self.names.append(field.name)
else:
if isinstance(field, str) and data_type is None:
raise ValueError("Must specify DataType if passing name of struct_field to create.")
if isinstance(data_type, str):
data_type_f = _parse_datatype_json_value(data_type)
else:
data_type_f = data_type
self.fields.append(StructField(field, data_type_f, nullable, metadata))
self.names.append(field)
# Precalculated list of fields that need conversion with fromInternal/toInternal functions
self._needConversion = [f.needConversion() for f in self]
self._needSerializeAnyField = any(self._needConversion)
return self
def __iter__(self):
"""Iterate the fields"""
return iter(self.fields)
def __len__(self):
"""Return the number of fields."""
return len(self.fields)
def __getitem__(self, key):
"""Access fields by name or slice."""
if isinstance(key, str):
for field in self:
if field.name == key:
return field
raise KeyError('No StructField named {0}'.format(key))
elif isinstance(key, int):
try:
return self.fields[key]
except IndexError:
raise IndexError('StructType index out of range')
elif isinstance(key, slice):
return StructType(self.fields[key])
else:
raise TypeError('StructType keys should be strings, integers or slices')
def simpleString(self):
return 'struct<%s>' % (','.join(f.simpleString() for f in self))
def __repr__(self):
return ("StructType(List(%s))" %
",".join(str(field) for field in self))
def jsonValue(self):
return {"type": self.typeName(),
"fields": [f.jsonValue() for f in self]}
@classmethod
def fromJson(cls, json):
return StructType([StructField.fromJson(f) for f in json["fields"]])
def fieldNames(self):
"""
Returns all field names in a list.
>>> struct = StructType([StructField("f1", StringType(), True)])
>>> struct.fieldNames()
['f1']
"""
return list(self.names)
def needConversion(self):
# We need convert Row()/namedtuple into tuple()
return True
def toInternal(self, obj):
if obj is None:
return
if self._needSerializeAnyField:
# Only calling toInternal function for fields that need conversion
if isinstance(obj, dict):
return tuple(f.toInternal(obj.get(n)) if c else obj.get(n)
for n, f, c in zip(self.names, self.fields, self._needConversion))
elif isinstance(obj, (tuple, list)):
return tuple(f.toInternal(v) if c else v
for f, v, c in zip(self.fields, obj, self._needConversion))
elif hasattr(obj, "__dict__"):
d = obj.__dict__
return tuple(f.toInternal(d.get(n)) if c else d.get(n)
for n, f, c in zip(self.names, self.fields, self._needConversion))
else:
raise ValueError("Unexpected tuple %r with StructType" % obj)
else:
if isinstance(obj, dict):
return tuple(obj.get(n) for n in self.names)
elif isinstance(obj, Row) and getattr(obj, "__from_dict__", False):
return tuple(obj[n] for n in self.names)
elif isinstance(obj, (list, tuple)):
return tuple(obj)
elif hasattr(obj, "__dict__"):
d = obj.__dict__
return tuple(d.get(n) for n in self.names)
else:
raise ValueError("Unexpected tuple %r with StructType" % obj)
def fromInternal(self, obj):
if obj is None:
return
if isinstance(obj, Row):
# it's already converted by pickler
return obj
if self._needSerializeAnyField:
# Only calling fromInternal function for fields that need conversion
values = [f.fromInternal(v) if c else v
for f, v, c in zip(self.fields, obj, self._needConversion)]
else:
values = obj
return _create_row(self.names, values)
class UserDefinedType(DataType):
"""User-defined type (UDT).
.. note:: WARN: Spark Internal Use Only
"""
@classmethod
def typeName(cls):
return cls.__name__.lower()
@classmethod
def sqlType(cls):
"""
Underlying SQL storage type for this UDT.
"""
raise NotImplementedError("UDT must implement sqlType().")
@classmethod
def module(cls):
"""
The Python module of the UDT.
"""
raise NotImplementedError("UDT must implement module().")
@classmethod
def scalaUDT(cls):
"""
The class name of the paired Scala UDT (could be '', if there
is no corresponding one).
"""
return ''
def needConversion(self):
return True
@classmethod
def _cachedSqlType(cls):
"""
Cache the sqlType() into class, because it's heavy used in `toInternal`.
"""
if not hasattr(cls, "_cached_sql_type"):
cls._cached_sql_type = cls.sqlType()
return cls._cached_sql_type
def toInternal(self, obj):
if obj is not None:
return self._cachedSqlType().toInternal(self.serialize(obj))
def fromInternal(self, obj):
v = self._cachedSqlType().fromInternal(obj)
if v is not None:
return self.deserialize(v)
def serialize(self, obj):
"""
Converts a user-type object into a SQL datum.
"""
raise NotImplementedError("UDT must implement toInternal().")
def deserialize(self, datum):
"""
Converts a SQL datum into a user-type object.
"""
raise NotImplementedError("UDT must implement fromInternal().")
def simpleString(self):
return 'udt'
def json(self):
return json.dumps(self.jsonValue(), separators=(',', ':'), sort_keys=True)
def jsonValue(self):
if self.scalaUDT():
assert self.module() != '__main__', 'UDT in __main__ cannot work with ScalaUDT'
schema = {
"type": "udt",
"class": self.scalaUDT(),
"pyClass": "%s.%s" % (self.module(), type(self).__name__),
"sqlType": self.sqlType().jsonValue()
}
else:
ser = CloudPickleSerializer()
b = ser.dumps(type(self))
schema = {
"type": "udt",
"pyClass": "%s.%s" % (self.module(), type(self).__name__),
"serializedClass": base64.b64encode(b).decode('utf8'),
"sqlType": self.sqlType().jsonValue()
}
return schema
@classmethod
def fromJson(cls, json):
pyUDT = str(json["pyClass"]) # convert unicode to str
split = pyUDT.rfind(".")
pyModule = pyUDT[:split]
pyClass = pyUDT[split+1:]
m = __import__(pyModule, globals(), locals(), [pyClass])
if not hasattr(m, pyClass):
s = base64.b64decode(json['serializedClass'].encode('utf-8'))
UDT = CloudPickleSerializer().loads(s)
else:
UDT = getattr(m, pyClass)
return UDT()
def __eq__(self, other):
return type(self) == type(other)
_atomic_types = [StringType, BinaryType, BooleanType, DecimalType, FloatType, DoubleType,
ByteType, ShortType, IntegerType, LongType, DateType, TimestampType, NullType]
_all_atomic_types = dict((t.typeName(), t) for t in _atomic_types)
_all_complex_types = dict((v.typeName(), v)
for v in [ArrayType, MapType, StructType])
_FIXED_DECIMAL = re.compile(r"decimal\(\s*(\d+)\s*,\s*(-?\d+)\s*\)")
def _parse_datatype_string(s):
"""
Parses the given data type string to a :class:`DataType`. The data type string format equals
to :class:`DataType.simpleString`, except that top level struct type can omit
the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use ``byte`` instead
of ``tinyint`` for :class:`ByteType`. We can also use ``int`` as a short name
for :class:`IntegerType`. Since Spark 2.3, this also supports a schema in a DDL-formatted
string and case-insensitive strings.
>>> _parse_datatype_string("int ")
IntegerType
>>> _parse_datatype_string("INT ")
IntegerType
>>> _parse_datatype_string("a: byte, b: decimal( 16 , 8 ) ")
StructType(List(StructField(a,ByteType,true),StructField(b,DecimalType(16,8),true)))
>>> _parse_datatype_string("a DOUBLE, b STRING")
StructType(List(StructField(a,DoubleType,true),StructField(b,StringType,true)))
>>> _parse_datatype_string("a: array< short>")
StructType(List(StructField(a,ArrayType(ShortType,true),true)))
>>> _parse_datatype_string(" map<string , string > ")
MapType(StringType,StringType,true)
>>> # Error cases
>>> _parse_datatype_string("blabla") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
>>> _parse_datatype_string("a: int,") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
>>> _parse_datatype_string("array<int") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
>>> _parse_datatype_string("map<int, boolean>>") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseException:...
"""
sc = SparkContext._active_spark_context
def from_ddl_schema(type_str):
return _parse_datatype_json_string(
sc._jvm.org.apache.spark.sql.types.StructType.fromDDL(type_str).json())
def from_ddl_datatype(type_str):
return _parse_datatype_json_string(
sc._jvm.org.apache.spark.sql.api.python.PythonSQLUtils.parseDataType(type_str).json())
try:
# DDL format, "fieldname datatype, fieldname datatype".
return from_ddl_schema(s)
except Exception as e:
try:
# For backwards compatibility, "integer", "struct<fieldname: datatype>" and etc.
return from_ddl_datatype(s)
except:
try:
# For backwards compatibility, "fieldname: datatype, fieldname: datatype" case.
return from_ddl_datatype("struct<%s>" % s.strip())
except:
raise e
def _parse_datatype_json_string(json_string):
"""Parses the given data type JSON string.
>>> import pickle
>>> def check_datatype(datatype):
... pickled = pickle.loads(pickle.dumps(datatype))
... assert datatype == pickled
... scala_datatype = spark._jsparkSession.parseDataType(datatype.json())
... python_datatype = _parse_datatype_json_string(scala_datatype.json())
... assert datatype == python_datatype
>>> for cls in _all_atomic_types.values():
... check_datatype(cls())
>>> # Simple ArrayType.
>>> simple_arraytype = ArrayType(StringType(), True)
>>> check_datatype(simple_arraytype)
>>> # Simple MapType.
>>> simple_maptype = MapType(StringType(), LongType())
>>> check_datatype(simple_maptype)
>>> # Simple StructType.
>>> simple_structtype = StructType([
... StructField("a", DecimalType(), False),
... StructField("b", BooleanType(), True),
... StructField("c", LongType(), True),
... StructField("d", BinaryType(), False)])
>>> check_datatype(simple_structtype)
>>> # Complex StructType.
>>> complex_structtype = StructType([
... StructField("simpleArray", simple_arraytype, True),
... StructField("simpleMap", simple_maptype, True),
... StructField("simpleStruct", simple_structtype, True),
... StructField("boolean", BooleanType(), False),
... StructField("withMeta", DoubleType(), False, {"name": "age"})])
>>> check_datatype(complex_structtype)
>>> # Complex ArrayType.
>>> complex_arraytype = ArrayType(complex_structtype, True)
>>> check_datatype(complex_arraytype)
>>> # Complex MapType.
>>> complex_maptype = MapType(complex_structtype,
... complex_arraytype, False)
>>> check_datatype(complex_maptype)
>>> # Decimal with negative scale.
>>> check_datatype(DecimalType(1,-1))
"""
return _parse_datatype_json_value(json.loads(json_string))
def _parse_datatype_json_value(json_value):
if not isinstance(json_value, dict):
if json_value in _all_atomic_types.keys():
return _all_atomic_types[json_value]()
elif json_value == 'decimal':
return DecimalType()
elif _FIXED_DECIMAL.match(json_value):
m = _FIXED_DECIMAL.match(json_value)
return DecimalType(int(m.group(1)), int(m.group(2)))
else:
raise ValueError("Could not parse datatype: %s" % json_value)
else:
tpe = json_value["type"]
if tpe in _all_complex_types:
return _all_complex_types[tpe].fromJson(json_value)
elif tpe == 'udt':
return UserDefinedType.fromJson(json_value)
else:
raise ValueError("not supported type: %s" % tpe)
# Mapping Python types to Spark SQL DataType
_type_mappings = {
type(None): NullType,
bool: BooleanType,
int: LongType,
float: DoubleType,
str: StringType,
bytearray: BinaryType,
decimal.Decimal: DecimalType,
datetime.date: DateType,
datetime.datetime: TimestampType,
datetime.time: TimestampType,
}
if sys.version < "3":
_type_mappings.update({
unicode: StringType,
long: LongType,
})
# Mapping Python array types to Spark SQL DataType
# We should be careful here. The size of these types in python depends on the C
# implementation. We need to make sure that this conversion does not lose any
# precision. Also, the JVM only supports signed types; when converting unsigned types,
# keep in mind that they require 1 more bit when stored as signed types.
#
# Reference for C integer size, see:
# ISO/IEC 9899:201x specification, chapter 5.2.4.2.1 Sizes of integer types <limits.h>.
# Reference for python array typecode, see:
# https://docs.python.org/2/library/array.html
# https://docs.python.org/3.6/library/array.html
# Reference for JVM's supported integral types:
# http://docs.oracle.com/javase/specs/jvms/se8/html/jvms-2.html#jvms-2.3.1
_array_signed_int_typecode_ctype_mappings = {
'b': ctypes.c_byte,
'h': ctypes.c_short,
'i': ctypes.c_int,
'l': ctypes.c_long,
}
_array_unsigned_int_typecode_ctype_mappings = {
'B': ctypes.c_ubyte,
'H': ctypes.c_ushort,
'I': ctypes.c_uint,
'L': ctypes.c_ulong
}
def _int_size_to_type(size):
"""
Return the Catalyst datatype from the size of integers.
"""
if size <= 8:
return ByteType
if size <= 16:
return ShortType
if size <= 32:
return IntegerType
if size <= 64:
return LongType
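# Illustrative mapping (comment only, not part of the Spark source):
#   _int_size_to_type(8) -> ByteType
#   _int_size_to_type(33) -> LongType (needs more than 32 bits)
#   _int_size_to_type(65) -> None (falls through; no integral type is wide enough)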
# The list of all supported array typecodes is stored here
_array_type_mappings = {
# Warning: The actual properties of float and double in C are not specified by the C standard.
# On almost every system supported by both python and JVM, they are IEEE 754
# single-precision binary floating-point format and IEEE 754 double-precision
# binary floating-point format. And we do assume the same thing here for now.
'f': FloatType,
'd': DoubleType
}
# compute array typecode mappings for signed integer types
for _typecode in _array_signed_int_typecode_ctype_mappings.keys():
size = ctypes.sizeof(_array_signed_int_typecode_ctype_mappings[_typecode]) * 8
dt = _int_size_to_type(size)
if dt is not None:
_array_type_mappings[_typecode] = dt
# compute array typecode mappings for unsigned integer types
for _typecode in _array_unsigned_int_typecode_ctype_mappings.keys():
# JVM does not have unsigned types, so use signed types that are at least 1
# bit larger to store them
size = ctypes.sizeof(_array_unsigned_int_typecode_ctype_mappings[_typecode]) * 8 + 1
dt = _int_size_to_type(size)
if dt is not None:
_array_type_mappings[_typecode] = dt
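# Illustrative result of the two loops above (platform dependent, so only a
# sketch for a typical 64-bit LP64 build): 'i' (32-bit signed) maps to
# IntegerType, 'I' (32-bit unsigned, stored in 33 bits) maps to LongType, and
# 'L' (64-bit unsigned, needing 65 bits) gets no mapping from this loop.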
# Type code 'u' in Python's array is deprecated since version 3.3, and will be
# removed in version 4.0. See: https://docs.python.org/3/library/array.html
if sys.version_info[0] < 4:
_array_type_mappings['u'] = StringType
# Type code 'c' are only available at python 2
if sys.version_info[0] < 3:
_array_type_mappings['c'] = StringType
# SPARK-21465:
# In python2, array of 'L' happened to be mistakenly partially supported. To
# avoid breaking user's code, we should keep this partial support. Below is a
# dirty hack to keep this partial support and make the unit tests pass
import platform
if sys.version_info[0] < 3 and platform.python_implementation() != 'PyPy':
if 'L' not in _array_type_mappings.keys():
_array_type_mappings['L'] = LongType
_array_unsigned_int_typecode_ctype_mappings['L'] = ctypes.c_uint
def _infer_type(obj):
"""Infer the DataType from obj
"""
if obj is None:
return NullType()
if hasattr(obj, '__UDT__'):
return obj.__UDT__
dataType = _type_mappings.get(type(obj))
if dataType is DecimalType:
# the precision and scale of `obj` may be different from row to row.
return DecimalType(38, 18)
elif dataType is not None:
return dataType()
if isinstance(obj, dict):
for key, value in obj.items():
if key is not None and value is not None:
return MapType(_infer_type(key), _infer_type(value), True)
return MapType(NullType(), NullType(), True)
elif isinstance(obj, list):
for v in obj:
if v is not None:
return ArrayType(_infer_type(v), True)
return ArrayType(NullType(), True)
elif isinstance(obj, array):
if obj.typecode in _array_type_mappings:
return ArrayType(_array_type_mappings[obj.typecode](), False)
else:
raise TypeError("not supported type: array(%s)" % obj.typecode)
else:
try:
return _infer_schema(obj)
except TypeError:
raise TypeError("not supported type: %s" % type(obj))
def _infer_schema(row, names=None):
"""Infer the schema from dict/namedtuple/object"""
if isinstance(row, dict):
items = sorted(row.items())
elif isinstance(row, (tuple, list)):
if hasattr(row, "__fields__"): # Row
items = zip(row.__fields__, tuple(row))
elif hasattr(row, "_fields"): # namedtuple
items = zip(row._fields, tuple(row))
else:
if names is None:
names = ['_%d' % i for i in range(1, len(row) + 1)]
elif len(names) < len(row):
names.extend('_%d' % i for i in range(len(names) + 1, len(row) + 1))
items = zip(names, row)
elif hasattr(row, "__dict__"): # object
items = sorted(row.__dict__.items())
else:
raise TypeError("Can not infer schema for type: %s" % type(row))
fields = [StructField(k, _infer_type(v), True) for k, v in items]
return StructType(fields)
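# Illustrative inference (comment only, not part of the Spark source), on Python 3:
#   _infer_schema({'age': 30, 'name': 'Alice'})
# -> StructType(List(StructField(age,LongType,true),StructField(name,StringType,true)))
# because dict items are sorted by key and int/str map to LongType/StringType.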
def _has_nulltype(dt):
""" Return whether there is NullType in `dt` or not """
if isinstance(dt, StructType):
return any(_has_nulltype(f.dataType) for f in dt.fields)
elif isinstance(dt, ArrayType):
return _has_nulltype((dt.elementType))
elif isinstance(dt, MapType):
return _has_nulltype(dt.keyType) or _has_nulltype(dt.valueType)
else:
return isinstance(dt, NullType)
def _merge_type(a, b, name=None):
if name is None:
new_msg = lambda msg: msg
new_name = lambda n: "field %s" % n
else:
new_msg = lambda msg: "%s: %s" % (name, msg)
new_name = lambda n: "field %s in %s" % (n, name)
if isinstance(a, NullType):
return b
elif isinstance(b, NullType):
return a
elif type(a) is not type(b):
# TODO: type cast (such as int -> long)
raise TypeError(new_msg("Can not merge type %s and %s" % (type(a), type(b))))
# same type
if isinstance(a, StructType):
nfs = dict((f.name, f.dataType) for f in b.fields)
fields = [StructField(f.name, _merge_type(f.dataType, nfs.get(f.name, NullType()),
name=new_name(f.name)))
for f in a.fields]
names = set([f.name for f in fields])
for n in nfs:
if n not in names:
fields.append(StructField(n, nfs[n]))
return StructType(fields)
elif isinstance(a, ArrayType):
return ArrayType(_merge_type(a.elementType, b.elementType,
name='element in array %s' % name), True)
elif isinstance(a, MapType):
return MapType(_merge_type(a.keyType, b.keyType, name='key of map %s' % name),
_merge_type(a.valueType, b.valueType, name='value of map %s' % name),
True)
else:
return a
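# Illustrative merges (comment only, not part of the Spark source):
#   _merge_type(LongType(), NullType()) -> LongType
#   _merge_type(LongType(), DoubleType()) -> raises TypeError (no implicit widening yet)
# Merging two StructTypes unions their fields and recurses on shared field names.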
def _need_converter(dataType):
if isinstance(dataType, StructType):
return True
elif isinstance(dataType, ArrayType):
return _need_converter(dataType.elementType)
elif isinstance(dataType, MapType):
return _need_converter(dataType.keyType) or _need_converter(dataType.valueType)
elif isinstance(dataType, NullType):
return True
else:
return False
def _create_converter(dataType):
"""Create a converter to drop the names of fields in obj """
if not _need_converter(dataType):
return lambda x: x
if isinstance(dataType, ArrayType):
conv = _create_converter(dataType.elementType)
return lambda row: [conv(v) for v in row]
elif isinstance(dataType, MapType):
kconv = _create_converter(dataType.keyType)
vconv = _create_converter(dataType.valueType)
return lambda row: dict((kconv(k), vconv(v)) for k, v in row.items())
elif isinstance(dataType, NullType):
return lambda x: None
elif not isinstance(dataType, StructType):
return lambda x: x
# dataType must be StructType
names = [f.name for f in dataType.fields]
converters = [_create_converter(f.dataType) for f in dataType.fields]
convert_fields = any(_need_converter(f.dataType) for f in dataType.fields)
def convert_struct(obj):
if obj is None:
return
if isinstance(obj, (tuple, list)):
if convert_fields:
return tuple(conv(v) for v, conv in zip(obj, converters))
else:
return tuple(obj)
if isinstance(obj, dict):
d = obj
elif hasattr(obj, "__dict__"): # object
d = obj.__dict__
else:
raise TypeError("Unexpected obj type: %s" % type(obj))
if convert_fields:
return tuple([conv(d.get(name)) for name, conv in zip(names, converters)])
else:
return tuple([d.get(name) for name in names])
return convert_struct
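# Illustrative sketch (added comment, not part of the original source): for a struct schema the
# returned converter drops field names and yields plain tuples, roughly
#   conv = _create_converter(StructType([StructField("a", LongType()),
#                                        StructField("b", StringType())]))
#   conv({"a": 1, "b": "x"})   # -> (1, 'x')
#   conv(Row(a=1, b="x"))      # -> (1, 'x')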
_acceptable_types = {
BooleanType: (bool,),
ByteType: (int, long),
ShortType: (int, long),
IntegerType: (int, long),
LongType: (int, long),
FloatType: (float,),
DoubleType: (float,),
DecimalType: (decimal.Decimal,),
StringType: (str, unicode),
BinaryType: (bytearray,),
DateType: (datetime.date, datetime.datetime),
TimestampType: (datetime.datetime,),
ArrayType: (list, tuple, array),
MapType: (dict,),
StructType: (tuple, list, dict),
}
def _make_type_verifier(dataType, nullable=True, name=None):
"""
Make a verifier that checks the type of obj against dataType and raises a TypeError if they do
not match.
This verifier also checks the value of obj against datatype and raises a ValueError if it's not
    within the allowed range, e.g. using 128 as ByteType will overflow. Note that Python float is
not checked, so it will become infinity when cast to Java float if it overflows.
>>> _make_type_verifier(StructType([]))(None)
>>> _make_type_verifier(StringType())("")
>>> _make_type_verifier(LongType())(0)
>>> _make_type_verifier(LongType())(1 << 64) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(ArrayType(ShortType()))(list(range(3)))
>>> _make_type_verifier(ArrayType(StringType()))(set()) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError:...
>>> _make_type_verifier(MapType(StringType(), IntegerType()))({})
>>> _make_type_verifier(StructType([]))(())
>>> _make_type_verifier(StructType([]))([])
>>> _make_type_verifier(StructType([]))([1]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> # Check if numeric values are within the allowed range.
>>> _make_type_verifier(ByteType())(12)
>>> _make_type_verifier(ByteType())(1234) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(ByteType(), False)(None) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(
... ArrayType(ShortType(), False))([1, None]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _make_type_verifier(MapType(StringType(), IntegerType()))({None: 1})
Traceback (most recent call last):
...
ValueError:...
>>> schema = StructType().add("a", IntegerType()).add("b", StringType(), False)
>>> _make_type_verifier(schema)((1, None)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
if name is None:
new_msg = lambda msg: msg
new_name = lambda n: "field %s" % n
else:
new_msg = lambda msg: "%s: %s" % (name, msg)
new_name = lambda n: "field %s in %s" % (n, name)
def verify_nullability(obj):
if obj is None:
if nullable:
return True
else:
raise ValueError(new_msg("This field is not nullable, but got None"))
else:
return False
_type = type(dataType)
def assert_acceptable_types(obj):
assert _type in _acceptable_types, \
new_msg("unknown datatype: %s for object %r" % (dataType, obj))
def verify_acceptable_types(obj):
        # exact match only: subclasses of the acceptable types cannot go through fromInternal in the JVM
if type(obj) not in _acceptable_types[_type]:
raise TypeError(new_msg("%s can not accept object %r in type %s"
% (dataType, obj, type(obj))))
if isinstance(dataType, StringType):
        # StringType can work with any type
verify_value = lambda _: _
elif isinstance(dataType, UserDefinedType):
verifier = _make_type_verifier(dataType.sqlType(), name=name)
def verify_udf(obj):
if not (hasattr(obj, '__UDT__') and obj.__UDT__ == dataType):
raise ValueError(new_msg("%r is not an instance of type %r" % (obj, dataType)))
verifier(dataType.toInternal(obj))
verify_value = verify_udf
elif isinstance(dataType, ByteType):
def verify_byte(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -128 or obj > 127:
raise ValueError(new_msg("object of ByteType out of range, got: %s" % obj))
verify_value = verify_byte
elif isinstance(dataType, ShortType):
def verify_short(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -32768 or obj > 32767:
raise ValueError(new_msg("object of ShortType out of range, got: %s" % obj))
verify_value = verify_short
elif isinstance(dataType, IntegerType):
def verify_integer(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -2147483648 or obj > 2147483647:
raise ValueError(
new_msg("object of IntegerType out of range, got: %s" % obj))
verify_value = verify_integer
elif isinstance(dataType, LongType):
def verify_long(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
if obj < -9223372036854775808 or obj > 9223372036854775807:
raise ValueError(
new_msg("object of LongType out of range, got: %s" % obj))
verify_value = verify_long
elif isinstance(dataType, ArrayType):
element_verifier = _make_type_verifier(
dataType.elementType, dataType.containsNull, name="element in array %s" % name)
def verify_array(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
for i in obj:
element_verifier(i)
verify_value = verify_array
elif isinstance(dataType, MapType):
key_verifier = _make_type_verifier(dataType.keyType, False, name="key of map %s" % name)
value_verifier = _make_type_verifier(
dataType.valueType, dataType.valueContainsNull, name="value of map %s" % name)
def verify_map(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
for k, v in obj.items():
key_verifier(k)
value_verifier(v)
verify_value = verify_map
elif isinstance(dataType, StructType):
verifiers = []
for f in dataType.fields:
verifier = _make_type_verifier(f.dataType, f.nullable, name=new_name(f.name))
verifiers.append((f.name, verifier))
def verify_struct(obj):
assert_acceptable_types(obj)
if isinstance(obj, dict):
for f, verifier in verifiers:
verifier(obj.get(f))
elif isinstance(obj, Row) and getattr(obj, "__from_dict__", False):
# the order in obj could be different than dataType.fields
for f, verifier in verifiers:
verifier(obj[f])
elif isinstance(obj, (tuple, list)):
if len(obj) != len(verifiers):
raise ValueError(
new_msg("Length of object (%d) does not match with "
"length of fields (%d)" % (len(obj), len(verifiers))))
for v, (_, verifier) in zip(obj, verifiers):
verifier(v)
elif hasattr(obj, "__dict__"):
d = obj.__dict__
for f, verifier in verifiers:
verifier(d.get(f))
else:
raise TypeError(new_msg("StructType can not accept object %r in type %s"
% (obj, type(obj))))
verify_value = verify_struct
else:
def verify_default(obj):
assert_acceptable_types(obj)
verify_acceptable_types(obj)
verify_value = verify_default
def verify(obj):
if not verify_nullability(obj):
verify_value(obj)
return verify
# This is used to unpickle a Row from JVM
def _create_row_inbound_converter(dataType):
return lambda *a: dataType.fromInternal(a)
def _create_row(fields, values):
row = Row(*values)
row.__fields__ = fields
return row
class Row(tuple):
"""
A row in :class:`DataFrame`.
The fields in it can be accessed:
* like attributes (``row.key``)
* like dictionary values (``row[key]``)
``key in row`` will search through row keys.
    Row can be used to create a row object by using named arguments;
    the fields will be sorted by name. It is not allowed to omit
    a named argument to indicate that a value is None or missing; such a
    value must be set to None explicitly.
>>> row = Row(name="Alice", age=11)
>>> row
Row(age=11, name='Alice')
>>> row['name'], row['age']
('Alice', 11)
>>> row.name, row.age
('Alice', 11)
>>> 'name' in row
True
>>> 'wrong_key' in row
False
Row also can be used to create another Row like class, then it
could be used to create Row objects, such as
>>> Person = Row("name", "age")
>>> Person
<Row('name', 'age')>
>>> 'name' in Person
True
>>> 'wrong_key' in Person
False
>>> Person("Alice", 11)
Row(name='Alice', age=11)
This form can also be used to create rows as tuple values, i.e. with unnamed
fields. Beware that such Row objects have different equality semantics:
>>> row1 = Row("Alice", 11)
>>> row2 = Row(name="Alice", age=11)
>>> row1 == row2
False
>>> row3 = Row(a="Alice", b=11)
>>> row1 == row3
True
"""
def __new__(self, *args, **kwargs):
if args and kwargs:
raise ValueError("Can not use both args "
"and kwargs to create Row")
if kwargs:
# create row objects
names = sorted(kwargs.keys())
row = tuple.__new__(self, [kwargs[n] for n in names])
row.__fields__ = names
row.__from_dict__ = True
return row
else:
# create row class or objects
return tuple.__new__(self, args)
def asDict(self, recursive=False):
"""
        Return as a dict
        :param recursive: turns the nested Rows into dicts (default: False).
>>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11}
True
>>> row = Row(key=1, value=Row(name='a', age=2))
>>> row.asDict() == {'key': 1, 'value': Row(age=2, name='a')}
True
>>> row.asDict(True) == {'key': 1, 'value': {'name': 'a', 'age': 2}}
True
"""
if not hasattr(self, "__fields__"):
raise TypeError("Cannot convert a Row class into dict")
if recursive:
def conv(obj):
if isinstance(obj, Row):
return obj.asDict(True)
elif isinstance(obj, list):
return [conv(o) for o in obj]
elif isinstance(obj, dict):
return dict((k, conv(v)) for k, v in obj.items())
else:
return obj
return dict(zip(self.__fields__, (conv(o) for o in self)))
else:
return dict(zip(self.__fields__, self))
def __contains__(self, item):
if hasattr(self, "__fields__"):
return item in self.__fields__
else:
return super(Row, self).__contains__(item)
    # let the object act like a class
def __call__(self, *args):
"""create new Row object"""
if len(args) > len(self):
raise ValueError("Can not create Row with fields %s, expected %d values "
"but got %s" % (self, len(self), args))
return _create_row(self, args)
def __getitem__(self, item):
if isinstance(item, (int, slice)):
return super(Row, self).__getitem__(item)
try:
# it will be slow when it has many fields,
# but this will not be used in normal cases
idx = self.__fields__.index(item)
return super(Row, self).__getitem__(idx)
except IndexError:
raise KeyError(item)
except ValueError:
raise ValueError(item)
def __getattr__(self, item):
if item.startswith("__"):
raise AttributeError(item)
try:
# it will be slow when it has many fields,
# but this will not be used in normal cases
idx = self.__fields__.index(item)
return self[idx]
except IndexError:
raise AttributeError(item)
except ValueError:
raise AttributeError(item)
def __setattr__(self, key, value):
if key != '__fields__' and key != "__from_dict__":
raise Exception("Row is read-only")
self.__dict__[key] = value
def __reduce__(self):
"""Returns a tuple so Python knows how to pickle Row."""
if hasattr(self, "__fields__"):
return (_create_row, (self.__fields__, tuple(self)))
else:
return tuple.__reduce__(self)
def __repr__(self):
"""Printable representation of Row used in Python REPL."""
if hasattr(self, "__fields__"):
return "Row(%s)" % ", ".join("%s=%r" % (k, v)
for k, v in zip(self.__fields__, tuple(self)))
else:
return "<Row(%s)>" % ", ".join("%r" % field for field in self)
class DateConverter(object):
def can_convert(self, obj):
return isinstance(obj, datetime.date)
def convert(self, obj, gateway_client):
Date = JavaClass("java.sql.Date", gateway_client)
return Date.valueOf(obj.strftime("%Y-%m-%d"))
class DatetimeConverter(object):
def can_convert(self, obj):
return isinstance(obj, datetime.datetime)
def convert(self, obj, gateway_client):
Timestamp = JavaClass("java.sql.Timestamp", gateway_client)
seconds = (calendar.timegm(obj.utctimetuple()) if obj.tzinfo
else time.mktime(obj.timetuple()))
t = Timestamp(int(seconds) * 1000)
t.setNanos(obj.microsecond * 1000)
return t
# datetime is a subclass of date, we should register DatetimeConverter first
register_input_converter(DatetimeConverter())
register_input_converter(DateConverter())
def to_arrow_type(dt):
""" Convert Spark data type to pyarrow type
"""
import pyarrow as pa
if type(dt) == BooleanType:
arrow_type = pa.bool_()
elif type(dt) == ByteType:
arrow_type = pa.int8()
elif type(dt) == ShortType:
arrow_type = pa.int16()
elif type(dt) == IntegerType:
arrow_type = pa.int32()
elif type(dt) == LongType:
arrow_type = pa.int64()
elif type(dt) == FloatType:
arrow_type = pa.float32()
elif type(dt) == DoubleType:
arrow_type = pa.float64()
elif type(dt) == DecimalType:
arrow_type = pa.decimal128(dt.precision, dt.scale)
elif type(dt) == StringType:
arrow_type = pa.string()
elif type(dt) == BinaryType:
arrow_type = pa.binary()
elif type(dt) == DateType:
arrow_type = pa.date32()
elif type(dt) == TimestampType:
# Timestamps should be in UTC, JVM Arrow timestamps require a timezone to be read
arrow_type = pa.timestamp('us', tz='UTC')
elif type(dt) == ArrayType:
if type(dt.elementType) in [StructType, TimestampType]:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
arrow_type = pa.list_(to_arrow_type(dt.elementType))
elif type(dt) == StructType:
if any(type(field.dataType) == StructType for field in dt):
raise TypeError("Nested StructType not supported in conversion to Arrow")
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in dt]
arrow_type = pa.struct(fields)
else:
raise TypeError("Unsupported type in conversion to Arrow: " + str(dt))
return arrow_type
def to_arrow_schema(schema):
""" Convert a schema from Spark to Arrow
"""
import pyarrow as pa
fields = [pa.field(field.name, to_arrow_type(field.dataType), nullable=field.nullable)
for field in schema]
return pa.schema(fields)
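# Illustrative sketch (added comment, assumes pyarrow is installed; not part of the original
# source): a simple Spark schema maps field by field onto an Arrow schema, e.g.
#   schema = StructType().add("id", LongType()).add("name", StringType())
#   to_arrow_schema(schema)   # -> roughly: id: int64, name: string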
def from_arrow_type(at):
""" Convert pyarrow type to Spark data type.
"""
import pyarrow.types as types
if types.is_boolean(at):
spark_type = BooleanType()
elif types.is_int8(at):
spark_type = ByteType()
elif types.is_int16(at):
spark_type = ShortType()
elif types.is_int32(at):
spark_type = IntegerType()
elif types.is_int64(at):
spark_type = LongType()
elif types.is_float32(at):
spark_type = FloatType()
elif types.is_float64(at):
spark_type = DoubleType()
elif types.is_decimal(at):
spark_type = DecimalType(precision=at.precision, scale=at.scale)
elif types.is_string(at):
spark_type = StringType()
elif types.is_binary(at):
spark_type = BinaryType()
elif types.is_date32(at):
spark_type = DateType()
elif types.is_timestamp(at):
spark_type = TimestampType()
elif types.is_list(at):
if types.is_timestamp(at.value_type):
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
spark_type = ArrayType(from_arrow_type(at.value_type))
elif types.is_struct(at):
if any(types.is_struct(field.type) for field in at):
raise TypeError("Nested StructType not supported in conversion from Arrow: " + str(at))
return StructType(
[StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
for field in at])
else:
raise TypeError("Unsupported type in conversion from Arrow: " + str(at))
return spark_type
def from_arrow_schema(arrow_schema):
""" Convert schema from Arrow to Spark.
"""
return StructType(
[StructField(field.name, from_arrow_type(field.type), nullable=field.nullable)
for field in arrow_schema])
def _get_local_timezone():
""" Get local timezone using pytz with environment variable, or dateutil.
If there is a 'TZ' environment variable, pass it to pandas to use pytz and use it as timezone
string, otherwise use the special word 'dateutil/:' which means that pandas uses dateutil and
it reads system configuration to know the system local timezone.
See also:
- https://github.com/pandas-dev/pandas/blob/0.19.x/pandas/tslib.pyx#L1753
- https://github.com/dateutil/dateutil/blob/2.6.1/dateutil/tz/tz.py#L1338
"""
import os
return os.environ.get('TZ', 'dateutil/:')
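# Illustrative sketch (added comment, hypothetical values, not part of the original source):
#   with TZ unset        -> 'dateutil/:'  (pandas then asks dateutil for the system zone)
#   with TZ='Asia/Tokyo' -> 'Asia/Tokyo'  (passed through to pandas/pytz as-is)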
def _check_series_localize_timestamps(s, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone.
If the input series is not a timestamp series, then the same series is returned. If the input
series is a timestamp series, then a converted series is returned.
:param s: pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series that have been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64tz_dtype
tz = timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(tz).dt.tz_localize(None)
else:
return s
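# Illustrative sketch (added comment, assumes pandas is installed; names are made up for the
# example and are not part of the original source):
#   s = pd.Series(pd.date_range('2018-01-01', periods=2, freq='H', tz='UTC'))
#   _check_series_localize_timestamps(s, 'America/Los_Angeles')
#   # -> tz-naive wall-clock times in Los Angeles; a non-timestamp series is returned unchanged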
def _check_dataframe_localize_timestamps(pdf, timezone):
"""
Convert timezone aware timestamps to timezone-naive in the specified timezone or local timezone
:param pdf: pandas.DataFrame
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.DataFrame where any timezone aware columns have been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
for column, series in pdf.iteritems():
pdf[column] = _check_series_localize_timestamps(series, timezone)
return pdf
def _check_series_convert_timestamps_internal(s, timezone):
"""
Convert a tz-naive timestamp in the specified timezone or local timezone to UTC normalized for
Spark internal storage
:param s: a pandas.Series
:param timezone: the timezone to convert. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been UTC normalized without a time zone
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64_dtype(s.dtype):
        # When we tz_localize a tz-naive timestamp, the result is ambiguous if the tz-naive
        # timestamp falls in the hour when the clock is adjusted backward due to
        # daylight saving time (dst).
        # E.g., for America/New_York, the clock is adjusted backward on 2015-11-01 2:00 to
        # 2015-11-01 1:00 from dst time to standard time, and therefore, when we tz_localize
        # the tz-naive timestamp 2015-11-01 1:30 with the America/New_York timezone, it can be
        # either dst time (2015-11-01 1:30-0400) or standard time (2015-11-01 1:30-0500).
        #
        # Here we explicitly choose to use standard time. This matches the default behavior of
# pytz.
#
# Here are some code to help understand this behavior:
# >>> import datetime
# >>> import pandas as pd
# >>> import pytz
# >>>
# >>> t = datetime.datetime(2015, 11, 1, 1, 30)
# >>> ts = pd.Series([t])
# >>> tz = pytz.timezone('America/New_York')
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=True)
# 0 2015-11-01 01:30:00-04:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> ts.dt.tz_localize(tz, ambiguous=False)
# 0 2015-11-01 01:30:00-05:00
# dtype: datetime64[ns, America/New_York]
# >>>
# >>> str(tz.localize(t))
# '2015-11-01 01:30:00-05:00'
tz = timezone or _get_local_timezone()
return s.dt.tz_localize(tz, ambiguous=False).dt.tz_convert('UTC')
elif is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert('UTC')
else:
return s
def _check_series_convert_timestamps_localize(s, from_timezone, to_timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param from_timezone: the timezone to convert from. if None then use local timezone
:param to_timezone: the timezone to convert to. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
"""
from pyspark.sql.utils import require_minimum_pandas_version
require_minimum_pandas_version()
import pandas as pd
from pandas.api.types import is_datetime64tz_dtype, is_datetime64_dtype
from_tz = from_timezone or _get_local_timezone()
to_tz = to_timezone or _get_local_timezone()
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(to_tz).dt.tz_localize(None)
elif is_datetime64_dtype(s.dtype) and from_tz != to_tz:
# `s.dt.tz_localize('tzlocal()')` doesn't work properly when including NaT.
return s.apply(
lambda ts: ts.tz_localize(from_tz, ambiguous=False).tz_convert(to_tz).tz_localize(None)
if ts is not pd.NaT else pd.NaT)
else:
return s
def _check_series_convert_timestamps_local_tz(s, timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param timezone: the timezone to convert to. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
"""
return _check_series_convert_timestamps_localize(s, None, timezone)
def _check_series_convert_timestamps_tz_local(s, timezone):
"""
Convert timestamp to timezone-naive in the specified timezone or local timezone
:param s: a pandas.Series
:param timezone: the timezone to convert from. if None then use local timezone
:return pandas.Series where if it is a timestamp, has been converted to tz-naive
"""
return _check_series_convert_timestamps_localize(s, timezone, None)
def _test():
import doctest
from pyspark.context import SparkContext
from pyspark.sql import SparkSession
globs = globals()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession.builder.getOrCreate()
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
Jwuthri/Mozinor | mozinor/pipeline.py | 1 | 5062 | # -*- coding: utf-8 -*-
"""
Created on July 2017
@author: JulienWuthrich
"""
import logging
import datetime
import pydotplus
from io import StringIO
import numpy as np
import pandas as pd
from sklearn.tree import export_graphviz
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from mozinor.config.classifiers import *
from mozinor.config.regressors import *
from mozinor.config.params import *
from mozinor.config.explain import *
from mozinor.settings import logger
class EvaluateModels(object):
def __init__(self, X_train, y_train, X_test, y_test, is_regression, fast=True):
self.X_train = X_train
self.y_train = y_train
self.X_test = X_test
self.y_test = y_test
self.is_regression = is_regression
self.fast = fast
self.setPipeline()
self.setOptimalNeighbors()
self.setOptimalTrees()
def setPipeline(self):
if self.is_regression:
if self.fast:
self.pipeline = Fast_Regressors
else:
self.pipeline = Regressors
else:
if self.fast:
self.pipeline = Fast_Classifiers
else:
self.pipeline = Classifiers
def setOptimalNeighbors(self):
logger.log("Optimal number of clusters", logging.INFO)
print(Elbow)
self.optimal_neighbors = OptimalNeighbors(self.X_train).compute()
def setOptimalTrees(self):
logger.log("Optimal number of trees", logging.INFO)
print(OobError)
self.optimal_trees = OptimalTrees(
self.X_train, self.y_train, self.is_regression
).compute()
def instanciateEstimator(self, estimator, params):
query = 'from {} import {}'
cquery = query.format(params.get("import"), estimator)
exec(cquery)
return eval(estimator)
def updateDict(self, dictionary, key):
dcopy = dict(dictionary)
del dcopy[key]
return dcopy
def updateParams(self, estimator, params):
logger.log("Estimator {}".format(estimator), logging.INFO)
params = self.updateDict(params, "import")
if estimator in ["KNeighborsClassifier", "KNeighborsRegressor"]:
params["n_neighbors"] = [self.optimal_neighbors]
if "show" in params:
print(params.get("show"))
del params["show"]
return params
def _getBestParams(self, cv):
logger.log(" Best params => {}".format(str(cv.best_params_)), logging.INFO)
logger.log(" Best Score => {0:.3f}".format(abs(cv.best_score_)), logging.INFO)
return cv
def getBestParams(self, cv):
X_train = np.array(self.X_train)
y_train = np.array(self.y_train.ix[:,0])
cv.fit(X_train, y_train)
return self._getBestParams(cv)
def buildRandomizedSearchCV(self, estimator_cls, params):
cv = RandomizedSearchCV(
estimator=estimator_cls(),
param_distributions=params,
n_iter=10,
cv=3,
verbose=1,
n_jobs=1
)
return self.getBestParams(cv)
def buildGridSearchCV(self, estimator_cls, params):
cv = GridSearchCV(
estimator=estimator_cls(),
param_grid=params,
verbose=1,
n_jobs=1
)
return self.getBestParams(cv)
def runPipe(self, estimator_cls, params):
try:
return self.buildRandomizedSearchCV(estimator_cls, params)
except Exception:
return self.buildGridSearchCV(estimator_cls, params)
def showDecisionTree(self, cv):
dot_data = StringIO()
export_graphviz(
cv.best_estimator_, out_file=dot_data,
feature_names=list(self.X_train.columns),
filled=True, rounded=True, special_characters=True
)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
filename = str(datetime.datetime.now()).replace(" ", "") + ".png"
graph.write_png(filename)
logger.log("Check the decision tree: {}".format(filename), logging.INFO)
def _makePipe(self, estimator, params):
estimator_cls = self.instanciateEstimator(estimator, params)
params = self.updateParams(estimator, params)
cv = self.runPipe(estimator_cls, params)
if estimator in ["DecisionTreeClassifier", "DecisionTreeRegressor"]:
self.showDecisionTree(cv)
return {
"best_estimator_": cv.best_estimator_,
"best_score_": abs(cv.best_score_),
"best_params_": cv.best_params_
}
def makePipe(self, estimator, params):
try:
return self._makePipe(estimator, params)
except Exception:
return {
"best_estimator_": estimator,
"best_score_": 0
}
def evaluate(self):
d_model_score = dict()
for estimator, params in self.pipeline.items():
d_model_score[estimator] = self.makePipe(estimator, params)
return d_model_score
| mit |
bzero/statsmodels | statsmodels/sandbox/panel/panelmod.py | 27 | 14526 | """
Sandbox Panel Estimators
References
-----------
Baltagi, Badi H. `Econometric Analysis of Panel Data.` 4th ed. Wiley, 2008.
"""
from __future__ import print_function
from statsmodels.compat.python import range, reduce
from statsmodels.tools.tools import categorical
from statsmodels.regression.linear_model import GLS, WLS
import numpy as np
__all__ = ["PanelModel"]
from pandas import LongPanel, __version__
def group(X):
"""
Returns unique numeric values for groups without sorting.
Examples
--------
>>> X = np.array(['a','a','b','c','b','c'])
    >>> g = group(X)
    >>> g
array([ 0., 0., 1., 2., 1., 2.])
"""
uniq_dict = {}
group = np.zeros(len(X))
for i in range(len(X)):
if not X[i] in uniq_dict:
uniq_dict.update({X[i] : len(uniq_dict)})
group[i] = uniq_dict[X[i]]
return group
def repanel_cov(groups, sigmas):
'''calculate error covariance matrix for random effects model
Parameters
----------
groups : array, (nobs, nre) or (nobs,)
array of group/category observations
    sigmas : array, (nre+1,)
array of standard deviations of random effects,
last element is the standard deviation of the
idiosyncratic error
Returns
-------
omega : array, (nobs, nobs)
covariance matrix of error
omegainv : array, (nobs, nobs)
inverse covariance matrix of error
omegainvsqrt : array, (nobs, nobs)
squareroot inverse covariance matrix of error
such that omega = omegainvsqrt * omegainvsqrt.T
Notes
-----
This does not use sparse matrices and constructs nobs by nobs
matrices. Also, omegainvsqrt is not sparse, i.e. elements are non-zero
'''
if groups.ndim == 1:
groups = groups[:,None]
nobs, nre = groups.shape
omega = sigmas[-1]*np.eye(nobs)
for igr in range(nre):
group = groups[:,igr:igr+1]
groupuniq = np.unique(group)
dummygr = sigmas[igr] * (group == groupuniq).astype(float)
omega += np.dot(dummygr, dummygr.T)
ev, evec = np.linalg.eigh(omega) #eig doesn't work
omegainv = np.dot(evec, (1/ev * evec).T)
omegainvhalf = evec/np.sqrt(ev)
return omega, omegainv, omegainvhalf
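# Illustrative sketch (added comment, hypothetical numbers, not part of the original module):
# two random-effect factors plus idiosyncratic noise for four stacked observations.
#   groups_demo = np.column_stack([np.array([0, 0, 1, 1]),   # individual effect
#                                  np.array([0, 1, 0, 1])])  # time effect
#   sigmas_demo = np.array([2.0, 3.0, 1.0])                  # last entry: idiosyncratic
#   omega_d, omegainv_d, omegainvhalf_d = repanel_cov(groups_demo, sigmas_demo)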
class PanelData(LongPanel):
pass
class PanelModel(object):
"""
An abstract statistical model class for panel (longitudinal) datasets.
Parameters
    ----------
endog : array-like or str
If a pandas object is used then endog should be the name of the
endogenous variable as a string.
# exog
# panel_arr
# time_arr
panel_data : pandas.LongPanel object
Notes
-----
If a pandas object is supplied it is assumed that the major_axis is time
and that the minor_axis has the panel variable.
"""
def __init__(self, endog=None, exog=None, panel=None, time=None,
xtnames=None, equation=None, panel_data=None):
        if panel_data is None:
# if endog == None and exog == None and panel == None and \
# time == None:
# raise ValueError("If pandel_data is False then endog, exog, \
#panel_arr, and time_arr cannot be None.")
self.initialize(endog, exog, panel, time, xtnames, equation)
# elif aspandas != False:
# if not isinstance(endog, str):
# raise ValueError("If a pandas object is supplied then endog \
#must be a string containing the name of the endogenous variable")
# if not isinstance(aspandas, LongPanel):
# raise ValueError("Only pandas.LongPanel objects are supported")
# self.initialize_pandas(endog, aspandas, panel_name)
def initialize(self, endog, exog, panel, time, xtnames, equation):
"""
Initialize plain array model.
See PanelModel
"""
#TODO: for now, we are going assume a constant, and then make the first
#panel the base, add a flag for this....
# get names
names = equation.split(" ")
self.endog_name = names[0]
exog_names = names[1:] # this makes the order matter in the array
self.panel_name = xtnames[0]
self.time_name = xtnames[1]
novar = exog.var(0) == 0
if True in novar:
cons_index = np.where(novar == 1)[0][0] # constant col. num
exog_names.insert(cons_index, 'cons')
self._cons_index = novar # used again in fit_fixed
self.exog_names = exog_names
self.endog = np.squeeze(np.asarray(endog))
exog = np.asarray(exog)
self.exog = exog
self.panel = np.asarray(panel)
self.time = np.asarray(time)
self.paneluniq = np.unique(panel)
self.timeuniq = np.unique(time)
#TODO: this structure can possibly be extracted somewhat to deal with
#names in general
#TODO: add some dimension checks, etc.
# def initialize_pandas(self, endog, aspandas):
# """
# Initialize pandas objects.
#
# See PanelModel.
# """
# self.aspandas = aspandas
# endog = aspandas[endog].values
# self.endog = np.squeeze(endog)
# exog_name = aspandas.columns.tolist()
# exog_name.remove(endog)
# self.exog = aspandas.filterItems(exog_name).values
#TODO: can the above be simplified to slice notation?
# if panel_name != None:
# self.panel_name = panel_name
# self.exog_name = exog_name
# self.endog_name = endog
# self.time_arr = aspandas.major_axis
#TODO: is time always handled correctly in fromRecords?
# self.panel_arr = aspandas.minor_axis
#TODO: all of this might need to be refactored to explicitly rely (internally)
# on the pandas LongPanel structure for speed and convenience.
# not sure this part is finished...
#TODO: doesn't conform to new initialize
def initialize_pandas(self, panel_data, endog_name, exog_name):
self.panel_data = panel_data
endog = panel_data[endog_name].values # does this create a copy?
self.endog = np.squeeze(endog)
if exog_name == None:
exog_name = panel_data.columns.tolist()
exog_name.remove(endog_name)
self.exog = panel_data.filterItems(exog_name).values # copy?
self._exog_name = exog_name
self._endog_name = endog_name
self._timeseries = panel_data.major_axis # might not need these
self._panelseries = panel_data.minor_axis
#TODO: this could be pulled out and just have a by kwd that takes
# the panel or time array
#TODO: this also needs to be expanded for 'twoway'
def _group_mean(self, X, index='oneway', counts=False, dummies=False):
"""
Get group means of X by time or by panel.
        index defaults to 'oneway', i.e. group means are taken by panel
"""
if index == 'oneway':
Y = self.panel
uniq = self.paneluniq
elif index == 'time':
Y = self.time
uniq = self.timeuniq
else:
raise ValueError("index %s not understood" % index)
#TODO: use sparse matrices
dummy = (Y == uniq[:,None]).astype(float)
if X.ndim > 1:
mean = np.dot(dummy,X)/dummy.sum(1)[:,None]
else:
mean = np.dot(dummy,X)/dummy.sum(1)
if counts == False and dummies == False:
return mean
elif counts == True and dummies == False:
return mean, dummy.sum(1)
elif counts == True and dummies == True:
return mean, dummy.sum(1), dummy
elif counts == False and dummies == True:
return mean, dummy
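    # Illustrative sketch (added comment, hypothetical data, not part of the original module):
    # with panel = [0, 0, 1, 1] and X = [1., 2., 3., 5.], the dummy-matrix trick above gives
    # group means [1.5, 4.]; with counts=True it also returns the group sizes [2, 2], and with
    # dummies=True the (n_groups x nobs) indicator matrix used to expand means back to
    # observation level.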
#TODO: Use kwd arguments or have fit_method methods?
def fit(self, model=None, method=None, effects='oneway'):
"""
method : LSDV, demeaned, MLE, GLS, BE, FE, optional
model :
between
fixed
random
pooled
[gmm]
effects :
oneway
time
twoway
femethod : demeaned (only one implemented)
WLS
remethod :
swar -
amemiya
nerlove
walhus
Notes
------
This is unfinished. None of the method arguments work yet.
Only oneway effects should work.
"""
if method: # get rid of this with default
method = method.lower()
model = model.lower()
if method and method not in ["lsdv", "demeaned", "mle", "gls", "be",
"fe"]: # get rid of if method with default
raise ValueError("%s not a valid method" % method)
# if method == "lsdv":
# self.fit_lsdv(model)
if model == 'pooled':
return GLS(self.endog, self.exog).fit()
if model == 'between':
return self._fit_btwn(method, effects)
if model == 'fixed':
return self._fit_fixed(method, effects)
# def fit_lsdv(self, effects):
# """
# Fit using least squares dummy variables.
#
# Notes
# -----
# Should only be used for small `nobs`.
# """
# pdummies = None
# tdummies = None
def _fit_btwn(self, method, effects):
# group mean regression or WLS
if effects != "twoway":
endog = self._group_mean(self.endog, index=effects)
exog = self._group_mean(self.exog, index=effects)
else:
raise ValueError("%s effects is not valid for the between \
estimator" % effects)
befit = GLS(endog, exog).fit()
return befit
def _fit_fixed(self, method, effects):
endog = self.endog
exog = self.exog
demeantwice = False
if effects in ["oneway","twoways"]:
if effects == "twoways":
demeantwice = True
effects = "oneway"
endog_mean, counts = self._group_mean(endog, index=effects,
counts=True)
exog_mean = self._group_mean(exog, index=effects)
counts = counts.astype(int)
endog = endog - np.repeat(endog_mean, counts)
exog = exog - np.repeat(exog_mean, counts, axis=0)
if demeantwice or effects == "time":
endog_mean, dummies = self._group_mean(endog, index="time",
dummies=True)
exog_mean = self._group_mean(exog, index="time")
# This allows unbalanced panels
endog = endog - np.dot(endog_mean, dummies)
exog = exog - np.dot(dummies.T, exog_mean)
fefit = GLS(endog, exog[:,-self._cons_index]).fit()
#TODO: might fail with one regressor
return fefit
class SURPanel(PanelModel):
pass
class SEMPanel(PanelModel):
pass
class DynamicPanel(PanelModel):
pass
if __name__ == "__main__":
import pandas
from pandas import LongPanel
import statsmodels.api as sm
import numpy.lib.recfunctions as nprf
data = sm.datasets.grunfeld.load()
# Baltagi doesn't include American Steel
endog = data.endog[:-20]
fullexog = data.exog[:-20]
# fullexog.sort(order=['firm','year'])
panel_arr = nprf.append_fields(fullexog, 'investment', endog, float,
usemask=False)
panel_panda = LongPanel.fromRecords(panel_arr, major_field='year',
minor_field='firm')
# the most cumbersome way of doing it as far as preprocessing by hand
exog = fullexog[['value','capital']].view(float).reshape(-1,2)
exog = sm.add_constant(exog, prepend=False)
panel = group(fullexog['firm'])
year = fullexog['year']
panel_mod = PanelModel(endog, exog, panel, year, xtnames=['firm','year'],
equation='invest value capital')
# note that equation doesn't actually do anything but name the variables
panel_ols = panel_mod.fit(model='pooled')
panel_be = panel_mod.fit(model='between', effects='oneway')
panel_fe = panel_mod.fit(model='fixed', effects='oneway')
panel_bet = panel_mod.fit(model='between', effects='time')
panel_fet = panel_mod.fit(model='fixed', effects='time')
panel_fe2 = panel_mod.fit(model='fixed', effects='twoways')
#see also Baltagi (3rd edt) 3.3 THE RANDOM EFFECTS MODEL p.35
#for explicit formulas for spectral decomposition
#but this works also for unbalanced panel
#
#I also just saw: 9.4.2 The Random Effects Model p.176 which is
#partially almost the same as I did
#
#this needs to use sparse matrices for larger datasets
#
#"""
#
#import numpy as np
#
groups = np.array([0,0,0,1,1,2,2,2])
nobs = groups.shape[0]
groupuniq = np.unique(groups)
periods = np.array([0,1,2,1,2,0,1,2])
perioduniq = np.unique(periods)
dummygr = (groups[:,None] == groupuniq).astype(float)
dummype = (periods[:,None] == perioduniq).astype(float)
sigma = 1.
sigmagr = np.sqrt(2.)
sigmape = np.sqrt(3.)
#dummyall = np.c_[sigma*np.ones((nobs,1)), sigmagr*dummygr,
# sigmape*dummype]
#exclude constant ?
dummyall = np.c_[sigmagr*dummygr, sigmape*dummype]
# omega is the error variance-covariance matrix for the stacked
# observations
omega = np.dot(dummyall, dummyall.T) + sigma* np.eye(nobs)
print(omega)
print(np.linalg.cholesky(omega))
ev, evec = np.linalg.eigh(omega) #eig doesn't work
omegainv = np.dot(evec, (1/ev * evec).T)
omegainv2 = np.linalg.inv(omega)
omegacomp = np.dot(evec, (ev * evec).T)
print(np.max(np.abs(omegacomp - omega)))
#check
#print(np.dot(omegainv,omega)
print(np.max(np.abs(np.dot(omegainv,omega) - np.eye(nobs))))
omegainvhalf = evec/np.sqrt(ev) #not sure whether ev shouldn't be column
print(np.max(np.abs(np.dot(omegainvhalf,omegainvhalf.T) - omegainv)))
# now we can use omegainvhalf in GLS (instead of the cholesky)
sigmas2 = np.array([sigmagr, sigmape, sigma])
groups2 = np.column_stack((groups, periods))
omega_, omegainv_, omegainvhalf_ = repanel_cov(groups2, sigmas2)
print(np.max(np.abs(omega_ - omega)))
print(np.max(np.abs(omegainv_ - omegainv)))
print(np.max(np.abs(omegainvhalf_ - omegainvhalf)))
# notation Baltagi (3rd) section 9.4.1 (Fixed Effects Model)
Pgr = reduce(np.dot,[dummygr,
np.linalg.inv(np.dot(dummygr.T, dummygr)),dummygr.T])
Qgr = np.eye(nobs) - Pgr
# within group effect: np.dot(Qgr, groups)
# but this is not memory efficient, compared to groupstats
print(np.max(np.abs(np.dot(Qgr, groups))))
| bsd-3-clause |
bthirion/scikit-learn | examples/cluster/plot_segmentation_toy.py | 91 | 3522 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we take it weakly
# dependent on the gradient, so the segmentation is close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
kazemakase/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 78 | 4510 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
sensbio/sensbiotk | examples/simple_rotation/simple_rotation_martin.py | 1 | 4137 |
# -*- coding: utf-8 -*-
"""
Reconstruction angles example comparison
"""
import math
import numpy as np
from sensbiotk.algorithms import martin_ahrs
import sensbiotk.algorithms.mahony_ahrs as mahony
from sensbiotk.io.iofox import load_foxcsvfile
from sensbiotk.io.ahrs import save_ahrs_csvfile
import sensbiotk.calib.calib as calib
from sensbiotk.transforms3d.eulerangles import quat2euler
from sensbiotk.transforms3d.eulerangles import quat2euler2
from sensbiotk.transforms3d.quaternions import quat2mat
from visual import *
import scipy.io
import matplotlib.pyplot as plt
DATACALIBFILE = "data/simple_rotation_v2/CALIB.csv"
#CALIBFILE= "data/simple_rotation_v2/CalibrationFileIMU5.txt"
#DATAFILE = "data/simple_rotation_v2/ROT90_Y.csv"
CALIBFILE= "data/simple_rotation/CalibrationFileIMU4.txt"
DATAFILE = "data/simple_rotation/90_around_x.csv"
def plot_quat(title, timu, qw, qx, qy, qz):
""" Plot quaternion
"""
plt.figure()
plt.title(title+" Quaternion")
plt.plot(timu, qw)
plt.plot(timu, qx)
plt.plot(timu, qy)
plt.plot(timu, qz)
plt.legend(('qw', 'qx', 'qy', 'qz'))
return
def plot_euler(title, time, phi, theta, psi):
""" Plot euler angles
"""
plt.figure()
plt.title(title+" Euler angles")
plt.plot(time, phi*180/math.pi)
plt.plot(time, theta*180/math.pi)
plt.plot(time, psi*180/math.pi)
plt.legend(('e_x', 'e_y', 'e_z'))
return
def calib_param(compute = False):
""" Load or compute calibration parameters
"""
    if compute:
[params_acc, params_mag, params_gyr] = \
calib.compute(imuNumber=5 ,filepath=DATACALIBFILE, param = 3)
else:
[params_acc, params_mag, params_gyr] = \
calib.load_param(CALIBFILE)
return [params_acc, params_mag, params_gyr]
def normalize_data(data, param_calib):
""" normalize_data
"""
scale = param_calib[1:4,:]
bias = param_calib[0,:]
data_n = np.transpose(np.dot(scale,np.transpose((data-np.transpose(bias)))))
return data_n
def run_example(typ_filter = "martin"):
""" run example : "mahony" or "martin"
"""
    # Compute (True) or load (False) the calibration parameters
[params_acc, params_mag, params_gyr] = calib_param(compute = False)
# Load the recording data
[time_imu, accx, accy, accz, mx, my, mz, gyrx, gyry, gyrz] = \
load_foxcsvfile(DATAFILE)
acc_imu = np.column_stack([accx, accy, accz])
mag_imu = np.column_stack([mx, my, mz])
gyr_imu = np.column_stack([gyrx, gyry, gyrz])
# Init output
quat = np.zeros((len(acc_imu),4))
euler = np.zeros((len(acc_imu),3))
observer = martin_ahrs.martin_ahrs()
# Computation loop
for i in range(0,len(acc_imu)):
# Applies the Scale and Offset to data
acc_imu[i,:] = normalize_data(acc_imu[i,:], params_acc)
mag_imu[i,:] = normalize_data(mag_imu[i,:], params_mag)
gyr_imu[i,:] = normalize_data(gyr_imu[i,:], params_gyr)
# Filter call
if i == 0:
if typ_filter == "mahony":
quat[0,:]=[1, 0, 0, 0]
else:
quat[0]=observer.init_observer(np.hstack([acc_imu[0,:],mag_imu[0,:], gyr_imu[0,:]]))
else:
if typ_filter == "mahony":
quat[i]= mahony.update(quat[i-1],np.hstack([acc_imu[i,:],mag_imu[i,:], gyr_imu[i,:]]))
euler[i]=quat2euler(quat[i])
else:
quat[i]=observer.update(np.hstack([acc_imu[i,:],mag_imu[i,:], gyr_imu[i,:]]), 0.005)
euler[i]=quat2euler(quat[i])
#Plot results
plot_quat(typ_filter, time_imu,\
quat[:,0], quat[:,1], quat[:,2], quat[:,3])
if typ_filter == "mahony":
plot_euler(typ_filter, time_imu,\
euler[:,2], euler[:,1], euler[:,0])
else:
plot_euler(typ_filter, time_imu,\
euler[:,2], euler[:,1], euler[:,0])
if __name__ == '__main__':
run_example("martin")
run_example("mahony")
plt.show()
| gpl-3.0 |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/numpy/core/tests/test_multiarray.py | 9 | 223106 | from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from nose import SkipTest
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and suboffsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed bytewise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
        d = np.array([[1, 2, 3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
        assert_array_equal(d, [[1, 5, 3], [1, 2, 3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
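        # A 0-d integer array and a 0-d object array cover both the numeric and
        # object scalar paths.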
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = np.array(2)
self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
        # test big arrays as they might be allocated differently by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
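        # Integers too large for the native integer type force object dtype;
        # any complex entry promotes the whole result to complex.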
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
        raising a KeyError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
        # Check that incompatible sub-array shapes don't result in broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
        # check that the broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
class TestBool(TestCase):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
self.assertTrue(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(np.array([True])[0] is a1)
self.assertTrue(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
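        # Each i encodes a distinct True/False pattern via its bits; viewing the
        # boolean data as uint8 and scaling it checks that any nonzero byte
        # value still counts as True.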
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
        # prevent mistakes such as gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
        # necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
        # but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
        # using axis=None is known to fail at this point
        # assert_equal(a.copy().argsort(axis=None), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
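        # Offsetting the uint8 buffer by one byte guarantees a misaligned view
        # for any itemsize > 1.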
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
        np.random.shuffle(a)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
                # from np.intp on all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
                # from np.intp on all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
        # Test that out-of-range kth values raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
        # Test that out-of-range kth values raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
            assert_equal(np.argpartition(a, 0, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
        assert_equal(np.argpartition(a, 0, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
            d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
            d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
            d = np.arange(49)
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
            d = np.arange(47)[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
            d = np.arange(47) % 7
            tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
            # map() is lazy on Python 3, so shuffle each row explicitly
            for row in d1:
                np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i,:], np.array([i] * d1.shape[0],
dtype=dt))
# array_less does not seem to work right
at((p[:i,:] <= p[i,:]).all(),
msg="%d: %r <= %r" % (i, p[i,:], p[:i,:]))
at((p[i + 1:,:] > p[i,:]).all(),
msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None,:]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
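        # Verify the partition invariant: elements between consecutive kth
        # positions are strictly smaller than d[k], and everything from each k
        # onward is >= d[k].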
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
        # map() is lazy on Python 3, so shuffle each row explicitly
        for row in d1:
            np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
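        # Partitioning a structured array on every kth with order= must agree
        # with a full sort on the same fields.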
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
        d = [6, 7, 3, 2, 9, 0]
        p = np.argpartition(d, 1)
        self.assert_partitioned(np.array(d)[p], [1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
        # this test is only for 1.9; the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
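        # Repeated diagonal() calls must not accumulate references to `a`.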
a = np.zeros((100, 100))
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
assert_(sys.getrefcount(a) < 50)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
        # Not contiguous and 1-sized axis with non-matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
        # contiguous and 1-sized axis with non-matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
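        # (np.ones(1).strides presumably acts as a canary here: in relaxed
        # strides debug builds the reported stride of a 1-element array is
        # deliberately bogus, so the checks below are skipped in that case.)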
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = input.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
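        #
        # If numpy elided the temporary here (i.e. performed the add in place
        # on the refcount-1 operand), the returned copy would be clobbered;
        # the assertions below guard against that.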
from numpy.core.multiarray_tests import incref_elide
d = np.ones(5)
orig, res = incref_elide(d)
        # the returned original must not be modified by an in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
        # scanning whether the refcount == 1 object is on the python stack, to
        # check that we were called directly from python, is flawed: the
        # object may still be above the stack pointer and we have no access to
        # the top of it
#
# def incref_elide_l(d):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
        # padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(5)]
res = incref_elide_l(l)
        # the returned original must not be modified by an in-place operation
assert_array_equal(l[4], np.ones(5))
assert_array_equal(res, l[4] + l[4])
def test_ufunc_override_rop_precedence(self):
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
ops = {
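            # maps operator name -> (reflected op name, matching ufunc or None,
            #                        whether an in-place variant exists)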
'__add__': ('__radd__', np.add, True),
'__sub__': ('__rsub__', np.subtract, True),
'__mul__': ('__rmul__', np.multiply, True),
'__truediv__': ('__rtruediv__', np.true_divide, True),
'__floordiv__': ('__rfloordiv__', np.floor_divide, True),
'__mod__': ('__rmod__', np.remainder, True),
'__divmod__': ('__rdivmod__', None, False),
'__pow__': ('__rpow__', np.power, True),
'__lshift__': ('__rlshift__', np.left_shift, True),
'__rshift__': ('__rrshift__', np.right_shift, True),
'__and__': ('__rand__', np.bitwise_and, True),
'__xor__': ('__rxor__', np.bitwise_xor, True),
'__or__': ('__ror__', np.bitwise_or, True),
'__ge__': ('__le__', np.less_equal, False),
'__gt__': ('__lt__', np.less, False),
'__le__': ('__ge__', np.greater_equal, False),
'__lt__': ('__gt__', np.greater, False),
'__eq__': ('__eq__', np.equal, False),
'__ne__': ('__ne__', np.not_equal, False),
}
class OtherNdarraySubclass(np.ndarray):
pass
class OtherNdarraySubclassWithOverride(np.ndarray):
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def check(op_name, ndsubclass):
rop_name, np_op, has_iop = ops[op_name]
if has_iop:
iop_name = '__i' + op_name[2:]
iop = getattr(operator, iop_name)
if op_name == "__divmod__":
op = divmod
else:
op = getattr(operator, op_name)
# Dummy class
def __init__(self, *a, **kw):
pass
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def __op__(self, *other):
return "op"
def __rop__(self, *other):
return "rop"
if ndsubclass:
bases = (np.ndarray,)
else:
bases = (object,)
dct = {'__init__': __init__,
'__numpy_ufunc__': __numpy_ufunc__,
op_name: __op__}
if op_name != rop_name:
dct[rop_name] = __rop__
cls = type("Rop" + rop_name, bases, dct)
            # Check behavior against both bare ndarray objects and
            # ndarray subclasses with and without their own override
obj = cls((1,), buffer=np.ones(1,))
arr_objs = [np.array([1]),
np.array([2]).view(OtherNdarraySubclass),
np.array([3]).view(OtherNdarraySubclassWithOverride),
]
for arr in arr_objs:
err_msg = "%r %r" % (op_name, arr,)
# Check that ndarray op gives up if it sees a non-subclass
if not isinstance(obj, arr.__class__):
assert_equal(getattr(arr, op_name)(obj),
NotImplemented, err_msg=err_msg)
# Check that the Python binops have priority
assert_equal(op(obj, arr), "op", err_msg=err_msg)
if op_name == rop_name:
assert_equal(op(arr, obj), "op", err_msg=err_msg)
else:
assert_equal(op(arr, obj), "rop", err_msg=err_msg)
# Check that Python binops have priority also for in-place ops
if has_iop:
assert_equal(getattr(arr, iop_name)(obj),
NotImplemented, err_msg=err_msg)
if op_name != "__pow__":
# inplace pow requires the other object to be
# integer-like?
assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
                # Check that ufuncs call __numpy_ufunc__ normally
if np_op is not None:
assert_raises(AssertionError, np_op, arr, obj,
err_msg=err_msg)
assert_raises(AssertionError, np_op, obj, arr,
err_msg=err_msg)
# Check all binary operations
for op_name in sorted(ops.keys()):
yield check, op_name, True
yield check, op_name, False
def test_ufunc_override_rop_simple(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5864
return
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
class SomeClass(object):
def __numpy_ufunc__(self, *a, **kw):
return "ufunc"
def __mul__(self, other):
return 123
def __rmul__(self, other):
return 321
def __rsub__(self, other):
return "no subs for me"
def __gt__(self, other):
return "yep"
def __lt__(self, other):
return "nope"
class SomeClass2(SomeClass, np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
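                # Report "ufunc" for multiply/bitwise_and so the tests can see
                # that this override ran; for any other ufunc, fall back to the
                # plain ndarray behaviour and rewrap the result in this class.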
if ufunc is np.multiply or ufunc is np.bitwise_and:
return "ufunc"
else:
inputs = list(inputs)
inputs[i] = np.asarray(self)
func = getattr(ufunc, method)
r = func(*inputs, **kw)
if 'out' in kw:
return r
else:
x = self.__class__(r.shape, dtype=r.dtype)
x[...] = r
return x
class SomeClass3(SomeClass2):
def __rsub__(self, other):
return "sub for me"
arr = np.array([0])
obj = SomeClass()
obj2 = SomeClass2((1,), dtype=np.int_)
obj2[0] = 9
obj3 = SomeClass3((1,), dtype=np.int_)
obj3[0] = 4
# obj is first, so should get to define outcome.
assert_equal(obj * arr, 123)
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
assert_equal(arr * obj, 321)
# obj is second, but has __numpy_ufunc__ and defines __rsub__.
assert_equal(arr - obj, "no subs for me")
# obj is second, but has __numpy_ufunc__ and defines __lt__.
assert_equal(arr > obj, "nope")
# obj is second, but has __numpy_ufunc__ and defines __gt__.
assert_equal(arr < obj, "yep")
# Called as a ufunc, obj.__numpy_ufunc__ is used.
assert_equal(np.multiply(arr, obj), "ufunc")
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
arr *= obj
assert_equal(arr, 321)
# obj2 is an ndarray subclass, so CPython takes care of the same rules.
assert_equal(obj2 * arr, 123)
assert_equal(arr * obj2, 321)
assert_equal(arr - obj2, "no subs for me")
assert_equal(arr > obj2, "nope")
assert_equal(arr < obj2, "yep")
# Called as a ufunc, obj2.__numpy_ufunc__ is called.
assert_equal(np.multiply(arr, obj2), "ufunc")
# Also when the method is not overridden.
assert_equal(arr & obj2, "ufunc")
arr *= obj2
assert_equal(arr, 321)
obj2 += 33
assert_equal(obj2[0], 42)
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
        # obj3 is a subclass that defines __rsub__. CPython calls it.
assert_equal(arr - obj3, "sub for me")
assert_equal(obj2 - obj3, "sub for me")
# obj3 is a subclass that defines __rmul__. CPython calls it.
assert_equal(arr * obj3, 321)
# But not here, since obj3.__rmul__ is obj2.__rmul__.
assert_equal(obj2 * obj3, 123)
# And of course, here obj3.__mul__ should be called.
assert_equal(obj3 * obj2, 123)
# obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
# (and both are just ndarray.__radd__); see #4815.
res = obj2 + obj3
assert_equal(res, 46)
assert_(isinstance(res, SomeClass2))
# Since obj3 is a subclass, it should have precedence, like CPython
# would give, even though obj2 has __numpy_ufunc__ and __radd__.
# See gh-4815 and gh-5747.
res = obj3 + obj2
assert_equal(res, 46)
assert_(isinstance(res, SomeClass3))
def test_ufunc_override_normalize_signature(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
# gh-5674
class SomeClass(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is np.ndarray)
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
if sys.version_info[0] >= 3:
return np.loads(obj, encoding='latin1')
else:
return np.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
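    # Each entry is (input sequence, expected argmax index). For float and
    # complex inputs the index of the first NaN is expected to win; NaT values
    # appear to be skipped in this numpy version.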
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 3),
#(['zz', 'a', 'aa', 'a'], 0),
#(['aa', 'z', 'zz', 'a'], 2),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
        # these could possibly be relaxed (even the previous shapes used to be
        # allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
#check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
#check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestArgmin(TestCase):
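    # Each entry is (input sequence, expected argmin index).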
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 0),
#(['zz', 'a', 'aa', 'a'], 1),
#(['aa', 'z', 'zz', 'a'], 3),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
        # these could possibly be relaxed (even the previous shapes used to be
        # allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2,3))
#check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
#check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(TestCase):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
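                # The expectation encoded below: an out-of-place clip() comes
                # back in native byte order, an in-place clip() keeps the
                # input's byte order, and single-byte dtypes report '|'.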
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
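            # a negative clip_min cannot be represented by an unsigned dtype,
            # so the expected minimum saturates at 0: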
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_(np.all(x[mask] == T(val)))
assert_(x.dtype == T)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
def test_masked_array(self):
## x = np.array([1,2,3])
## z = np.ma.array(x,mask=[True,False,False])
## np.putmask(z,[True,True,True],3)
pass
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
        # NB. str() gives less precision
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
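                # 2 bytes were consumed by f.read(2) and fromfile read one
                # float64 (8 bytes), so the position should be 10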
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.0,3.51,4.0')
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
def setUp(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.a.flat[12] == 12.0
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.b.flat[4] == 12.0
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
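        # a and b are read-only, so c and d need no write-back buffer and are
        # simply marked non-writeable; a0 is contiguous and writeable, so e
        # needs no write-back either; f appears to be the only case that needs
        # an UPDATEIFCOPY buffer so that writes are copied back into the
        # non-contiguous b0.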
assert c.flags.writeable is False
assert d.flags.writeable is False
assert e.flags.writeable is True
assert f.flags.writeable is True
assert c.flags.updateifcopy is False
assert d.flags.updateifcopy is False
assert e.flags.updateifcopy is False
assert f.flags.updateifcopy is True
assert f.base is self.b0
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
        assert_array_equal(x.flat[9:], 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
self.assertRaises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_invalid_arguements(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
            # Bytes are not allowed in field names and are not recognized in
            # titles on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
        # Test that both unicode and 8-bit (byte) strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
assert_raises(IndexError, a.__getitem__, asbytes('f1'))
assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(IndexError, b[0].__setitem__, fnn, 1)
assert_raises(IndexError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple Subfields
fn2 = func('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error, and
# 'a' is not modified:
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
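        # expected values are the little-endian packing of the four int8
        # fields: 1 + 2*2**8 + 3*2**16 + 4*2**24 == 67305985 (and likewise
        # 134678021 for the second record)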
assert_array_equal(y, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
        # this needs definition, as there are lots of places along the line
        # where type casting may take place.
#for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
def test_ddof(self):
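        # var divides by (dim - ddof), so scaling the ddof result by
        # (dim - ddof) should recover the ddof=0 result scaled by dim
        # (analogously with square roots for std).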
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot(TestCase):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=np.bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
class TestDot(TestCase):
def setUp(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
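    # The error checks below rely on dot requiring the out array to already
    # have the exact result shape, dtype and a C-contiguous layout;
    # mismatches raise instead of being cast or reshaped.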
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_dot_override(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
def test_numpy_ufunc_override(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
class TestMatmul(MatmulCommon, TestCase):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=np.float)
b = np.ones((2, 2), dtype=np.float)
tgt = np.full((2,2), 2, dtype=np.float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
# skip following tests for now, cblas does not allow non-contiguous
# outputs and consistency with dot would require same type,
# dimensions, subtype, and c_contiguous.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=np.float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon, TestCase):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
    # and nudge people to write "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
class TestInner(TestCase):
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
        # Inner product of a vector with itself segfaults or gives a
        # meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
class TestChoose(TestCase):
def setUp(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
# TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_usigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_usigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_usigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
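    # min_scalar_type returns the smallest dtype able to hold the scalar, so
    # values beyond any fixed-width integer (such as 2**64 above) fall back
    # to the object dtype.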
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
        # Native padding should also work for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
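    # np.asarray of a memoryview re-exposes the exported buffer without
    # copying (owndata is False), whereas np.array makes an independent copy;
    # both must reproduce dtype, shape and contents of the original.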
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
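    # For the discontiguous case below: the parent (3, 3, 3) float32 array
    # has strides (36, 12, 4), so the slice [:, 0, :] exposes strides (36, 4)
    # through the buffer interface.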
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert memoryview(c).strides == (800, 80, 8)
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert memoryview(fortran).strides == (8, 80, 800)
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
"""ticket #2046, should not seqfault, raise AttributeError"""
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
        # needs to be larger than the limit of the small memory cache in ctors.c
a = np.zeros(1000)
del a
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
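        # Repeated indices accumulate: position (1, 0) starts at 4 and is hit
        # twice with 50, giving 104 below (plain fancy-index assignment would
        # keep only the last value).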
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
#All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
#unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
        # minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
class TestSizeOf(TestCase):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
self.assertFalse(isinstance(x, collections.Hashable))
from numpy.core._internal import _view_is_safe
class TestObjViewSafetyFuncs(TestCase):
def test_view_safety(self):
psize = np.dtype('p').itemsize
        # creates a dtype, using the extra character code '-' to mark missing 'p' fields
def mtype(s):
n, offset, fields = 0, 0, []
for c in s.split(','): # subarrays won't work
if c != '-':
fields.append(('f{0}'.format(n), c, offset))
n += 1
offset += np.dtype(c).itemsize if c != '-' else psize
names, formats, offsets = zip(*fields)
return np.dtype({'names': names, 'formats': formats,
'offsets': offsets, 'itemsize': offset})
# test nonequal itemsizes with objects:
# these should succeed:
_view_is_safe(np.dtype('O,p,O,p'), np.dtype('O,p,O,p,O,p'))
_view_is_safe(np.dtype('O,O'), np.dtype('O,O,O'))
# these should fail:
assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('O,O'))
assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('O,p'))
assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('p,O'))
# test nonequal itemsizes with missing fields:
# these should succeed:
_view_is_safe(mtype('-,p,-,p'), mtype('-,p,-,p,-,p'))
_view_is_safe(np.dtype('p,p'), np.dtype('p,p,p'))
# these should fail:
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,p'))
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,-'))
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('-,p'))
# scans through positions at which we can view a type
def scanView(d1, otype):
goodpos = []
for shift in range(d1.itemsize - np.dtype(otype).itemsize+1):
d2 = np.dtype({'names': ['f0'], 'formats': [otype],
'offsets': [shift], 'itemsize': d1.itemsize})
try:
_view_is_safe(d1, d2)
except TypeError:
pass
else:
goodpos.append(shift)
return goodpos
# test partial overlap with object field
assert_equal(scanView(np.dtype('p,O,p,p,O,O'), 'p'),
[0] + list(range(2*psize, 3*psize+1)))
assert_equal(scanView(np.dtype('p,O,p,p,O,O'), 'O'),
[psize, 4*psize, 5*psize])
# test partial overlap with missing field
assert_equal(scanView(mtype('p,-,p,p,-,-'), 'p'),
[0] + list(range(2*psize, 3*psize+1)))
# test nested structures with objects:
nestedO = np.dtype([('f0', 'p'), ('f1', 'p,O,p')])
assert_equal(scanView(nestedO, 'p'), list(range(psize+1)) + [3*psize])
assert_equal(scanView(nestedO, 'O'), [2*psize])
# test nested structures with missing fields:
nestedM = np.dtype([('f0', 'p'), ('f1', mtype('p,-,p'))])
assert_equal(scanView(nestedM, 'p'), list(range(psize+1)) + [3*psize])
# test subarrays with objects
subarrayO = np.dtype('p,(2,3)O,p')
assert_equal(scanView(subarrayO, 'p'), [0, 7*psize])
assert_equal(scanView(subarrayO, 'O'),
list(range(psize, 6*psize+1, psize)))
#test dtype with overlapping fields
overlapped = np.dtype({'names': ['f0', 'f1', 'f2', 'f3'],
'formats': ['p', 'p', 'p', 'p'],
'offsets': [0, 1, 3*psize-1, 3*psize],
'itemsize': 4*psize})
assert_equal(scanView(overlapped, 'p'), [0, 1, 3*psize-1, 3*psize])
class TestArrayPriority(TestCase):
    # This will go away when __array_priority__ is settled; meanwhile
    # it serves to check for unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
if sys.version_info[0] < 3:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
def test_empty_bstring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
def test_empty_ustring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
self.assertTrue(a)
if __name__ == "__main__":
run_module_suite()
| gpl-2.0 |
sonnyhu/scipy | doc/source/tutorial/stats/plots/kde_plot4.py | 142 | 1457 | from functools import partial
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
def my_kde_bandwidth(obj, fac=1./5):
"""We use Scott's Rule, multiplied by a constant factor."""
return np.power(obj.n, -1./(obj.d+4)) * fac
loc1, scale1, size1 = (-2, 1, 175)
loc2, scale2, size2 = (2, 0.2, 50)
x2 = np.concatenate([np.random.normal(loc=loc1, scale=scale1, size=size1),
np.random.normal(loc=loc2, scale=scale2, size=size2)])
x_eval = np.linspace(x2.min() - 1, x2.max() + 1, 500)
kde = stats.gaussian_kde(x2)
kde2 = stats.gaussian_kde(x2, bw_method='silverman')
kde3 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.2))
kde4 = stats.gaussian_kde(x2, bw_method=partial(my_kde_bandwidth, fac=0.5))
pdf = stats.norm.pdf
bimodal_pdf = pdf(x_eval, loc=loc1, scale=scale1) * float(size1) / x2.size + \
pdf(x_eval, loc=loc2, scale=scale2) * float(size2) / x2.size
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
ax.plot(x2, np.zeros(x2.shape), 'b+', ms=12)
ax.plot(x_eval, kde(x_eval), 'k-', label="Scott's Rule")
ax.plot(x_eval, kde2(x_eval), 'b-', label="Silverman's Rule")
ax.plot(x_eval, kde3(x_eval), 'g-', label="Scott * 0.2")
ax.plot(x_eval, kde4(x_eval), 'c-', label="Scott * 0.5")
ax.plot(x_eval, bimodal_pdf, 'r--', label="Actual PDF")
ax.set_xlim([x_eval.min(), x_eval.max()])
ax.legend(loc=2)
ax.set_xlabel('x')
ax.set_ylabel('Density')
plt.show()
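# Note: smaller bandwidth factors (0.2 above) follow the bimodal shape more
# closely but are noisier, while larger factors oversmooth it; Scott's and
# Silverman's rules choose a compromise from n and d alone. A plain number
# can also be passed as bw_method, in which case it is used directly as the
# bandwidth factor, e.g. (illustrative):
# kde5 = stats.gaussian_kde(x2, bw_method=0.2)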
| bsd-3-clause |
Chilipp/psyplot_gui | psyplot_gui/help_explorer.py | 1 | 40518 | # -*- coding: utf-8 -*-
"""Help explorer widget supplying a simple web browser and a plain text help
viewer"""
import sys
import os.path as osp
from collections import namedtuple
from itertools import chain
import re
import six
import types
import inspect
import shutil
from psyplot.docstring import indent, docstrings
from psyplot.compat.pycompat import OrderedDict
from psyplot.utils import _temp_bool_prop
from psyplot_gui.config.rcsetup import rcParams
from psyplot_gui.compat.qtcompat import (
QWidget, QHBoxLayout, QFrame, QVBoxLayout, QWebEngineView, QToolButton,
QIcon, QtCore, QComboBox, Qt, QSortFilterProxyModel, isstring, asstring,
QCompleter, QStandardItemModel, QPlainTextEdit, QAction, QMenu, with_qt5,
QtGui)
from psyplot_gui.common import get_icon, DockMixin, PyErrorMessage
from IPython.core.oinspect import signature, getdoc
import logging
from psyplot_gui.common import get_module_path, StreamToLogger, \
is_running_tests
from tempfile import mkdtemp
try:
from sphinx.application import Sphinx
from sphinx.util import get_module_source
try:
from psyplot.sphinxext.extended_napoleon import (
ExtendedNumpyDocstring as NumpyDocstring,
ExtendedGoogleDocstring as GoogleDocstring)
except ImportError:
from sphinx.ext.napoleon import NumpyDocstring, GoogleDocstring
with_sphinx = True
except ImportError:
with_sphinx = False
if six.PY2:
from urlparse import urlparse
else:
from urllib.parse import urlparse
try:
import pathlib
def file2html(fname):
return pathlib.Path(fname).as_uri()
except ImportError:
def file2html(fname):
return 'file://' + fname
def html2file(url):
p = urlparse(asstring(url))
# skip the first '/' on windows platform
return osp.abspath(osp.join(p.netloc,
p.path[int(sys.platform == 'win32'):]))
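# Illustrative round trip on POSIX paths: file2html('/tmp/doc.html') returns
# 'file:///tmp/doc.html' and html2file('file:///tmp/doc.html') returns
# '/tmp/doc.html'; on Windows the leading slash of the URL path is stripped
# before joining with the netloc.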
_viewers = OrderedDict()
logger = logging.getLogger(__name__)
class UrlCombo(QComboBox):
"""A editable ComboBox with autocompletion"""
def __init__(self, *args, **kwargs):
super(UrlCombo, self).__init__(*args, **kwargs)
self.setInsertPolicy(self.InsertAtTop)
self.setFocusPolicy(Qt.StrongFocus)
self.setEditable(True)
self.completer = QCompleter(self)
# always show all completions
self.completer.setCompletionMode(QCompleter.UnfilteredPopupCompletion)
self.pFilterModel = QSortFilterProxyModel(self)
self.pFilterModel.setFilterCaseSensitivity(Qt.CaseInsensitive)
self.completer.setPopup(self.completer.popup())
self.setCompleter(self.completer)
self.lineEdit().textEdited[str].connect(
self.pFilterModel.setFilterFixedString)
self.completer.activated.connect(self.add_text_on_top)
self.setModel(QStandardItemModel())
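    # The completer works on a case-insensitive proxy of the URL history that
    # is filtered as the user types, and an activated completion is moved to
    # the top of the list via add_text_on_top.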
def setModel(self, model):
"""Reimplemented to also set the model of the filter and completer"""
super(UrlCombo, self).setModel(model)
self.pFilterModel.setSourceModel(model)
self.completer.setModel(self.pFilterModel)
def add_text_on_top(self, text=None, block=False):
"""Add the given text as the first item"""
if text is None:
text = self.currentText()
ind = self.findText(text)
if block:
self.blockSignals(True)
if ind == -1:
self.insertItem(0, text)
elif ind != 0:
self.removeItem(ind)
self.insertItem(0, text)
self.setCurrentIndex(0)
if block:
self.blockSignals(False)
# replace keyPressEvent to always insert the selected item at the top
def keyPressEvent(self, event):
"""Handle key press events"""
if event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter:
self.add_text_on_top()
else:
QComboBox.keyPressEvent(self, event)
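# Illustrative use of the combo's most-recently-used behaviour (requires a
# running QApplication; the urls are hypothetical):
#
#     combo = UrlCombo()
#     combo.add_text_on_top('https://psyplot.readthedocs.org')
#     combo.add_text_on_top('https://docs.python.org')
#     combo.currentText()  # -> 'https://docs.python.org' (newest on top)
#     combo.add_text_on_top('https://psyplot.readthedocs.org')
#     combo.itemText(0)    # -> moved back to the top, not duplicated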
class UrlBrowser(QFrame):
"""Very simple browser with session history and autocompletion based upon
the :class:`PyQt5.QtWebEngineWidgets.QWebEngineView` class
Warnings
--------
This class is known to crash under PyQt4 when new web page domains are
loaded. Hence it should be handled with care"""
completed = _temp_bool_prop(
'completed', "Boolean whether the html page loading is completed.",
default=True)
    url_like_re = re.compile(r'^\w+://')
doc_urls = OrderedDict([
('startpage', 'https://startpage.com/'),
('psyplot', 'http://psyplot.readthedocs.org/en/latest/'),
('pyplot', 'http://matplotlib.org/api/pyplot_api.html'),
('seaborn', 'http://stanford.edu/~mwaskom/software/seaborn/api.html'),
('cartopy', 'http://scitools.org.uk/cartopy/docs/latest/index.html'),
('xarray', 'http://xarray.pydata.org/en/stable/'),
('pandas', 'http://pandas.pydata.org/pandas-docs/stable/'),
('numpy', 'https://docs.scipy.org/doc/numpy/reference/routines.html'),
])
#: The initial url showed in the webview. If None, nothing will be
#: displayed
default_url = None
    #: address line
tb_url = None
#: button to go to previous url
bt_back = None
#: button to go to next url
bt_ahead = None
#: refresh the current url
bt_refresh = None
    #: button to lock to the current url
bt_lock = None
#: button to disable browsing in www
bt_url_lock = None
#: The upper part of the browser containing all the buttons
button_box = None
    #: The uppermost layout arranging the button box and the html widget
vbox = None
#: default value for :attr:`bt_lock`. Is set during browser
#: initialization
bt_lock_default = None
#: default value for :attr:`bt_url_lock`. Is set during browser
#: initialization
bt_url_lock_default = None
def __init__(self, *args, **kwargs):
super(UrlBrowser, self).__init__(*args, **kwargs)
# ---------------------------------------------------------------------
# ---------------------------- upper buttons --------------------------
# ---------------------------------------------------------------------
        # address line
self.tb_url = UrlCombo(self)
# button to go to previous url
self.bt_back = QToolButton(self)
# button to go to next url
self.bt_ahead = QToolButton(self)
# refresh the current url
self.bt_refresh = QToolButton(self)
        # button to lock to the current url
self.bt_lock = QToolButton(self)
# button to disable browsing in www
self.bt_url_lock = QToolButton(self)
# ---------------------------- buttons settings -----------------------
self.bt_back.setIcon(QIcon(get_icon('previous.png')))
self.bt_back.setToolTip('Go back one page')
self.bt_ahead.setIcon(QIcon(get_icon('next.png')))
        self.bt_ahead.setToolTip('Go forward one page')
self.bt_refresh.setIcon(QIcon(get_icon('refresh.png')))
self.bt_refresh.setToolTip('Refresh the current page')
self.bt_lock.setCheckable(True)
self.bt_url_lock.setCheckable(True)
if not with_qt5 and rcParams['help_explorer.online'] is None:
            # We know that the browser can crash with Qt4, therefore we
            # disable browsing the internet
self.bt_url_lock.click()
rcParams['help_explorer.online'] = False
elif rcParams['help_explorer.online'] is False:
self.bt_url_lock.click()
elif rcParams['help_explorer.online'] is None:
rcParams['help_explorer.online'] = True
rcParams.connect('help_explorer.online', self.update_url_lock_from_rc)
self.bt_url_lock.clicked.connect(self.toogle_url_lock)
self.bt_lock.clicked.connect(self.toogle_lock)
# tooltip and icons of lock and url_lock are set in toogle_lock and
# toogle_url_lock
self.toogle_lock()
self.toogle_url_lock()
# ---------------------------------------------------------------------
# --------- initialization and connection of the web view -------------
# ---------------------------------------------------------------------
#: The actual widget showing the html content
self.html = QWebEngineView(parent=self)
self.html.loadStarted.connect(self.completed)
self.html.loadFinished.connect(self.completed)
self.tb_url.currentIndexChanged[str].connect(self.browse)
self.bt_back.clicked.connect(self.html.back)
self.bt_ahead.clicked.connect(self.html.forward)
self.bt_refresh.clicked.connect(self.html.reload)
self.html.urlChanged.connect(self.url_changed)
# ---------------------------------------------------------------------
# ---------------------------- layouts --------------------------------
# ---------------------------------------------------------------------
# The upper part of the browser containing all the buttons
self.button_box = button_box = QHBoxLayout()
button_box.addWidget(self.bt_back)
button_box.addWidget(self.bt_ahead)
button_box.addWidget(self.tb_url)
button_box.addWidget(self.bt_refresh)
button_box.addWidget(self.bt_lock)
button_box.addWidget(self.bt_url_lock)
        # The uppermost layout arranging the button box and the html widget
self.vbox = vbox = QVBoxLayout()
self.vbox.setContentsMargins(0, 0, 0, 0)
vbox.addLayout(button_box)
vbox.addWidget(self.html)
self.setLayout(vbox)
if self.default_url is not None:
self.tb_url.addItem(self.default_url)
self.bt_lock_default = bool(self.bt_lock.isChecked())
self.bt_url_lock_default = bool(self.bt_url_lock.isChecked())
def browse(self, url):
"""Make a web browse on the given url and show the page on the Webview
widget. """
if self.bt_lock.isChecked():
return
if not self.url_like_re.match(url):
url = 'https://' + url
if self.bt_url_lock.isChecked() and url.startswith('http'):
return
if not self.completed:
logger.debug('Stopping current load...')
self.html.stop()
self.completed = True
logger.debug('Loading %s', url)
# we use :meth:`PyQt5.QtWebEngineWidgets.QWebEngineView.setUrl` instead
# of :meth:`PyQt5.QtWebEngineWidgets.QWebEngineView.load` because that
# changes the url directly and is more useful for unittests
self.html.setUrl(QtCore.QUrl(url))
def url_changed(self, url):
"""Triggered when the url is changed to update the adress line"""
try:
url = url.toString()
except AttributeError:
pass
logger.debug('url changed to %s', url)
try:
self.tb_url.setCurrentText(url)
except AttributeError: # Qt4
self.tb_url.setEditText(url)
self.tb_url.add_text_on_top(url, block=True)
def update_url_lock_from_rc(self, online):
if (online and self.bt_url_lock.isChecked() or
not online and not self.bt_url_lock.isChecked()):
self.bt_url_lock.click()
def toogle_url_lock(self):
"""Disable (or enable) the loading of web pages in www"""
bt = self.bt_url_lock
offline = bt.isChecked()
bt.setIcon(QIcon(get_icon(
'world_red.png' if offline else 'world.png')))
online_message = "Go online"
if not with_qt5:
online_message += ("\nWARNING: This mode is unstable under Qt4 "
"and might result in a complete program crash!")
bt.setToolTip(online_message if offline else "Offline mode")
if rcParams['help_explorer.online'] is offline:
rcParams['help_explorer.online'] = not offline
def toogle_lock(self):
"""Disable (or enable) the changing of the current webpage"""
bt = self.bt_lock
bt.setIcon(QIcon(get_icon(
'lock.png' if bt.isChecked() else 'lock_open.png')))
bt.setToolTip("Unlock" if bt.isChecked() else "Lock to current page")
class HelpMixin(object):
"""Base class for providing help on an object"""
#: Object containing the necessary fields to describe an object given to
#: the help widget. The descriptor is set up by the :meth:`describe_object`
#: method.
object_descriptor = namedtuple('ObjectDescriptor', ['obj', 'name'])
#: :class:`bool` determining whether the documentation of an object can be
#: shown or not
can_document_object = True
#: :class:`bool` determining whether this class can show restructured text
can_show_rst = True
@docstrings.get_sectionsf('HelpMixin.show_help')
@docstrings.dedent
def show_help(self, obj, oname='', files=None):
"""
Show the rst documentation for the given object
Parameters
----------
obj: object
The object to get the documentation for
oname: str
The name to use for the object in the documentation
files: list of str
            Paths to additional files that shall be used to process the
            docs"""
descriptor = self.describe_object(obj, oname)
doc = self.get_doc(descriptor)
return self.show_rst(doc, descriptor=descriptor, files=files)
def header(self, descriptor, sig):
"""Format the header and include object name and signature `sig`
Returns
-------
str
The header for the documentation"""
bars = '=' * len(descriptor.name + sig)
return bars + '\n' + descriptor.name + sig + '\n' + bars + '\n'
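    # Illustrative output of the header above (name and signature are
    # hypothetical): for a descriptor named 'foo' and ``sig='(x, y)'`` it
    # returns the rst block
    #
    #     =========
    #     foo(x, y)
    #     =========
    #
    # i.e. a section title over- and underlined to the combined length of
    # name and signature.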
def describe_object(self, obj, oname=''):
"""Return an instance of the :attr:`object_descriptor` class
Returns
-------
:attr:`object_descriptor`
The descriptor containing the information on the object"""
return self.object_descriptor(obj, oname)
def get_doc(self, descriptor):
"""Get the documentation of the object in the given `descriptor`
Parameters
----------
descriptor: instance of :attr:`object_descriptor`
            The descriptor containing the information on the specific object
Returns
-------
str
The header and documentation of the object in the descriptor
Notes
-----
This method uses the :func:`IPython.core.oinspect.getdoc` function to
get the documentation and the :func:`IPython.core.oinspect.signature`
        function to get the signature. Those functions (unlike the ones in the
        inspect module) do not fail when the object is not saved"""
obj = descriptor.obj
oname = descriptor.name
sig = ''
obj_sig = obj
if callable(obj):
if inspect.isclass(obj):
oname = oname or obj.__name__
obj_sig = getattr(obj, '__init__', obj)
elif six.PY2 and type(obj) is types.InstanceType:
obj_sig = getattr(obj, '__call__', obj)
try:
sig = str(signature(obj_sig))
                sig = re.sub(r'^\(\s*self,\s*', '(', sig)
            except Exception:
logger.debug('Failed to get signature from %s!' % (obj, ),
exc_info=True)
        oname = oname or type(obj).__name__
head = self.header(descriptor, sig)
lines = []
ds = getdoc(obj)
if ds:
lines.append('')
lines.append(ds)
if inspect.isclass(obj) and hasattr(obj, '__init__'):
init_ds = getdoc(obj.__init__)
if init_ds is not None:
lines.append('\n' + init_ds)
elif hasattr(obj, '__call__'):
call_ds = getdoc(obj.__call__)
if call_ds and call_ds != getdoc(object.__call__):
lines.append('\n' + call_ds)
doc = self.process_docstring(lines, descriptor)
return head + '\n' + doc
def process_docstring(self, lines, descriptor):
"""Make final modification on the rst lines
Returns
-------
str
The docstring"""
return '\n'.join(lines)
@docstrings.get_sectionsf('HelpMixin.show_rst')
@docstrings.dedent
def show_rst(self, text, oname='', descriptor=None, files=None):
"""
        Abstract method which needs to be implemented by the widget to show
restructured text
Parameters
----------
text: str
The text to show
oname: str
The object name
descriptor: instance of :attr:`object_descriptor`
            The object descriptor holding the information
files: list of str
A path to additional files that shall be used to display the docs
Returns
-------
bool
True if the text is displayed
"""
return False
@docstrings.get_sectionsf('HelpMixin.show_intro')
def show_intro(self, text=''):
"""
Show an intro message
Parameters
----------
        text: str
A string in reStructured Text format to show"""
title = 'Welcome to psyplot!'
title += '\n' + '-' * len(title) + '\n\n'
self.show_rst(title + text, 'intro')
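# For illustration, ``show_intro('Some text')`` hands the following rst to
# :meth:`show_rst` (the body text is hypothetical):
#
#     Welcome to psyplot!
#     -------------------
#
#     Some text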
class TextHelp(QFrame, HelpMixin):
"""Class to show plain text rst docstrings"""
def __init__(self, *args, **kwargs):
super(TextHelp, self).__init__(*args, **kwargs)
self.vbox = QVBoxLayout()
self.vbox.setContentsMargins(0, 0, 0, 0)
#: The :class:`PyQt5.QtWidgets.QPlainTextEdit` instance used for
#: displaying the documentation
self.editor = QPlainTextEdit(parent=self)
self.editor.setFont(QtGui.QFont('Courier New'))
self.vbox.addWidget(self.editor)
self.setLayout(self.vbox)
def show_rst(self, text, *args, **kwargs):
"""Show the given text in the editor window
Parameters
----------
text: str
The text to show
``*args,**kwargs``
Are ignored"""
self.editor.clear()
self.editor.insertPlainText(text)
return True
class UrlHelp(UrlBrowser, HelpMixin):
"""Class to convert rst docstrings to html and show browsers"""
#: Object containing the necessary fields to describe an object given to
#: the help widget. The descriptor is set up by the :meth:`describe_object`
#: method and contains an additional objtype attribute
object_descriptor = namedtuple(
'ObjectDescriptor', ['obj', 'name', 'objtype'])
can_document_object = with_sphinx
can_show_rst = with_sphinx
#: menu button with different urls
bt_url_menus = None
#:
sphinx_thread = None
def __init__(self, *args, **kwargs):
self._temp_dir = 'sphinx_dir' not in kwargs
self.sphinx_dir = kwargs.pop('sphinx_dir', mkdtemp(prefix='psyplot_'))
self.build_dir = osp.join(self.sphinx_dir, '_build', 'html')
super(UrlHelp, self).__init__(*args, **kwargs)
self.error_msg = PyErrorMessage(self)
if with_sphinx:
self.sphinx_thread = SphinxThread(self.sphinx_dir)
self.sphinx_thread.html_ready[str].connect(self.browse)
self.sphinx_thread.html_error[str].connect(
self.error_msg.showTraceback)
self.sphinx_thread.html_error[str].connect(logger.debug)
rcParams.connect('help_explorer.render_docs_parallel',
self.reset_sphinx)
rcParams.connect('help_explorer.use_intersphinx',
self.reset_sphinx)
rcParams.connect('help_explorer.online',
self.reset_sphinx)
self.bt_connect_console = QToolButton(self)
self.bt_connect_console.setCheckable(True)
if rcParams['console.connect_to_help']:
self.bt_connect_console.setIcon(QIcon(get_icon(
'ipython_console.png')))
self.bt_connect_console.click()
else:
self.bt_connect_console.setIcon(QIcon(get_icon(
'ipython_console_t.png')))
self.bt_connect_console.clicked.connect(self.toogle_connect_console)
rcParams.connect('console.connect_to_help',
self.update_connect_console)
self.toogle_connect_console()
# menu button with different urls
self.bt_url_menus = QToolButton(self)
self.bt_url_menus.setIcon(QIcon(get_icon('docu_button.png')))
self.bt_url_menus.setToolTip('Browse documentations')
self.bt_url_menus.setPopupMode(QToolButton.InstantPopup)
docu_menu = QMenu(self)
for name, url in six.iteritems(self.doc_urls):
def to_url(b, url=url):
self.browse(url)
action = QAction(name, self)
action.triggered.connect(to_url)
docu_menu.addAction(action)
self.bt_url_menus.setMenu(docu_menu)
self.button_box.addWidget(self.bt_connect_console)
self.button_box.addWidget(self.bt_url_menus)
        # toggle the url lock again to set the bt_url_menus enabled state
self.toogle_url_lock()
def update_connect_console(self, connect):
if (connect and not self.bt_connect_console.isChecked() or
not connect and self.bt_connect_console.isChecked()):
self.bt_connect_console.click()
def toogle_connect_console(self):
"""Disable (or enable) the loading of web pages in www"""
bt = self.bt_connect_console
connect = bt.isChecked()
bt.setIcon(QIcon(get_icon(
'ipython_console.png' if connect else 'ipython_console_t.png')))
bt.setToolTip("%sonnect the console to the help explorer" % (
"Don't c" if connect else "C"))
if rcParams['console.connect_to_help'] is not connect:
rcParams['console.connect_to_help'] = connect
def reset_sphinx(self, value):
"""Method that is called if the configuration changes"""
if with_sphinx and hasattr(self.sphinx_thread, 'app'):
del self.sphinx_thread.app
@docstrings.dedent
def show_help(self, obj, oname='', files=None):
"""
Render the rst docu for the given object with sphinx and show it
Parameters
----------
%(HelpMixin.show_help.parameters)s
"""
if self.bt_lock.isChecked():
return
return super(UrlHelp, self).show_help(obj, oname=oname, files=files)
@docstrings.dedent
def show_intro(self, text=''):
"""
Show the intro text in the explorer
Parameters
----------
%(HelpMixin.show_intro.parameters)s"""
if self.sphinx_thread is not None:
with open(self.sphinx_thread.index_file, 'a') as f:
f.write('\n' + text.strip() + '\n\n' +
'Table of Contents\n'
'=================\n\n.. toctree::\n')
self.sphinx_thread.render(None, None)
def show_rst(self, text, oname='', descriptor=None, files=None):
"""Render restructured text with sphinx and show it
Parameters
----------
%(HelpMixin.show_rst.parameters)s"""
if self.bt_lock.isChecked() or self.sphinx_thread is None:
return False
if not oname and descriptor:
oname = descriptor.name
for f in files or []:
shutil.copyfile(f, osp.join(self.sphinx_dir, osp.basename(f)))
self.sphinx_thread.render(text, oname)
return True
def describe_object(self, obj, oname=''):
"""Describe an object using additionaly the object type from the
:meth:`get_objtype` method
Returns
-------
instance of :attr:`object_descriptor`
The descriptor of the object"""
return self.object_descriptor(obj, oname, self.get_objtype(obj))
def browse(self, url):
"""Reimplemented to add file paths to the url string"""
url = asstring(url)
html_file = osp.join(self.sphinx_dir, '_build', 'html', url + '.html')
if osp.exists(html_file):
url = file2html(html_file)
super(UrlHelp, self).browse(url)
def toogle_url_lock(self):
"""Disable (or enable) the loading of web pages in www"""
super(UrlHelp, self).toogle_url_lock()
# enable or disable documentation button
bt = self.bt_url_lock
offline = bt.isChecked()
try:
self.bt_url_menus.setEnabled(not offline)
except AttributeError: # not yet initialized
pass
def url_changed(self, url):
"""Reimplemented to remove file paths from the url string"""
try:
url = asstring(url.toString())
except AttributeError:
pass
if url.startswith('file://'):
fname = html2file(url)
if osp.samefile(self.build_dir, osp.commonprefix([
fname, self.build_dir])):
url = osp.splitext(osp.basename(fname))[0]
super(UrlHelp, self).url_changed(url)
def header(self, descriptor, sig):
return '%(name)s\n%(bars)s\n\n.. py:%(type)s:: %(name)s%(sig)s\n' % {
'name': descriptor.name, 'bars': '-' * len(descriptor.name),
'type': descriptor.objtype, 'sig': sig}
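    # Illustrative result of the reimplemented header for a function
    # descriptor named 'foo' with ``sig='(x)'`` (names are hypothetical):
    #
    #     foo
    #     ---
    #
    #     .. py:function:: foo(x)
    #
    # i.e. a short section title plus an autodoc-style directive that sphinx
    # can render, instead of the plain '=' bars of :meth:`HelpMixin.header`.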
def get_objtype(self, obj):
"""Get the object type of the given object and determine wheter the
object is considered a class, a module, a function, method or data
Parameters
----------
obj: object
Returns
-------
str
One out of {'class', 'module', 'function', 'method', 'data'}"""
if inspect.isclass(obj):
return 'class'
if inspect.ismodule(obj):
return 'module'
if inspect.isfunction(obj) or isinstance(obj, type(all)):
return 'function'
if inspect.ismethod(obj) or isinstance(obj, type(str.upper)):
return 'method'
return 'data'
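    # For example (illustrative): ``get_objtype(int)`` -> 'class',
    # ``get_objtype(inspect)`` -> 'module', ``get_objtype(len)`` ->
    # 'function', ``get_objtype(str.upper)`` -> 'method' and
    # ``get_objtype(42)`` -> 'data'.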
def is_importable(self, modname):
"""Determine whether members of the given module can be documented with
sphinx by using the :func:`sphinx.util.get_module_source` function
Parameters
----------
modname: str
The __name__ attribute of the module to import
Returns
-------
bool
True if sphinx can import the module"""
try:
get_module_source(modname)
return True
except Exception:
return False
def get_doc(self, descriptor):
"""Reimplemented to (potentially) use the features from
sphinx.ext.autodoc"""
obj = descriptor.obj
if inspect.ismodule(obj):
module = obj
else:
module = inspect.getmodule(obj)
if module is not None and (re.match('__.*__', module.__name__) or
not self.is_importable(module.__name__)):
module = None
isclass = inspect.isclass(obj)
# If the module is available, we try to use autodoc
if module is not None:
doc = '.. currentmodule:: ' + module.__name__ + '\n\n'
# a module --> use automodule
if inspect.ismodule(obj):
doc += self.header(descriptor, '')
doc += '.. automodule:: ' + obj.__name__
# an importable class --> use autoclass
elif isclass and getattr(module, obj.__name__, None) is not None:
doc += self.header(descriptor, '')
doc += '.. autoclass:: ' + obj.__name__
# an instance and the class can be imported
            # --> use super get_doc and autoclass for the type
elif descriptor.objtype == 'data' and getattr(
module, type(obj).__name__, None) is not None:
doc += '\n\n'.join([
super(UrlHelp, self).get_doc(descriptor),
"Class docstring\n===============",
'.. autoclass:: ' + type(obj).__name__])
# an instance --> use super get_doc for instance and the type
elif descriptor.objtype == 'data':
cls_doc = super(UrlHelp, self).get_doc(self.describe_object(
type(obj), type(obj).__name__))
doc += '\n\n'.join([
super(UrlHelp, self).get_doc(descriptor),
"Class docstring\n===============",
cls_doc])
# a function or method --> use super get_doc
else:
doc += super(UrlHelp, self).get_doc(descriptor)
# otherwise the object has been defined in this session
else:
# an instance --> use super get_doc for instance and the type
if descriptor.objtype == 'data':
cls_doc = super(UrlHelp, self).get_doc(self.describe_object(
type(obj), type(obj).__name__))
doc = '\n\n'.join([
super(UrlHelp, self).get_doc(descriptor),
"Class docstring\n===============",
cls_doc])
# a function or method --> use super get_doc
else:
doc = super(UrlHelp, self).get_doc(descriptor)
return doc.rstrip() + '\n'
def process_docstring(self, lines, descriptor):
"""Process the lines with the napoleon sphinx extension"""
lines = list(chain(*(l.splitlines() for l in lines)))
lines = NumpyDocstring(
lines, what=descriptor.objtype, name=descriptor.name,
obj=descriptor.obj).lines()
lines = GoogleDocstring(
lines, what=descriptor.objtype, name=descriptor.name,
obj=descriptor.obj).lines()
return indent(super(UrlHelp, self).process_docstring(
lines, descriptor))
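    # Sketch of what process_docstring does (assuming sphinx/napoleon are
    # available): a numpy-style section such as
    #
    #     Parameters
    #     ----------
    #     x : int
    #         Some value
    #
    # is rewritten by ``NumpyDocstring``/``GoogleDocstring`` into plain rst
    # field lists (e.g. ``:param x: Some value`` / ``:type x: int``), and the
    # whole block is then indented so that it nests under the ``.. py:...::``
    # directive produced by :meth:`header`.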
def close(self, *args, **kwargs):
if kwargs.pop('force', False) or (
not is_running_tests() and self.sphinx_thread is not None):
try:
del self.sphinx_thread.app
except AttributeError:
pass
shutil.rmtree(self.build_dir, ignore_errors=True)
if self._temp_dir:
shutil.rmtree(self.sphinx_dir, ignore_errors=True)
del self.sphinx_thread
return super(UrlHelp, self).close(*args, **kwargs)
elif is_running_tests():
self.bt_url_lock.setChecked(self.bt_url_lock_default)
self.bt_lock.setChecked(self.bt_lock_default)
else:
return super(UrlHelp, self).close(*args, **kwargs)
class SphinxThread(QtCore.QThread):
"""A thread to render sphinx documentation in a separate process"""
#: A signal to be emitted when the rendering finished. The url is the
#: file location
html_ready = QtCore.pyqtSignal(str)
html_error = QtCore.pyqtSignal(str)
def __init__(self, outdir, html_text_no_doc=''):
super(SphinxThread, self).__init__()
self.doc = None
self.name = None
self.html_text_no_doc = html_text_no_doc
self.outdir = outdir
self.index_file = osp.join(self.outdir, 'psyplot.rst')
self.confdir = osp.join(get_module_path(__name__), 'sphinx_supp')
shutil.copyfile(osp.join(self.confdir, 'psyplot.rst'),
osp.join(self.outdir, 'psyplot.rst'))
self.build_dir = osp.join(self.outdir, '_build', 'html')
def render(self, doc, name):
"""Render the given rst string and save the file as ``name + '.rst'``
Parameters
----------
doc: str
The rst docstring
name: str
the name to use for the file"""
if self.wait():
self.doc = doc
self.name = name
            # start rendering in a separate thread
if rcParams['help_explorer.render_docs_parallel']:
self.start()
else:
self.run()
def run(self):
"""Create the html file. When called the first time, it may take a
        while because the :class:`sphinx.application.Sphinx` app is built,
potentially with intersphinx
When finished, the html_ready signal is emitted"""
if not hasattr(self, 'app'):
from IPython.core.history import HistoryAccessor
# to avoid history access conflicts between different threads,
# we disable the ipython history
HistoryAccessor.enabled.default_value = False
self.app = Sphinx(self.outdir,
self.confdir,
self.build_dir,
osp.join(self.outdir, '_build', 'doctrees'),
'html',
status=StreamToLogger(logger, logging.DEBUG),
warning=StreamToLogger(logger, logging.DEBUG))
if self.name is not None:
docfile = osp.abspath(osp.join(self.outdir, self.name + '.rst'))
if docfile == self.index_file:
self.name += '1'
docfile = osp.abspath(
osp.join(self.outdir, self.name + '.rst'))
html_file = osp.abspath(osp.join(
self.outdir, '_build', 'html', self.name + '.html'))
if not osp.exists(docfile):
with open(self.index_file, 'a') as f:
f.write('\n ' + self.name)
with open(docfile, 'w') as f:
f.write(self.doc)
else:
html_file = osp.abspath(osp.join(
self.outdir, '_build', 'html', 'psyplot.html'))
try:
self.app.build(None, [])
except Exception:
msg = 'Error while building sphinx document %s' % (
self.name)
self.html_error.emit('<b>' + msg + '</b>')
logger.debug(msg)
else:
self.html_ready.emit(file2html(html_file))
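# Sketch of the files produced by a single ``SphinxThread.render(doc, 'foo')``
# call (the object name 'foo' is hypothetical): the rst is written to
# ``<outdir>/foo.rst``, 'foo' is appended to the toctree in
# ``<outdir>/psyplot.rst`` if the file is new, and after ``app.build`` the
# rendered page lives in ``<outdir>/_build/html/foo.html``, whose ``file://``
# url is emitted via the ``html_ready`` signal.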
class HelpExplorer(QWidget, DockMixin):
"""A widget for showing the documentation. It behaves somewhat similar
to spyders object inspector plugin and can show restructured text either
as html (if sphinx is installed) or as plain text. It furthermore has a
browser to show html content
Warnings
--------
    The :class:`UrlBrowser` class is known to crash under PyQt4 when new web
    page domains are loaded. Hence you should disable browsing to remote
    websites and even disable intersphinx"""
#: The viewer classes used by the help explorer. :class:`HelpExplorer`
#: instances replace this attribute with the corresponding HelpMixin
#: instance
viewers = OrderedDict([('HTML help', UrlHelp), ('Plain text', TextHelp)])
if not rcParams['help_explorer.use_webengineview']:
del viewers['HTML help']
def __init__(self, *args, **kwargs):
super(HelpExplorer, self).__init__(*args, **kwargs)
self.vbox = vbox = QVBoxLayout()
self.combo = QComboBox(parent=self)
vbox.addWidget(self.combo)
if _viewers:
self.viewers = _viewers.copy()
for w in self.viewers.values():
w.setParent(self)
else:
self.viewers = OrderedDict(
[(key, cls(parent=self)) for key, cls in six.iteritems(
self.viewers)])
# save the UrlHelp because QWebEngineView creates child processes
# that are not properly closed by PyQt and as such use too much
# memory
if is_running_tests():
for key, val in self.viewers.items():
_viewers[key] = val
for key, ini in six.iteritems(self.viewers):
self.combo.addItem(key)
ini.hide()
vbox.addWidget(ini)
self.viewer = next(six.itervalues(self.viewers))
self.viewer.show()
self.combo.currentIndexChanged[str].connect(self.set_viewer)
self.setLayout(vbox)
def set_viewer(self, name):
"""Sets the current documentation viewer
Parameters
----------
name: str or object
            A string must be one of the keys of the :attr:`viewers` attribute.
            An object can be one of the values in the :attr:`viewers`
            attribute"""
if isstring(name) and asstring(name) not in self.viewers:
raise ValueError("Don't have a viewer named %s" % (name, ))
elif not isstring(name):
viewer = name
else:
name = asstring(name)
viewer = self.viewers[name]
self.viewer.hide()
self.viewer = viewer
self.viewer.show()
if (isstring(name) and
not self.combo.currentText() == name):
self.combo.setCurrentIndex(list(self.viewers).index(name))
@docstrings.dedent
def show_help(self, obj, oname='', files=None):
"""
        Show the documentation of the given object
        We first try to use the current viewer based upon its
:attr:`HelpMixin.can_document_object` attribute. If this does not work,
we check the other viewers
Parameters
----------
%(HelpMixin.show_help.parameters)s"""
oname = asstring(oname)
ret = None
if self.viewer.can_document_object:
try:
ret = self.viewer.show_help(obj, oname=oname, files=files)
except Exception:
logger.debug("Could not document %s with %s viewer!",
oname, self.combo.currentText(), exc_info=True)
else:
curr_i = self.combo.currentIndex()
for i, (viewername, viewer) in enumerate(
six.iteritems(self.viewers)):
if i != curr_i and viewer.can_document_object:
self.set_viewer(viewername)
self.combo.blockSignals(True)
self.combo.setCurrentIndex(i)
self.combo.blockSignals(False)
try:
ret = viewer.show_help(obj, oname=oname, files=files)
except Exception:
logger.debug("Could not document %s with %s viewer!",
oname, viewername, exc_info=True)
if ret:
self.parent().raise_()
return ret
@docstrings.dedent
def show_rst(self, text, oname='', files=None):
"""
Show restructured text
        We first try to use the current viewer based upon its
:attr:`HelpMixin.can_show_rst` attribute. If this does not work,
we check the other viewers
Parameters
----------
%(HelpMixin.show_rst.parameters)s"""
ret = None
if self.viewer.can_show_rst:
ret = self.viewer.show_rst(text, oname=oname, files=files)
else:
for viewer in six.itervalues(self.viewers):
if viewer.can_show_rst:
self.set_viewer(viewer)
ret = viewer.show_rst(text, oname=oname, files=files)
break
if ret:
self.parent().raise_()
return ret
@docstrings.dedent
def show_intro(self, text=''):
"""
Show an intro text
        We first try to use the current viewer based upon its
:attr:`HelpMixin.can_show_rst` attribute. If this does not work,
we check the other viewers
Parameters
----------
%(HelpMixin.show_intro.parameters)s"""
found = False
for i, viewer in enumerate(six.itervalues(self.viewers)):
viewer.show_intro(text)
if not found and viewer.can_show_rst:
if i:
self.set_viewer(viewer)
found = True
def close(self, *args, **kwargs):
try:
self.viewers['HTML help'].close(*args, **kwargs)
except (KeyError, AttributeError):
pass
return super(HelpExplorer, self).close(*args, **kwargs)
| gpl-2.0 |
jreback/pandas | pandas/tests/extension/base/setitem.py | 1 | 11423 | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from .base import BaseExtensionTests
class BaseSetitemTests(BaseExtensionTests):
def test_setitem_scalar_series(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
data[0] = data[1]
assert data[0] == data[1]
def test_setitem_sequence(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
original = data.copy()
data[[0, 1]] = [data[1], data[0]]
assert data[0] == original[1]
assert data[1] == original[0]
def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
ser = pd.Series(data)
original = ser.copy()
value = [data[0]]
if as_array:
value = data._from_sequence(value)
xpr = "cannot set using a {} indexer with a different length"
with pytest.raises(ValueError, match=xpr.format("list-like")):
ser[[0, 1]] = value
# Ensure no modifications made before the exception
self.assert_series_equal(ser, original)
with pytest.raises(ValueError, match=xpr.format("slice")):
ser[slice(3)] = value
self.assert_series_equal(ser, original)
def test_setitem_empty_indexer(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
original = data.copy()
data[np.array([], dtype=int)] = []
self.assert_equal(data, original)
def test_setitem_sequence_broadcasts(self, data, box_in_series):
if box_in_series:
data = pd.Series(data)
data[[0, 1]] = data[2]
assert data[0] == data[2]
assert data[1] == data[2]
@pytest.mark.parametrize("setter", ["loc", "iloc"])
def test_setitem_scalar(self, data, setter):
arr = pd.Series(data)
setter = getattr(arr, setter)
setter[0] = data[1]
assert arr[0] == data[1]
def test_setitem_loc_scalar_mixed(self, data):
df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
df.loc[0, "B"] = data[1]
assert df.loc[0, "B"] == data[1]
def test_setitem_loc_scalar_single(self, data):
df = pd.DataFrame({"B": data})
df.loc[10, "B"] = data[1]
assert df.loc[10, "B"] == data[1]
def test_setitem_loc_scalar_multiple_homogoneous(self, data):
df = pd.DataFrame({"A": data, "B": data})
df.loc[10, "B"] = data[1]
assert df.loc[10, "B"] == data[1]
def test_setitem_iloc_scalar_mixed(self, data):
df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
df.iloc[0, 1] = data[1]
assert df.loc[0, "B"] == data[1]
def test_setitem_iloc_scalar_single(self, data):
df = pd.DataFrame({"B": data})
df.iloc[10, 0] = data[1]
assert df.loc[10, "B"] == data[1]
def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
df = pd.DataFrame({"A": data, "B": data})
df.iloc[10, 1] = data[1]
assert df.loc[10, "B"] == data[1]
@pytest.mark.parametrize(
"mask",
[
np.array([True, True, True, False, False]),
pd.array([True, True, True, False, False], dtype="boolean"),
pd.array([True, True, True, pd.NA, pd.NA], dtype="boolean"),
],
ids=["numpy-array", "boolean-array", "boolean-array-na"],
)
def test_setitem_mask(self, data, mask, box_in_series):
arr = data[:5].copy()
expected = arr.take([0, 0, 0, 3, 4])
if box_in_series:
arr = pd.Series(arr)
expected = pd.Series(expected)
arr[mask] = data[0]
self.assert_equal(expected, arr)
def test_setitem_mask_raises(self, data, box_in_series):
# wrong length
mask = np.array([True, False])
if box_in_series:
data = pd.Series(data)
with pytest.raises(IndexError, match="wrong length"):
data[mask] = data[0]
mask = pd.array(mask, dtype="boolean")
with pytest.raises(IndexError, match="wrong length"):
data[mask] = data[0]
def test_setitem_mask_boolean_array_with_na(self, data, box_in_series):
mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
mask[:3] = True
mask[3:5] = pd.NA
if box_in_series:
data = pd.Series(data)
data[mask] = data[0]
assert (data[:3] == data[0]).all()
@pytest.mark.parametrize(
"idx",
[[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
ids=["list", "integer-array", "numpy-array"],
)
def test_setitem_integer_array(self, data, idx, box_in_series):
arr = data[:5].copy()
expected = data.take([0, 0, 0, 3, 4])
if box_in_series:
arr = pd.Series(arr)
expected = pd.Series(expected)
arr[idx] = arr[0]
self.assert_equal(arr, expected)
@pytest.mark.parametrize(
"idx, box_in_series",
[
([0, 1, 2, pd.NA], False),
pytest.param(
[0, 1, 2, pd.NA], True, marks=pytest.mark.xfail(reason="GH-31948")
),
(pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
(pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
],
ids=["list-False", "list-True", "integer-array-False", "integer-array-True"],
)
def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series):
arr = data.copy()
# TODO(xfail) this raises KeyError about labels not found (it tries label-based)
# for list of labels with Series
if box_in_series:
arr = pd.Series(data, index=[tm.rands(4) for _ in range(len(data))])
msg = "Cannot index with an integer indexer containing NA values"
with pytest.raises(ValueError, match=msg):
arr[idx] = arr[0]
@pytest.mark.parametrize("as_callable", [True, False])
@pytest.mark.parametrize("setter", ["loc", None])
def test_setitem_mask_aligned(self, data, as_callable, setter):
ser = pd.Series(data)
mask = np.zeros(len(data), dtype=bool)
mask[:2] = True
if as_callable:
mask2 = lambda x: mask
else:
mask2 = mask
if setter:
# loc
target = getattr(ser, setter)
else:
# Series.__setitem__
target = ser
target[mask2] = data[5:7]
ser[mask2] = data[5:7]
assert ser[0] == data[5]
assert ser[1] == data[6]
@pytest.mark.parametrize("setter", ["loc", None])
def test_setitem_mask_broadcast(self, data, setter):
ser = pd.Series(data)
mask = np.zeros(len(data), dtype=bool)
mask[:2] = True
if setter: # loc
target = getattr(ser, setter)
else: # __setitem__
target = ser
target[mask] = data[10]
assert ser[0] == data[10]
assert ser[1] == data[10]
def test_setitem_expand_columns(self, data):
df = pd.DataFrame({"A": data})
result = df.copy()
result["B"] = 1
expected = pd.DataFrame({"A": data, "B": [1] * len(data)})
self.assert_frame_equal(result, expected)
result = df.copy()
result.loc[:, "B"] = 1
self.assert_frame_equal(result, expected)
# overwrite with new type
result["B"] = data
expected = pd.DataFrame({"A": data, "B": data})
self.assert_frame_equal(result, expected)
def test_setitem_expand_with_extension(self, data):
df = pd.DataFrame({"A": [1] * len(data)})
result = df.copy()
result["B"] = data
expected = pd.DataFrame({"A": [1] * len(data), "B": data})
self.assert_frame_equal(result, expected)
result = df.copy()
result.loc[:, "B"] = data
self.assert_frame_equal(result, expected)
def test_setitem_frame_invalid_length(self, data):
df = pd.DataFrame({"A": [1] * len(data)})
xpr = (
rf"Length of values \({len(data[:5])}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=xpr):
df["B"] = data[:5]
@pytest.mark.xfail(reason="GH#20441: setitem on extension types.")
def test_setitem_tuple_index(self, data):
s = pd.Series(data[:2], index=[(0, 0), (0, 1)])
expected = pd.Series(data.take([1, 1]), index=s.index)
s[(0, 1)] = data[1]
self.assert_series_equal(s, expected)
def test_setitem_slice(self, data, box_in_series):
arr = data[:5].copy()
expected = data.take([0, 0, 0, 3, 4])
if box_in_series:
arr = pd.Series(arr)
expected = pd.Series(expected)
arr[:3] = data[0]
self.assert_equal(arr, expected)
def test_setitem_loc_iloc_slice(self, data):
arr = data[:5].copy()
s = pd.Series(arr, index=["a", "b", "c", "d", "e"])
expected = pd.Series(data.take([0, 0, 0, 3, 4]), index=s.index)
result = s.copy()
result.iloc[:3] = data[0]
self.assert_equal(result, expected)
result = s.copy()
result.loc[:"c"] = data[0]
self.assert_equal(result, expected)
def test_setitem_slice_mismatch_length_raises(self, data):
arr = data[:5]
with pytest.raises(ValueError):
arr[:1] = arr[:2]
def test_setitem_slice_array(self, data):
arr = data[:5].copy()
arr[:5] = data[-5:]
self.assert_extension_array_equal(arr, data[-5:])
def test_setitem_scalar_key_sequence_raise(self, data):
arr = data[:5].copy()
with pytest.raises(ValueError):
arr[0] = arr[[0, 1]]
def test_setitem_preserves_views(self, data):
# GH#28150 setitem shouldn't swap the underlying data
view1 = data.view()
view2 = data[:]
data[0] = data[1]
assert view1[0] == data[1]
assert view2[0] == data[1]
def test_setitem_dataframe_column_with_index(self, data):
# https://github.com/pandas-dev/pandas/issues/32395
df = expected = pd.DataFrame({"data": pd.Series(data)})
result = pd.DataFrame(index=df.index)
result.loc[df.index, "data"] = df["data"]
self.assert_frame_equal(result, expected)
def test_setitem_dataframe_column_without_index(self, data):
# https://github.com/pandas-dev/pandas/issues/32395
df = expected = pd.DataFrame({"data": pd.Series(data)})
result = pd.DataFrame(index=df.index)
result.loc[:, "data"] = df["data"]
self.assert_frame_equal(result, expected)
def test_setitem_series_with_index(self, data):
# https://github.com/pandas-dev/pandas/issues/32395
ser = expected = pd.Series(data, name="data")
result = pd.Series(index=ser.index, dtype=object, name="data")
result.loc[ser.index] = ser
self.assert_series_equal(result, expected)
def test_setitem_series_without_index(self, data):
# https://github.com/pandas-dev/pandas/issues/32395
ser = expected = pd.Series(data, name="data")
result = pd.Series(index=ser.index, dtype=object, name="data")
result.loc[:] = ser
self.assert_series_equal(result, expected)
| bsd-3-clause |
bloyl/mne-python | mne/preprocessing/tests/test_ica.py | 4 | 57315 | # Author: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
from contextlib import nullcontext
from itertools import product
import os
import os.path as op
import shutil
from unittest import SkipTest
import pytest
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
from scipy import stats, linalg
from scipy.io import loadmat, savemat
import matplotlib.pyplot as plt
from mne import (Epochs, read_events, pick_types, create_info, EpochsArray,
EvokedArray, Annotations, pick_channels_regexp,
make_ad_hoc_cov)
from mne.cov import read_cov
from mne.preprocessing import (ICA as _ICA, ica_find_ecg_events,
ica_find_eog_events, read_ica)
from mne.preprocessing.ica import (get_score_funcs, corrmap, _sort_components,
_ica_explained_variance, read_ica_eeglab)
from mne.io import read_raw_fif, Info, RawArray, read_raw_ctf, read_raw_eeglab
from mne.io.pick import _DATA_CH_TYPES_SPLIT, get_channel_type_constants
from mne.io.eeglab.eeglab import _check_load_mat
from mne.rank import _compute_rank_int
from mne.utils import catch_logging, requires_sklearn
from mne.datasets import testing
from mne.event import make_fixed_length_events
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
test_cov_name = op.join(data_dir, 'test-cov.fif')
test_base_dir = testing.data_path(download=False)
ctf_fname = op.join(test_base_dir, 'CTF', 'testdata_ctf.ds')
fif_fname = op.join(test_base_dir, 'MEG', 'sample',
'sample_audvis_trunc_raw.fif')
eeglab_fname = op.join(test_base_dir, 'EEGLAB', 'test_raw.set')
eeglab_montage = op.join(test_base_dir, 'EEGLAB', 'test_chans.locs')
ctf_fname2 = op.join(test_base_dir, 'CTF', 'catch-alp-good-f.ds')
event_id, tmin, tmax = 1, -0.2, 0.2
# if stop is too small pca may fail in some cases, but we're okay on this file
start, stop = 0, 6
score_funcs_unsuited = ['pointbiserialr', 'ansari']
def ICA(*args, **kwargs):
"""Fix the random state in tests."""
if 'random_state' not in kwargs:
kwargs['random_state'] = 0
return _ICA(*args, **kwargs)
def _skip_check_picard(method):
if method == 'picard':
try:
import picard # noqa, analysis:ignore
except Exception as exp:
raise SkipTest("Picard is not installed (%s)." % (exp,))
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_ica_full_data_recovery(method):
"""Test recovery of full data when no source is rejected."""
# Most basic recovery
_skip_check_picard(method)
raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
raw.info['projs'] = []
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:10]
epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks,
baseline=None, preload=True)
evoked = epochs.average()
n_channels = 5
data = raw._data[:n_channels].copy()
data_epochs = epochs.get_data()
data_evoked = evoked.data
raw.set_annotations(Annotations([0.5], [0.5], ['BAD']))
methods = [method]
for method in methods:
stuff = [(2, n_channels, True), (2, n_channels // 2, False)]
for n_components, n_pca_components, ok in stuff:
ica = ICA(n_components=n_components, random_state=0,
method=method, max_iter=1)
kwargs = dict(exclude=[], n_pca_components=n_pca_components)
picks = list(range(n_channels))
with pytest.warns(UserWarning, match=None): # sometimes warns
ica.fit(raw, picks=picks)
_assert_ica_attributes(ica, raw.get_data(picks))
raw2 = ica.apply(raw.copy(), **kwargs)
if ok:
assert_allclose(data[:n_channels], raw2._data[:n_channels],
rtol=1e-10, atol=1e-15)
else:
diff = np.abs(data[:n_channels] - raw2._data[:n_channels])
assert (np.max(diff) > 1e-14)
ica = ICA(n_components=n_components, method=method,
random_state=0)
with pytest.warns(None): # sometimes warns
ica.fit(epochs, picks=picks)
_assert_ica_attributes(ica, epochs.get_data(picks))
epochs2 = ica.apply(epochs.copy(), **kwargs)
data2 = epochs2.get_data()[:, :n_channels]
if ok:
assert_allclose(data_epochs[:, :n_channels], data2,
rtol=1e-10, atol=1e-15)
else:
diff = np.abs(data_epochs[:, :n_channels] - data2)
assert (np.max(diff) > 1e-14)
evoked2 = ica.apply(evoked.copy(), **kwargs)
data2 = evoked2.data[:n_channels]
if ok:
assert_allclose(data_evoked[:n_channels], data2,
rtol=1e-10, atol=1e-15)
else:
diff = np.abs(evoked.data[:n_channels] - data2)
assert (np.max(diff) > 1e-14)
with pytest.raises(ValueError, match='Invalid value'):
ICA(method='pizza-decomposision')
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_ica_simple(method):
"""Test that ICA recovers the unmixing matrix in a simple case."""
if method == "fastica":
try:
import sklearn # noqa: F401
except ImportError:
raise SkipTest("scikit-learn not installed")
_skip_check_picard(method)
n_components = 3
n_samples = 1000
rng = np.random.RandomState(0)
S = rng.laplace(size=(n_components, n_samples))
A = rng.randn(n_components, n_components)
data = np.dot(A, S)
info = create_info(data.shape[-2], 1000., 'eeg')
cov = make_ad_hoc_cov(info)
ica = ICA(n_components=n_components, method=method, random_state=0,
noise_cov=cov)
with pytest.warns(RuntimeWarning, match='No average EEG.*'):
ica.fit(RawArray(data, info))
transform = ica.unmixing_matrix_ @ ica.pca_components_ @ A
amari_distance = np.mean(np.sum(np.abs(transform), axis=1) /
np.max(np.abs(transform), axis=1) - 1.)
assert amari_distance < 0.1
def test_warnings():
"""Test that ICA warns on certain input data conditions."""
raw = read_raw_fif(raw_fname).crop(0, 5).load_data()
events = read_events(event_name)
epochs = Epochs(raw, events=events, baseline=None, preload=True)
ica = ICA(n_components=2, max_iter=1, method='infomax', random_state=0)
# not high-passed
epochs.info['highpass'] = 0.
with pytest.warns(RuntimeWarning, match='should be high-pass filtered'):
ica.fit(epochs)
# baselined
epochs.info['highpass'] = 1.
epochs.baseline = (epochs.tmin, 0)
with pytest.warns(RuntimeWarning, match='epochs.*were baseline-corrected'):
ica.fit(epochs)
# cleaning baseline-corrected data
epochs.info['highpass'] = 1.
epochs.baseline = None
ica.fit(epochs)
epochs.baseline = (epochs.tmin, 0)
with pytest.warns(RuntimeWarning, match='consider baseline-correcting.*'
'again'):
ica.apply(epochs)
@requires_sklearn
@pytest.mark.parametrize('n_components', (None, 0.9999, 8, 9, 10))
@pytest.mark.parametrize('n_pca_components', [8, 9, 0.9999, 10])
@pytest.mark.filterwarnings('ignore:FastICA did not converge.*:UserWarning')
def test_ica_noop(n_components, n_pca_components, tmpdir):
"""Test that our ICA is stable even with a bad max_pca_components."""
data = np.random.RandomState(0).randn(10, 1000)
info = create_info(10, 1000., 'eeg')
raw = RawArray(data, info)
raw.set_eeg_reference()
raw.info['highpass'] = 1.0 # fake high-pass filtering
assert np.linalg.matrix_rank(raw.get_data()) == 9
kwargs = dict(n_components=n_components, verbose=True)
if isinstance(n_components, int) and \
isinstance(n_pca_components, int) and \
n_components > n_pca_components:
return
ica = ICA(**kwargs)
ica.n_pca_components = n_pca_components # backward compat
if n_components == 10 and n_pca_components == 0.9999:
with pytest.raises(RuntimeError, match='.*requires.*PCA.*'):
ica.fit(raw)
return
if n_components == 10 and n_pca_components == 10:
ctx = pytest.warns(RuntimeWarning, match='.*unstable.*integer <= 9')
bad = True # pinv will fail
elif n_components == 0.9999 and n_pca_components == 8:
ctx = pytest.raises(RuntimeError, match='requires 9 PCA values.*but')
bad = 'exit'
else:
bad = False # pinv will not fail
ctx = nullcontext()
with ctx:
ica.fit(raw)
assert ica._max_pca_components is None
if bad == 'exit':
return
raw_new = ica.apply(raw.copy())
# 8 components is not a no-op; "bad" means our pinv has failed
if n_pca_components == 8 or bad:
assert ica.n_pca_components == n_pca_components
assert not np.allclose(raw.get_data(), raw_new.get_data(), atol=0)
return
assert_allclose(raw.get_data(), raw_new.get_data(), err_msg='Id failure')
_assert_ica_attributes(ica, data)
# and with I/O
fname = tmpdir.join('temp-ica.fif')
ica.save(fname)
ica = read_ica(fname)
raw_new = ica.apply(raw.copy())
assert_allclose(raw.get_data(), raw_new.get_data(), err_msg='I/O failure')
_assert_ica_attributes(ica)
@requires_sklearn
@pytest.mark.parametrize("method, max_iter_default", [("fastica", 1000),
("infomax", 500), ("picard", 500)])
def test_ica_max_iter_(method, max_iter_default):
"""Test that ICA.max_iter is set to the right defaults."""
_skip_check_picard(method)
# check that new defaults come out for 'auto'
ica = ICA(n_components=3, method=method, max_iter='auto')
assert ica.max_iter == max_iter_default
# check that user input comes out unchanged
ica = ICA(n_components=3, method=method, max_iter=2000)
assert ica.max_iter == 2000
with pytest.raises(ValueError, match='Invalid'):
ICA(max_iter='foo')
with pytest.raises(TypeError, match='must be an instance'):
ICA(max_iter=1.)
@requires_sklearn
@pytest.mark.parametrize("method", ["infomax", "fastica", "picard"])
def test_ica_n_iter_(method, tmpdir):
"""Test that ICA.n_iter_ is set after fitting."""
_skip_check_picard(method)
raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
n_components = 3
max_iter = 1
ica = ICA(n_components=n_components, max_iter=max_iter, method=method,
random_state=0)
if method == 'infomax':
ica.fit(raw)
else:
with pytest.warns(UserWarning, match='did not converge'):
ica.fit(raw)
assert ica.method == method
assert_equal(ica.n_iter_, max_iter)
# Test I/O roundtrip.
output_fname = tmpdir.join('test_ica-ica.fif')
_assert_ica_attributes(ica, raw.get_data('data'), limits=(5, 110))
ica.save(output_fname)
ica = read_ica(output_fname)
assert ica.method == method
_assert_ica_attributes(ica)
assert_equal(ica.n_iter_, max_iter)
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_ica_rank_reduction(method):
"""Test recovery ICA rank reduction."""
_skip_check_picard(method)
# Most basic recovery
raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:10]
n_components = 5
for n_pca_components in [6, 10]:
with pytest.warns(UserWarning, match='did not converge'):
ica = ICA(n_components=n_components,
method=method, max_iter=1).fit(raw, picks=picks)
rank_before = _compute_rank_int(raw.copy().pick(picks), proj=False)
assert_equal(rank_before, len(picks))
raw_clean = ica.apply(raw.copy(), n_pca_components=n_pca_components)
rank_after = _compute_rank_int(raw_clean.copy().pick(picks),
proj=False)
        # the interaction between ICA rejection and PCA components is
        # difficult to predict; rank_after often seems to be 1 higher than
        # n_pca_components
assert (n_components < n_pca_components <= rank_after <=
rank_before)
# This is a lot of parameters but they interact so they matter. Also they in
# total take < 2 sec on a workstation.
@pytest.mark.parametrize('n_pca_components', (None, 0.999999))
@pytest.mark.parametrize('proj', (True, False))
@pytest.mark.parametrize('cov', (False, True))
@pytest.mark.parametrize('meg', ('mag', True, False))
@pytest.mark.parametrize('eeg', (False, True))
def test_ica_projs(n_pca_components, proj, cov, meg, eeg):
"""Test that ICA handles projections properly."""
if cov and not proj: # proj is always done with cov
return
if not meg and not eeg: # no channels
return
raw = read_raw_fif(raw_fname).crop(0.5, stop).pick_types(
meg=meg, eeg=eeg)
raw.pick(np.arange(0, len(raw.ch_names), 5)) # just for speed
raw.info.normalize_proj()
assert 10 < len(raw.ch_names) < 75
if eeg:
raw.set_eeg_reference(projection=True)
raw.load_data()
raw._data -= raw._data.mean(-1, keepdims=True)
raw_data = raw.get_data()
assert len(raw.info['projs']) > 0
assert not raw.proj
raw_fit = raw.copy()
kwargs = dict(atol=1e-12 if eeg else 1e-20, rtol=1e-8)
if proj:
raw_fit.apply_proj()
fit_data = raw_fit.get_data()
if proj:
assert not np.allclose(raw_fit.get_data(), raw_data, **kwargs)
else:
assert np.allclose(raw_fit.get_data(), raw_data, **kwargs)
assert raw_fit.proj == proj
if cov:
noise_cov = make_ad_hoc_cov(raw.info)
else:
noise_cov = None
# infomax here just so we don't require sklearn
ica = ICA(max_iter=1, noise_cov=noise_cov, method='infomax',
n_components=10)
with pytest.warns(None): # convergence
ica.fit(raw_fit)
if cov:
assert ica.pre_whitener_.shape == (len(raw.ch_names),) * 2
else:
assert ica.pre_whitener_.shape == (len(raw.ch_names), 1)
with catch_logging() as log:
raw_apply = ica.apply(
raw_fit.copy(), n_pca_components=n_pca_components, verbose=True)
log = log.getvalue()
print(log) # very useful for debugging, might as well leave it in
if proj:
assert 'Applying projection' in log
else:
assert 'Applying projection' not in log
assert_allclose(raw_apply.get_data(), fit_data, **kwargs)
raw_apply = ica.apply(raw.copy())
apply_data = raw_apply.get_data()
assert_allclose(apply_data, fit_data, **kwargs)
if proj:
assert not np.allclose(apply_data, raw_data, **kwargs)
else:
assert_allclose(apply_data, raw_data, **kwargs)
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_ica_reset(method):
"""Test ICA resetting."""
_skip_check_picard(method)
raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:10]
run_time_attrs = (
'pre_whitener_',
'unmixing_matrix_',
'mixing_matrix_',
'n_components_',
'n_samples_',
'pca_components_',
'pca_explained_variance_',
'pca_mean_',
'n_iter_'
)
with pytest.warns(UserWarning, match='did not converge'):
ica = ICA(
n_components=3, method=method, max_iter=1).fit(raw, picks=picks)
assert (all(hasattr(ica, attr) for attr in run_time_attrs))
assert ica.labels_ is not None
ica._reset()
assert (not any(hasattr(ica, attr) for attr in run_time_attrs))
assert ica.labels_ is not None
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
@pytest.mark.parametrize('n_components', (2, 0.6))
@pytest.mark.parametrize('noise_cov', (False, True))
@pytest.mark.parametrize('n_pca_components', [20])
def test_ica_core(method, n_components, noise_cov, n_pca_components):
"""Test ICA on raw and epochs."""
_skip_check_picard(method)
raw = read_raw_fif(raw_fname).crop(0, stop).load_data()
# The None cases help reveal bugs but are time consuming.
if noise_cov:
noise_cov = read_cov(test_cov_name)
noise_cov['projs'] = [] # avoid warnings
else:
noise_cov = None
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[::4]
raw.pick(picks[::4])
raw.del_proj()
del picks
epochs = Epochs(raw, events[:4], event_id, tmin, tmax,
baseline=None, preload=True)
# test essential core functionality
# Test ICA raw
ica = ICA(noise_cov=noise_cov, n_components=n_components,
method=method, max_iter=1)
with pytest.raises(ValueError, match='Cannot check for channels of t'):
'meg' in ica
print(ica) # to test repr
# test fit checker
with pytest.raises(RuntimeError, match='No fit available'):
ica.get_sources(raw)
with pytest.raises(RuntimeError, match='No fit available'):
ica.get_sources(epochs)
# Test error upon empty epochs fitting
with pytest.raises(RuntimeError, match='none were found'):
ica.fit(epochs[0:0])
# test decomposition
with pytest.warns(UserWarning, match='did not converge'):
ica.fit(raw)
repr(ica) # to test repr
assert ('mag' in ica) # should now work without error
# test re-fit
unmixing1 = ica.unmixing_matrix_
with pytest.warns(UserWarning, match='did not converge'):
ica.fit(raw)
assert_array_almost_equal(unmixing1, ica.unmixing_matrix_)
raw_sources = ica.get_sources(raw)
# test for #3804
assert_equal(raw_sources._filenames, [None])
print(raw_sources)
# test for gh-6271 (scaling of ICA traces)
fig = raw_sources.plot()
assert len(fig.mne.ax_main.lines) in (4, 8)
for line in fig.mne.ax_main.lines:
y = line.get_ydata()
if len(y) > 2: # actual data, not markers
assert np.ptp(y) < 15
plt.close('all')
sources = raw_sources[:, :][0]
assert (sources.shape[0] == ica.n_components_)
# test preload filter
raw3 = raw.copy()
raw3.preload = False
with pytest.raises(RuntimeError, match='to be loaded'):
ica.apply(raw3)
#######################################################################
# test epochs decomposition
ica = ICA(noise_cov=noise_cov, n_components=n_components, method=method)
with pytest.warns(None): # sometimes warns
ica.fit(epochs)
_assert_ica_attributes(ica, epochs.get_data(), limits=(0.2, 20))
data = epochs.get_data()[:, 0, :]
n_samples = np.prod(data.shape)
assert_equal(ica.n_samples_, n_samples)
print(ica) # to test repr
sources = ica.get_sources(epochs).get_data()
assert (sources.shape[1] == ica.n_components_)
with pytest.raises(ValueError, match='target do not have the same nu'):
ica.score_sources(epochs, target=np.arange(1))
# test preload filter
epochs3 = epochs.copy()
epochs3.preload = False
with pytest.raises(RuntimeError, match='requires epochs data to be l'):
ica.apply(epochs3)
# test for bug with whitener updating
_pre_whitener = ica.pre_whitener_.copy()
epochs._data[:, 0, 10:15] *= 1e12
ica.apply(epochs.copy())
assert_array_equal(_pre_whitener, ica.pre_whitener_)
# test expl. var threshold leading to empty sel
ica.n_components = 0.1
with pytest.raises(RuntimeError, match='One PCA component captures most'):
ica.fit(epochs)
offender = 1, 2, 3,
with pytest.raises(ValueError, match='Data input must be of Raw'):
ica.get_sources(offender)
with pytest.raises(TypeError, match='must be an instance of'):
ica.fit(offender)
with pytest.raises(TypeError, match='must be an instance of'):
ica.apply(offender)
# gh-7868
ica.n_pca_components = 3
ica.n_components = None
with pytest.raises(ValueError, match='pca_components.*is greater'):
ica.fit(epochs, picks=[0, 1])
ica.n_pca_components = None
ica.n_components = 3
with pytest.raises(ValueError, match='n_components.*cannot be greater'):
ica.fit(epochs, picks=[0, 1])
@pytest.fixture
def short_raw_epochs():
"""Get small data."""
raw = read_raw_fif(raw_fname).crop(0, 5).load_data()
raw.pick_channels(set(raw.ch_names[::10]) | set(
['EOG 061', 'MEG 1531', 'MEG 1441', 'MEG 0121']))
assert 'eog' in raw
raw.del_proj() # avoid warnings
raw.set_annotations(Annotations([0.5], [0.5], ['BAD']))
raw.resample(100)
# XXX This breaks the tests :(
# raw.info['bads'] = [raw.ch_names[1]]
# Create epochs that have different channels from raw
events = make_fixed_length_events(raw)
picks = pick_types(raw.info, meg=True, eeg=True, eog=False)[:-1]
epochs = Epochs(raw, events, None, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True, proj=False)
assert len(epochs) == 3
epochs_eog = Epochs(raw, epochs.events, event_id, tmin, tmax,
picks=('meg', 'eog'), baseline=(None, 0), preload=True)
return raw, epochs, epochs_eog
@requires_sklearn
@pytest.mark.slowtest
@pytest.mark.parametrize("method", ["picard", "fastica"])
def test_ica_additional(method, tmpdir, short_raw_epochs):
"""Test additional ICA functionality."""
_skip_check_picard(method)
raw, epochs, epochs_eog = short_raw_epochs
few_picks = np.arange(5)
# test if n_components=None works
ica = ICA(n_components=None, method=method, max_iter=1)
with pytest.warns(UserWarning, match='did not converge'):
ica.fit(epochs)
_assert_ica_attributes(ica, epochs.get_data('data'), limits=(0.05, 20))
test_cov = read_cov(test_cov_name)
ica = ICA(noise_cov=test_cov, n_components=3, method=method)
assert (ica.info is None)
with pytest.warns(RuntimeWarning, match='normalize_proj'):
ica.fit(raw, picks=few_picks)
_assert_ica_attributes(ica, raw.get_data(np.arange(5)), limits=(1, 90))
assert (isinstance(ica.info, Info))
assert (ica.n_components_ < 5)
ica = ICA(n_components=3, method=method, max_iter=1)
with pytest.raises(RuntimeError, match='No fit'):
ica.save('')
with pytest.warns(Warning, match='converge'):
ica.fit(raw, np.arange(1, 6))
_assert_ica_attributes(
ica, raw.get_data(np.arange(1, 6)))
# check Kuiper index threshold
assert_allclose(ica._get_ctps_threshold(), 0.5)
with pytest.raises(TypeError, match='str or numeric'):
ica.find_bads_ecg(raw, threshold=None)
with pytest.warns(RuntimeWarning, match='is longer than the signal'):
ica.find_bads_ecg(raw, threshold=0.25)
# check invalid `measure`
with pytest.warns(RuntimeWarning, match='longer'):
with pytest.raises(ValueError, match='Unknown measure'):
ica.find_bads_ecg(raw, method='correlation', measure='unknown',
threshold='auto')
# check passing a ch_name to find_bads_ecg
with pytest.warns(RuntimeWarning, match='longer'):
_, scores_1 = ica.find_bads_ecg(raw, threshold='auto')
with pytest.warns(RuntimeWarning, match='longer'):
_, scores_2 = ica.find_bads_ecg(raw, raw.ch_names[1], threshold='auto')
assert scores_1[0] != scores_2[0]
# test corrmap
ica2 = ica.copy()
ica3 = ica.copy()
corrmap([ica, ica2], (0, 0), threshold='auto', label='blinks', plot=True,
ch_type="mag")
with pytest.raises(RuntimeError, match='No component detected'):
corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False,)
corrmap([ica, ica2], (0, 0), threshold=0.5, plot=False, show=False)
assert (ica.labels_["blinks"] == ica2.labels_["blinks"])
assert (0 in ica.labels_["blinks"])
# test retrieval of component maps as arrays
components = ica.get_components()
template = components[:, 0]
EvokedArray(components, ica.info, tmin=0.).plot_topomap([0], time_unit='s')
corrmap([ica, ica3], template, threshold='auto', label='blinks', plot=True,
ch_type="mag")
assert (ica2.labels_["blinks"] == ica3.labels_["blinks"])
plt.close('all')
# No match
bad_ica = ica2.copy()
bad_ica.mixing_matrix_[:] = 0.
with pytest.warns(RuntimeWarning, match='divide'):
with catch_logging() as log:
corrmap([ica, bad_ica], (0, 0), threshold=0.5, plot=False,
show=False, verbose=True)
log = log.getvalue()
assert 'No maps selected' in log
# make sure a single threshold in a list works
corrmap([ica, ica3], template, threshold=[0.5], label='blinks', plot=False,
ch_type="mag")
ica_different_channels = ICA(n_components=2, max_iter=1)
with pytest.warns(Warning, match='converge'):
ica_different_channels.fit(raw, picks=[2, 3, 4, 5])
pytest.raises(ValueError, corrmap, [ica_different_channels, ica], (0, 0))
# test warnings on bad filenames
ica_badname = tmpdir.join('test-bad-name.fif.gz')
with pytest.warns(RuntimeWarning, match='-ica.fif'):
ica.save(ica_badname)
with pytest.warns(RuntimeWarning, match='-ica.fif'):
read_ica(ica_badname)
# test decim
ica = ICA(n_components=3, method=method, max_iter=1)
raw_ = raw.copy()
for _ in range(3):
raw_.append(raw_)
n_samples = raw_._data.shape[1]
with pytest.warns(UserWarning, match='did not converge'):
ica.fit(raw, picks=few_picks)
_assert_ica_attributes(ica)
assert raw_._data.shape[1] == n_samples
# test expl var
with pytest.raises(ValueError, match=r".*1.0 \(exclusive\).*"):
ICA(n_components=1., method=method)
with pytest.raises(ValueError, match="Selecting one component"):
ICA(n_components=1, method=method)
ica = ICA(n_components=4, method=method, max_iter=1)
with pytest.warns(UserWarning, match='did not converge'):
ica.fit(raw)
_assert_ica_attributes(ica)
assert ica.n_components_ == 4
ica_var = _ica_explained_variance(ica, raw, normalize=True)
assert (np.all(ica_var[:-1] >= ica_var[1:]))
# test ica sorting
ica.exclude = [0]
ica.labels_ = dict(blink=[0], think=[1])
ica_sorted = _sort_components(ica, [3, 2, 1, 0], copy=True)
assert_equal(ica_sorted.exclude, [3])
assert_equal(ica_sorted.labels_, dict(blink=[3], think=[2]))
# epochs extraction from raw fit
pytest.raises(RuntimeError, ica.get_sources, epochs)
# test filtering
ica_raw = ica.get_sources(raw)
d1 = ica_raw._data[0].copy()
ica_raw.filter(4, 20, fir_design='firwin2')
assert_equal(ica_raw.info['lowpass'], 20.)
assert_equal(ica_raw.info['highpass'], 4.)
assert ((d1 != ica_raw._data[0]).any())
d1 = ica_raw._data[0].copy()
ica_raw.notch_filter([10], trans_bandwidth=10, fir_design='firwin')
assert ((d1 != ica_raw._data[0]).any())
test_ica_fname = tmpdir.join('test-ica.fif')
ica.n_pca_components = 2
ica.method = 'fake'
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
assert (ica.n_pca_components == ica_read.n_pca_components)
assert_equal(ica.method, ica_read.method)
assert_equal(ica.labels_, ica_read.labels_)
# check type consistency
attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ '
'pca_explained_variance_ pre_whitener_')
def f(x, y):
return getattr(x, y).dtype
for attr in attrs.split():
assert_equal(f(ica_read, attr), f(ica, attr))
ica.n_pca_components = 4
ica_read.n_pca_components = 4
ica.exclude = []
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_',
'pca_mean_', 'pca_explained_variance_',
'pre_whitener_']:
assert_array_almost_equal(getattr(ica, attr), getattr(ica_read, attr))
assert (ica.ch_names == ica_read.ch_names)
assert (isinstance(ica_read.info, Info))
sources = ica.get_sources(raw)[:, :][0]
sources2 = ica_read.get_sources(raw)[:, :][0]
assert_array_almost_equal(sources, sources2)
_raw1 = ica.apply(raw.copy(), exclude=[1])
_raw2 = ica_read.apply(raw.copy(), exclude=[1])
assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0])
ica = ICA(n_components=2, method=method, max_iter=1)
with pytest.warns(None): # ICA does not converge
ica.fit(raw, picks=few_picks)
# check score funcs
for name, func in get_score_funcs().items():
if name in score_funcs_unsuited:
continue
scores = ica.score_sources(raw, target='EOG 061', score_func=func,
start=0, stop=10)
assert (ica.n_components_ == len(scores))
# check univariate stats
scores = ica.score_sources(raw, start=0, stop=50, score_func=stats.skew)
# check exception handling
pytest.raises(ValueError, ica.score_sources, raw,
target=np.arange(1))
params = []
params += [(None, -1, slice(2), [0, 1])] # variance, kurtosis params
params += [(None, 'MEG 1531')] # ECG / EOG channel params
for idx, ch_name in product(*params):
ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=ch_name,
eog_ch=ch_name, skew_criterion=idx,
var_criterion=idx, kurt_criterion=idx)
# Make sure detect_artifacts marks the right components.
# For int criterion, the doc says "E.g. range(2) would return the two
# sources with the highest score". Assert that's what it does.
# Only test for skew, since it's always the same code.
ica.exclude = []
ica.detect_artifacts(raw, start_find=0, stop_find=50, ecg_ch=None,
eog_ch=None, skew_criterion=0,
var_criterion=None, kurt_criterion=None)
assert np.abs(scores[ica.exclude]) == np.max(np.abs(scores))
evoked = epochs.average()
evoked_data = evoked.data.copy()
raw_data = raw[:][0].copy()
epochs_data = epochs.get_data().copy()
with pytest.warns(RuntimeWarning, match='longer'):
idx, scores = ica.find_bads_ecg(raw, method='ctps', threshold='auto')
assert_equal(len(scores), ica.n_components_)
with pytest.warns(RuntimeWarning, match='longer'):
idx, scores = ica.find_bads_ecg(raw, method='correlation',
threshold='auto')
assert_equal(len(scores), ica.n_components_)
with pytest.warns(RuntimeWarning, match='longer'):
idx, scores = ica.find_bads_eog(raw)
assert_equal(len(scores), ica.n_components_)
idx, scores = ica.find_bads_ecg(epochs, method='ctps', threshold='auto')
assert_equal(len(scores), ica.n_components_)
pytest.raises(ValueError, ica.find_bads_ecg, epochs.average(),
method='ctps', threshold='auto')
pytest.raises(ValueError, ica.find_bads_ecg, raw,
method='crazy-coupling')
with pytest.warns(RuntimeWarning, match='longer'):
idx, scores = ica.find_bads_eog(raw)
assert_equal(len(scores), ica.n_components_)
raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202
with pytest.warns(RuntimeWarning, match='longer'):
idx, scores = ica.find_bads_eog(raw)
assert (isinstance(scores, list))
assert_equal(len(scores[0]), ica.n_components_)
idx, scores = ica.find_bads_eog(evoked, ch_name='MEG 1441')
assert_equal(len(scores), ica.n_components_)
idx, scores = ica.find_bads_ecg(evoked, method='correlation',
threshold='auto')
assert_equal(len(scores), ica.n_components_)
assert_array_equal(raw_data, raw[:][0])
assert_array_equal(epochs_data, epochs.get_data())
assert_array_equal(evoked_data, evoked.data)
# check score funcs
for name, func in get_score_funcs().items():
if name in score_funcs_unsuited:
continue
scores = ica.score_sources(epochs_eog, target='EOG 061',
score_func=func)
assert (ica.n_components_ == len(scores))
# check univariate stats
scores = ica.score_sources(epochs, score_func=stats.skew)
# check exception handling
pytest.raises(ValueError, ica.score_sources, epochs,
target=np.arange(1))
# ecg functionality
ecg_scores = ica.score_sources(raw, target='MEG 1531',
score_func='pearsonr')
with pytest.warns(RuntimeWarning, match='longer'):
ecg_events = ica_find_ecg_events(
raw, sources[np.abs(ecg_scores).argmax()])
assert (ecg_events.ndim == 2)
# eog functionality
eog_scores = ica.score_sources(raw, target='EOG 061',
score_func='pearsonr')
with pytest.warns(RuntimeWarning, match='longer'):
eog_events = ica_find_eog_events(
raw, sources[np.abs(eog_scores).argmax()])
assert (eog_events.ndim == 2)
# Test ica fiff export
assert raw.last_samp - raw.first_samp + 1 == raw.n_times
assert raw.n_times > 100
ica_raw = ica.get_sources(raw, start=0, stop=100)
assert ica_raw.n_times == 100
assert ica_raw.last_samp - ica_raw.first_samp + 1 == 100
assert_equal(len(ica_raw._filenames), 1) # API consistency
ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch]
assert (ica.n_components_ == len(ica_chans))
test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif')
ica.n_components = np.int32(ica.n_components)
ica_raw.save(test_ica_fname, overwrite=True)
ica_raw2 = read_raw_fif(test_ica_fname, preload=True)
assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4)
ica_raw2.close()
os.remove(test_ica_fname)
# Test ica epochs export
ica_epochs = ica.get_sources(epochs)
assert (ica_epochs.events.shape == epochs.events.shape)
ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch]
assert (ica.n_components_ == len(ica_chans))
assert (ica.n_components_ == ica_epochs.get_data().shape[1])
assert (ica_epochs._raw is None)
assert (ica_epochs.preload is True)
# test float n pca components
ica.pca_explained_variance_ = np.array([0.2] * 5)
ica.n_components_ = 0
for ncomps, expected in [[0.3, 2], [0.9, 5], [1, 1]]:
ncomps_ = ica._check_n_pca_components(ncomps)
assert (ncomps_ == expected)
ica = ICA(method=method)
with pytest.warns(None): # sometimes does not converge
ica.fit(raw, picks=few_picks)
_assert_ica_attributes(ica, raw.get_data(few_picks))
with pytest.warns(RuntimeWarning, match='longer'):
ica.find_bads_ecg(raw, threshold='auto')
ica.find_bads_eog(epochs, ch_name='MEG 0121')
assert_array_equal(raw_data, raw[:][0])
raw.drop_channels(raw.ch_names[:2])
with pytest.raises(RuntimeError, match='match fitted'):
with pytest.warns(RuntimeWarning, match='longer'):
ica.find_bads_eog(raw)
with pytest.raises(RuntimeError, match='match fitted'):
with pytest.warns(RuntimeWarning, match='longer'):
ica.find_bads_ecg(raw, threshold='auto')
@requires_sklearn
@pytest.mark.slowtest
@pytest.mark.parametrize('method, cov', [
('picard', None),
('picard', test_cov_name),
('fastica', None),
])
def test_ica_cov(method, cov, tmpdir, short_raw_epochs):
"""Test ICA with cov."""
_skip_check_picard(method)
raw, epochs, epochs_eog = short_raw_epochs
if cov is not None:
cov = read_cov(cov)
# test reading and writing
test_ica_fname = tmpdir.join('test-ica.fif')
kwargs = dict(n_pca_components=4)
ica = ICA(noise_cov=cov, n_components=2, method=method, max_iter=1)
with pytest.warns(None): # ICA does not converge
ica.fit(raw, picks=np.arange(10))
_assert_ica_attributes(ica)
sources = ica.get_sources(epochs).get_data()
assert (ica.mixing_matrix_.shape == (2, 2))
assert (ica.unmixing_matrix_.shape == (2, 2))
assert (ica.pca_components_.shape == (10, 10))
assert (sources.shape[1] == ica.n_components_)
for exclude in [[], [0], np.array([1, 2, 3])]:
ica.exclude = exclude
ica.labels_ = {'foo': [0]}
ica.save(test_ica_fname)
ica_read = read_ica(test_ica_fname)
assert (list(ica.exclude) == ica_read.exclude)
assert_equal(ica.labels_, ica_read.labels_)
ica.apply(raw.copy(), **kwargs)
ica.exclude = []
ica.apply(raw.copy(), exclude=[1], **kwargs)
assert (ica.exclude == [])
ica.exclude = [0, 1]
ica.apply(raw.copy(), exclude=[1], **kwargs)
assert (ica.exclude == [0, 1])
ica_raw = ica.get_sources(raw)
assert (ica.exclude == [ica_raw.ch_names.index(e) for e in
ica_raw.info['bads']])
@requires_sklearn
@pytest.mark.parametrize("method", ("fastica", "picard", "infomax"))
@pytest.mark.parametrize("idx", (None, -1, slice(2), [0, 1]))
@pytest.mark.parametrize("ch_name", (None, 'MEG 1531'))
def test_detect_artifacts_replacement_of_run_ica(method, idx, ch_name):
"""Test replacement workflow for run_ica() function."""
_skip_check_picard(method)
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
ica = ICA(n_components=2, method=method)
ica.fit(raw)
ica.detect_artifacts(raw, start_find=0, stop_find=5, ecg_ch=ch_name,
eog_ch=ch_name, skew_criterion=idx,
var_criterion=idx, kurt_criterion=idx)
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_ica_reject_buffer(method):
"""Test ICA data raw buffer rejection."""
_skip_check_picard(method)
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
raw._data[2, 1000:1005] = 5e-12
ica = ICA(n_components=3, method=method)
with catch_logging() as drop_log:
ica.fit(raw, picks[:5], reject=dict(mag=2.5e-12), decim=2,
tstep=0.01, verbose=True, reject_by_annotation=False)
assert (raw._data[:5, ::2].shape[1] - 4 == ica.n_samples_)
log = [line for line in drop_log.getvalue().split('\n')
if 'detected' in line]
assert_equal(len(log), 1)
_assert_ica_attributes(ica)
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_ica_twice(method):
"""Test running ICA twice."""
_skip_check_picard(method)
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
raw.pick(raw.ch_names[::10])
picks = pick_types(raw.info, meg='grad', exclude='bads')
n_components = 0.99
n_pca_components = 0.9999
if method == 'fastica':
ctx = pytest.warns(None) # convergence, sometimes
else:
ctx = nullcontext()
ica1 = ICA(n_components=n_components, method=method)
with ctx:
ica1.fit(raw, picks=picks, decim=3)
raw_new = ica1.apply(raw, n_pca_components=n_pca_components)
ica2 = ICA(n_components=n_components, method=method)
with ctx:
ica2.fit(raw_new, picks=picks, decim=3)
assert_equal(ica1.n_components_, ica2.n_components_)
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard", "infomax"])
def test_fit_params(method, tmpdir):
"""Test fit_params for ICA."""
_skip_check_picard(method)
fit_params = {}
# test no side effects
ICA(fit_params=fit_params, method=method)
assert fit_params == {}
# Test I/O roundtrip.
# Only picard and infomax support the "extended" keyword, so limit the
# tests to those.
if method in ['picard', 'infomax']:
tmpdir = str(tmpdir)
output_fname = op.join(tmpdir, 'test_ica-ica.fif')
raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data()
n_components = 3
max_iter = 1
fit_params = dict(extended=True)
ica = ICA(fit_params=fit_params, n_components=n_components,
max_iter=max_iter, method=method)
fit_params_after_instantiation = ica.fit_params
if method == 'infomax':
ica.fit(raw)
else:
with pytest.warns(UserWarning, match='did not converge'):
ica.fit(raw)
ica.save(output_fname)
ica = read_ica(output_fname)
assert ica.fit_params == fit_params_after_instantiation
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
@pytest.mark.parametrize("allow_ref_meg", [True, False])
def test_bad_channels(method, allow_ref_meg):
"""Test exception when unsupported channels are used."""
_skip_check_picard(method)
chs = list(get_channel_type_constants())
info = create_info(len(chs), 500, chs)
rng = np.random.RandomState(0)
data = rng.rand(len(chs), 50)
raw = RawArray(data, info)
data = rng.rand(100, len(chs), 50)
epochs = EpochsArray(data, info)
# fake high-pass filtering
raw.info['highpass'] = 1.0
epochs.info['highpass'] = 1.0
n_components = 0.9
data_chs = list(_DATA_CH_TYPES_SPLIT + ('eog',))
if allow_ref_meg:
data_chs.append('ref_meg')
chs_bad = list(set(chs) - set(data_chs))
ica = ICA(n_components=n_components, method=method,
allow_ref_meg=allow_ref_meg)
for inst in [raw, epochs]:
for ch in chs_bad:
if allow_ref_meg:
# Test case for only bad channels
picks_bad1 = pick_types(inst.info, meg=False,
ref_meg=False,
**{str(ch): True})
# Test case for good and bad channels
picks_bad2 = pick_types(inst.info, meg=True,
ref_meg=True,
**{str(ch): True})
else:
# Test case for only bad channels
picks_bad1 = pick_types(inst.info, meg=False,
**{str(ch): True})
# Test case for good and bad channels
picks_bad2 = pick_types(inst.info, meg=True,
**{str(ch): True})
pytest.raises(ValueError, ica.fit, inst, picks=picks_bad1)
pytest.raises(ValueError, ica.fit, inst, picks=picks_bad2)
pytest.raises(ValueError, ica.fit, inst, picks=[])
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_eog_channel(method):
"""Test that EOG channel is included when performing ICA."""
_skip_check_picard(method)
raw = read_raw_fif(raw_fname, preload=True)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=True, ecg=False,
eog=True, exclude='bads')
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=None, preload=True, proj=False)
n_components = 0.9
ica = ICA(n_components=n_components, method=method)
# Test case for MEG and EOG data. Should have EOG channel
for inst in [raw, epochs]:
picks1a = pick_types(inst.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:4]
picks1b = pick_types(inst.info, meg=False, stim=False, ecg=False,
eog=True, exclude='bads')
picks1 = np.append(picks1a, picks1b)
ica.fit(inst, picks=picks1)
assert (any('EOG' in ch for ch in ica.ch_names))
_assert_ica_attributes(ica, inst.get_data(picks1), limits=(0.8, 600))
# Test case for MEG data. Should have no EOG channel
for inst in [raw, epochs]:
picks1 = pick_types(inst.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')[:5]
ica.fit(inst, picks=picks1)
_assert_ica_attributes(ica)
assert not any('EOG' in ch for ch in ica.ch_names)
@requires_sklearn
@pytest.mark.parametrize("method", ["fastica", "picard"])
def test_n_components_none(method, tmpdir):
"""Test n_components=None."""
_skip_check_picard(method)
raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data()
events = read_events(event_name)
picks = pick_types(raw.info, eeg=True, meg=False)[::5]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
n_components = None
random_state = 12345
output_fname = tmpdir.join('test_ica-ica.fif')
ica = ICA(method=method, n_components=n_components,
random_state=random_state)
with pytest.warns(None):
ica.fit(epochs)
_assert_ica_attributes(ica)
ica.save(output_fname)
ica = read_ica(output_fname)
_assert_ica_attributes(ica)
assert ica.n_pca_components is None
assert ica.n_components is None
assert ica.n_components_ == len(picks)
@requires_sklearn
@testing.requires_testing_data
def test_ica_ctf():
"""Test run ICA computation on ctf data with/without compensation."""
method = 'fastica'
raw = read_raw_ctf(ctf_fname, preload=True)
picks = sorted(set(range(0, len(raw.ch_names), 10)) |
set(pick_types(raw.info, ref_meg=True)))
raw.pick(picks)
events = make_fixed_length_events(raw, 99999)
for comp in [0, 1]:
raw.apply_gradient_compensation(comp)
epochs = Epochs(raw, events=events, tmin=-0.2, tmax=0.2, baseline=None,
preload=True)
evoked = epochs.average()
# test fit
for inst in [raw, epochs]:
ica = ICA(n_components=2, max_iter=2, method=method)
with pytest.warns(UserWarning, match='did not converge'):
ica.fit(inst)
_assert_ica_attributes(ica)
# test apply and get_sources
for inst in [raw, epochs, evoked]:
ica.apply(inst.copy())
ica.get_sources(inst)
# test mixed compensation case
raw.apply_gradient_compensation(0)
ica = ICA(n_components=2, max_iter=2, method=method)
with pytest.warns(UserWarning, match='did not converge'):
ica.fit(raw)
_assert_ica_attributes(ica)
raw.apply_gradient_compensation(1)
epochs = Epochs(raw, events=events, tmin=-0.2, tmax=0.2, baseline=None,
preload=True)
evoked = epochs.average()
for inst in [raw, epochs, evoked]:
with pytest.raises(RuntimeError, match='Compensation grade of ICA'):
ica.apply(inst.copy())
with pytest.raises(RuntimeError, match='Compensation grade of ICA'):
ica.get_sources(inst)
@requires_sklearn
@testing.requires_testing_data
def test_ica_labels():
"""Test ICA labels."""
# The CTF data are uniquely well suited to testing the ICA.find_bads_
# methods
raw = read_raw_ctf(ctf_fname, preload=True)
raw.pick_channels(raw.ch_names[:300:10] + raw.ch_names[300:])
# set the appropriate EEG channels to EOG and ECG
rename = {'EEG057': 'eog', 'EEG058': 'eog', 'EEG059': 'ecg'}
for key in rename:
assert key in raw.ch_names
raw.set_channel_types(rename)
ica = ICA(n_components=4, max_iter=2, method='fastica', allow_ref_meg=True)
with pytest.warns(UserWarning, match='did not converge'):
ica.fit(raw)
_assert_ica_attributes(ica)
ica.find_bads_eog(raw, l_freq=None, h_freq=None)
picks = list(pick_types(raw.info, meg=False, eog=True))
for idx, ch in enumerate(picks):
assert '{}/{}/{}'.format('eog', idx, raw.ch_names[ch]) in ica.labels_
assert 'eog' in ica.labels_
for key in ('ecg', 'ref_meg', 'ecg/ECG-MAG'):
assert key not in ica.labels_
ica.find_bads_ecg(raw, l_freq=None, h_freq=None, method='correlation',
threshold='auto')
picks = list(pick_types(raw.info, meg=False, ecg=True))
for idx, ch in enumerate(picks):
assert '{}/{}/{}'.format('ecg', idx, raw.ch_names[ch]) in ica.labels_
for key in ('ecg', 'eog'):
assert key in ica.labels_
for key in ('ref_meg', 'ecg/ECG-MAG'):
assert key not in ica.labels_
# derive reference ICA components and append them to raw
ica_rf = ICA(n_components=2, max_iter=2, allow_ref_meg=True)
with pytest.warns(UserWarning, match='did not converge'):
ica_rf.fit(raw.copy().pick_types(meg=False, ref_meg=True))
icacomps = ica_rf.get_sources(raw)
# rename components so they are auto-detected by find_bads_ref
icacomps.rename_channels({c: 'REF_' + c for c in icacomps.ch_names})
# and add them to raw
raw.add_channels([icacomps])
ica.find_bads_ref(raw, l_freq=None, h_freq=None, method="separate")
picks = pick_channels_regexp(raw.ch_names, 'REF_ICA*')
for idx, ch in enumerate(picks):
assert '{}/{}/{}'.format('ref_meg', idx,
raw.ch_names[ch]) in ica.labels_
ica.find_bads_ref(raw, l_freq=None, h_freq=None, method="together")
assert 'ref_meg' in ica.labels_
for key in ('ecg', 'eog', 'ref_meg'):
assert key in ica.labels_
assert 'ecg/ECG-MAG' not in ica.labels_
ica.find_bads_ecg(raw, l_freq=None, h_freq=None, threshold='auto')
for key in ('ecg', 'eog', 'ref_meg', 'ecg/ECG-MAG'):
assert key in ica.labels_
@requires_sklearn
@testing.requires_testing_data
@pytest.mark.parametrize('fname, grade', [
(fif_fname, None),
(eeglab_fname, None),
(ctf_fname2, 0),
(ctf_fname2, 1),
])
def test_ica_eeg(fname, grade):
"""Test ICA on EEG."""
method = 'fastica'
if fname.endswith('.fif'):
raw = read_raw_fif(fif_fname)
raw.pick(raw.ch_names[::5]).load_data()
raw.info.normalize_proj()
elif fname.endswith('.set'):
raw = read_raw_eeglab(input_fname=eeglab_fname, preload=True)
else:
with pytest.warns(RuntimeWarning, match='MISC channel'):
raw = read_raw_ctf(ctf_fname2)
raw.pick(raw.ch_names[:30] + raw.ch_names[30::10]).load_data()
if grade is not None:
raw.apply_gradient_compensation(grade)
events = make_fixed_length_events(raw, 99999, start=0, stop=0.3,
duration=0.1)
picks_meg = pick_types(raw.info, meg=True, eeg=False, ref_meg=False)[:2]
picks_eeg = pick_types(raw.info, meg=False, eeg=True)[:2]
picks_all = []
picks_all.extend(picks_meg)
picks_all.extend(picks_eeg)
epochs = Epochs(raw, events=events, tmin=-0.1, tmax=0.1, baseline=None,
preload=True, proj=False)
evoked = epochs.average()
for picks in [picks_meg, picks_eeg, picks_all]:
if len(picks) == 0:
continue
# test fit
for inst in [raw, epochs]:
ica = ICA(n_components=2, max_iter=2, method=method)
with pytest.warns(None):
ica.fit(inst, picks=picks, verbose=True)
_assert_ica_attributes(ica)
# test apply and get_sources
for inst in [raw, epochs, evoked]:
ica.apply(inst)
ica.get_sources(inst)
@testing.requires_testing_data
def test_read_ica_eeglab():
"""Test read_ica_eeglab function."""
fname = op.join(test_base_dir, "EEGLAB", "test_raw.set")
fname_cleaned_matlab = op.join(test_base_dir, "EEGLAB",
"test_raw.cleaned.set")
raw = read_raw_eeglab(fname, preload=True)
raw_eeg = _check_load_mat(fname, None)
raw_cleaned_matlab = read_raw_eeglab(fname_cleaned_matlab,
preload=True)
mark_to_remove = ["manual"]
comp_info = raw_eeg.marks["comp_info"]
if len(comp_info["flags"].shape) > 1:
ind_comp_to_drop = [np.where(flags)[0]
for flags, label in zip(comp_info["flags"],
comp_info["label"])
if label in mark_to_remove]
ind_comp_to_drop = np.unique(np.concatenate(ind_comp_to_drop))
else:
ind_comp_to_drop = np.where(comp_info["flags"])[0]
ica = read_ica_eeglab(fname)
_assert_ica_attributes(ica)
raw_cleaned = ica.apply(raw.copy(), exclude=ind_comp_to_drop)
assert_allclose(raw_cleaned_matlab.get_data(), raw_cleaned.get_data(),
rtol=1e-05, atol=1e-08)
@testing.requires_testing_data
def test_read_ica_eeglab_mismatch(tmpdir):
"""Test read_ica_eeglab function when there is a mismatch."""
fname_orig = op.join(test_base_dir, "EEGLAB", "test_raw.set")
base = op.basename(fname_orig)[:-3]
shutil.copyfile(fname_orig[:-3] + 'fdt', tmpdir.join(base + 'fdt'))
fname = tmpdir.join(base)
data = loadmat(fname_orig)
w = data['EEG']['icaweights'][0][0]
w[:] = np.random.RandomState(0).randn(*w.shape)
savemat(str(fname), data, appendmat=False)
assert op.isfile(fname)
with pytest.warns(RuntimeWarning, match='Mismatch.*removal.*icawinv.*'):
ica = read_ica_eeglab(fname)
_assert_ica_attributes(ica)
ica_correct = read_ica_eeglab(fname_orig)
attrs = [attr for attr in dir(ica_correct)
if attr.endswith('_') and not attr.startswith('_')]
assert 'mixing_matrix_' in attrs
assert 'unmixing_matrix_' in attrs
assert ica.labels_ == ica_correct.labels_ == {}
attrs.pop(attrs.index('labels_'))
for attr in attrs:
a, b = getattr(ica, attr), getattr(ica_correct, attr)
assert_allclose(a, b, rtol=1e-12, atol=1e-12, err_msg=attr)
def _assert_ica_attributes(ica, data=None, limits=(1.0, 70)):
"""Assert some attributes of ICA objects."""
__tracebackhide__ = True
# This tests properties, but also serves as documentation of
# the shapes these arrays can obtain and how they obtain them
# Pre-whitener
n_ch = len(ica.ch_names)
assert ica.pre_whitener_.shape == (
n_ch, n_ch if ica.noise_cov is not None else 1)
# PCA
n_pca = ica.pca_components_.shape[0]
assert ica.pca_components_.shape == (n_pca, n_ch), 'PCA shape'
assert_allclose(np.dot(ica.pca_components_, ica.pca_components_.T),
np.eye(n_pca), atol=1e-6, err_msg='PCA orthogonality')
assert ica.pca_mean_.shape == (n_ch,)
# Mixing/unmixing
assert ica.unmixing_matrix_.shape == (ica.n_components_,) * 2, \
'Unmixing shape'
assert ica.mixing_matrix_.shape == (ica.n_components_,) * 2, \
'Mixing shape'
mix_unmix = np.dot(ica.mixing_matrix_, ica.unmixing_matrix_)
s = linalg.svdvals(ica.unmixing_matrix_)
nz = len(s) - (s > s[0] * 1e-12).sum()
want = np.eye(ica.n_components_)
want[:nz] = 0
assert_allclose(mix_unmix, want, atol=1e-6, err_msg='Mixing as pinv')
assert ica.pca_explained_variance_.shape[0] >= \
ica.unmixing_matrix_.shape[1]
# our PCA components should be unit vectors (the variances get put into
# the unmixing_matrix_ to make it a whitener)
norms = np.linalg.norm(ica.pca_components_, axis=1)
assert_allclose(norms, 1.)
# let's check the whitening
if data is not None:
if data.ndim == 3:
data = data.transpose(1, 0, 2).reshape(data.shape[1], -1)
data = ica._transform_raw(RawArray(data, ica.info), 0, None)
norms = np.linalg.norm(data, axis=1)
# at least close to normal
assert norms.min() > limits[0], 'Not roughly unity'
assert norms.max() < limits[1], 'Not roughly unity'
@pytest.mark.parametrize("ch_type", ["dbs", "seeg"])
def test_ica_ch_types(ch_type):
"""Test ica with different channel types."""
# gh-8739
data = np.random.RandomState(0).randn(10, 1000)
info = create_info(10, 1000., ch_type)
raw = RawArray(data, info)
events = make_fixed_length_events(raw, 99999, start=0, stop=0.3,
duration=0.1)
epochs = Epochs(raw, events=events, tmin=-0.1, tmax=0.1, baseline=None,
preload=True, proj=False)
evoked = epochs.average()
# test fit
method = 'infomax'
for inst in [raw, epochs]:
ica = ICA(n_components=2, max_iter=2, method=method)
with pytest.warns(None):
ica.fit(inst, verbose=True)
_assert_ica_attributes(ica)
# test apply and get_sources
for inst in [raw, epochs, evoked]:
ica.apply(inst)
ica.get_sources(inst)
| bsd-3-clause |
dribnet/dagbldr | examples/faces_conditional_vae/flying_conditional_faces.py | 3 | 5177 | import argparse
import numpy as np
import os
from dagbldr.datasets import fetch_fer
from dagbldr.utils import convert_to_one_hot
from dagbldr.utils import load_checkpoint, interpolate_between_points, make_gif
parser = argparse.ArgumentParser()
parser.add_argument("saved_functions_file",
help="Saved pickle file from vae training")
parser.add_argument("--seed", "-s",
help="random seed for path calculation",
action="store", default=1979, type=int)
args = parser.parse_args()
if not os.path.exists(args.saved_functions_file):
raise ValueError("Please provide a valid path for saved pickle file!")
checkpoint_dict = load_checkpoint(args.saved_functions_file)
encode_function = checkpoint_dict["encode_function"]
decode_function = checkpoint_dict["decode_function"]
predict_function = checkpoint_dict["predict_function"]
fer = fetch_fer()
data = fer["data"]
valid_indices = fer["valid_indices"]
valid_data = data[valid_indices]
mean_norm = fer["mean0"]
pca_tf = fer["pca_matrix"]
X = valid_data - mean_norm
X = np.dot(X, pca_tf.T)
y = fer["target"][valid_indices]
n_classes = len(set(y))
random_state = np.random.RandomState(args.seed)
# number of samples
n_plot_samples = 5
# tfd dimensions
width = 48
height = 48
# Get random data samples
ind = np.arange(len(X))
random_state.shuffle(ind)
sample_X = X[ind[:n_plot_samples]]
sample_y = y[ind[:n_plot_samples]]
def gen_samples(X, y):
mu, log_sig = encode_function(X)
# No noise at test time - repeat y twice because y_pred is needed for Theano
# But it is not used unless y_sym is all -1
out, = np.dot(decode_function(mu + np.exp(log_sig), y), pca_tf) + mean_norm
return out
# VAE specific plotting
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
all_pred_y, = predict_function(X)
all_pred_y = np.argmax(all_pred_y, axis=1)
accuracy = np.mean(all_pred_y.ravel() == y.ravel())
f, axarr = plt.subplots(n_plot_samples, 2)
n_correct_to_show = n_plot_samples // 2
n_incorrect_to_show = n_plot_samples - n_correct_to_show
correct_ind = np.where(all_pred_y == y)[0]
incorrect_ind = np.where(all_pred_y != y)[0]
random_state.shuffle(correct_ind)
random_state.shuffle(incorrect_ind)
c = correct_ind[:n_correct_to_show]
i = incorrect_ind[:n_incorrect_to_show]
X_corr = X[c]
X_incorr = X[i]
X_stack = np.vstack((X_corr, X_incorr))
y_corr = convert_to_one_hot(y[c], n_classes)
y_incorr = convert_to_one_hot(y[i], n_classes)
y_stack = np.vstack((y_corr, y_incorr))
generated_X = gen_samples(X_stack, y_stack)
predicted_y = convert_to_one_hot(np.hstack((all_pred_y[c], all_pred_y[i])),
n_classes=n_classes)
for n, (X_i, y_i, sx_i, sy_i) in enumerate(
zip(np.dot(X_stack, pca_tf) + mean_norm, y_stack,
generated_X, predicted_y)):
axarr[n, 0].matshow(X_i.reshape(width, height), cmap="gray")
axarr[n, 1].matshow(sx_i.reshape(width, height), cmap="gray")
axarr[n, 0].axis('off')
axarr[n, 1].axis('off')
y_a = np.argmax(y_i)
sy_a = np.argmax(sy_i)
axarr[n, 0].text(0, 7, str(y_a), color='green')
if y_a == sy_a:
axarr[n, 1].text(0, 7, str(sy_a), color='green')
else:
axarr[n, 1].text(0, 7, str(sy_a), color='red')
f.suptitle("Validation accuracy: %s" % str(accuracy))
plt.savefig('vae_reconstruction.png')
plt.close()
# Style plotting
f, axarr = plt.subplots(n_plot_samples, n_classes + 1)
for n, (X_i, y_i) in enumerate(zip(sample_X,
convert_to_one_hot(sample_y, n_classes))):
orig_X = np.dot(X_i[None], pca_tf) + mean_norm
axarr[n, 0].matshow(orig_X.reshape(width, height), cmap="gray")
axarr[n, 0].axis('off')
fixed_mu, fixed_sigma = encode_function(X_i[None])
all_mu = fixed_mu * np.ones((n_classes, fixed_mu.shape[1])).astype(
"float32")
all_sigma = fixed_sigma * np.ones((n_classes, fixed_sigma.shape[1])).astype(
"float32")
all_classes = np.eye(n_classes).astype('int32')
all_recs, = np.dot(decode_function(all_mu + np.exp(all_sigma), all_classes),
pca_tf) + mean_norm
for j in range(1, n_classes + 1):
axarr[n, j].matshow(all_recs[j - 1].reshape(width, height), cmap="gray")
axarr[n, j].axis('off')
f.suptitle("Style variation by changing conditional")
plt.savefig('vae_style.png')
plt.close()
# Calculate noisy linear path between points in space
mus, log_sigmas = encode_function(sample_X)
n_steps = 20
mu_path = interpolate_between_points(mus, n_steps=n_steps)
log_sigma_path = interpolate_between_points(log_sigmas, n_steps=n_steps)
# Noisy path across space from one point to another
path_X = mu_path + np.exp(log_sigma_path)
path_y = np.zeros((len(path_X), n_classes), dtype="int32")
for i in range(n_plot_samples):
path_y[i * n_steps:(i + 1) * n_steps] = sample_y[i]
out, = np.dot(decode_function(path_X, path_y), pca_tf) + mean_norm
text_y = [str(np.argmax(path_y[i])) for i in range(len(path_y))]
color_y = ["white"] * len(text_y)
make_gif(out, "vae_code.gif", width, height, list_text_per_frame=text_y,
list_text_per_frame_color=color_y, delay=1, grayscale=True)
| bsd-3-clause |
MechCoder/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 19 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
dataset that explain the most shared variance between both datasets.
This is apparent in the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
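# Quick numeric check of the claim in the module docstring (a minimal
# sketch using only the score arrays computed above): within a component
# the X and Y scores are strongly correlated, whereas scores taken from
# different components are not.
for k in range(2):
    print("Comp. %d: corr(X scores, Y scores) = %.2f"
          % (k + 1, np.corrcoef(X_train_r[:, k], Y_train_r[:, k])[0, 1]))
print("X comp. 1 vs Y comp. 2: corr = %.2f"
      % np.corrcoef(X_train_r[:, 0], Y_train_r[:, 1])[0, 1])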
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
antepsis/anteplahmacun | sympy/plotting/tests/test_plot.py | 16 | 9867 | from sympy import (pi, sin, cos, Symbol, Integral, Sum, sqrt, log,
oo, LambertW, I, meijerg, exp_polar, Max, Piecewise)
from sympy.plotting import (plot, plot_parametric, plot3d_parametric_line,
plot3d, plot3d_parametric_surface)
from sympy.plotting.plot import unset_show
from sympy.utilities.pytest import skip, raises
from sympy.plotting.experimental_lambdify import lambdify
from sympy.external import import_module
from sympy.core.decorators import wraps
from tempfile import NamedTemporaryFile
import os
import sys
import warnings
class MockPrint(object):
def write(self, s):
pass
def flush(self):
pass
encoding = 'utf-8'
def disable_print(func, *args, **kwargs):
@wraps(func)
def wrapper(*args, **kwargs):
sys.stdout = MockPrint()
func(*args, **kwargs)
sys.stdout = sys.__stdout__
return wrapper
unset_show()
# XXX: We could implement this as a context manager instead (a sketch of
# that idea follows the TmpFileManager class below). That would need
# rewriting the plot_and_save() function entirely.
class TmpFileManager:
tmp_files = []
@classmethod
def tmp_file(cls, name=''):
cls.tmp_files.append(NamedTemporaryFile(prefix=name, suffix='.png').name)
return cls.tmp_files[-1]
@classmethod
def cleanup(cls):
        # use an explicit loop so the files are also removed on Python 3,
        # where map() is lazy and would never execute os.remove
        for tmp in cls.tmp_files:
            os.remove(tmp)
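# Sketch of the context-manager alternative mentioned in the XXX note above
# (illustrative only; plot_and_save() below still relies on TmpFileManager).
from contextlib import contextmanager
@contextmanager
def tmp_file_context():
    tmp_files = []
    def tmp_file(name=''):
        tmp_files.append(NamedTemporaryFile(prefix=name, suffix='.png').name)
        return tmp_files[-1]
    try:
        yield tmp_file
    finally:
        for f in tmp_files:
            if os.path.exists(f):
                os.remove(f)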
def plot_and_save(name):
tmp_file = TmpFileManager.tmp_file
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
###
# Examples from the 'introduction' notebook
###
p = plot(x)
p = plot(x*sin(x), x*cos(x))
p.extend(p)
p[0].line_color = lambda a: a
p[1].line_color = 'b'
p.title = 'Big title'
p.xlabel = 'the x axis'
p[1].label = 'straight line'
p.legend = True
p.aspect_ratio = (1, 1)
p.xlim = (-15, 20)
p.save(tmp_file('%s_basic_options_and_colors' % name))
p._backend.close()
p.extend(plot(x + 1))
p.append(plot(x + 3, x**2)[1])
p.save(tmp_file('%s_plot_extend_append' % name))
p[2] = plot(x**2, (x, -2, 3))
p.save(tmp_file('%s_plot_setitem' % name))
p._backend.close()
p = plot(sin(x), (x, -2*pi, 4*pi))
p.save(tmp_file('%s_line_explicit' % name))
p._backend.close()
p = plot(sin(x))
p.save(tmp_file('%s_line_default_range' % name))
p._backend.close()
p = plot((x**2, (x, -5, 5)), (x**3, (x, -3, 3)))
p.save(tmp_file('%s_line_multiple_range' % name))
p._backend.close()
raises(ValueError, lambda: plot(x, y))
p = plot(Piecewise((1, x > 0), (0, True)),(x,-1,1))
p.save(tmp_file('%s_plot_piecewise' % name))
p._backend.close()
#parametric 2d plots.
#Single plot with default range.
plot_parametric(sin(x), cos(x)).save(tmp_file())
#Single plot with range.
p = plot_parametric(sin(x), cos(x), (x, -5, 5))
p.save(tmp_file('%s_parametric_range' % name))
p._backend.close()
#Multiple plots with same range.
p = plot_parametric((sin(x), cos(x)), (x, sin(x)))
p.save(tmp_file('%s_parametric_multiple' % name))
p._backend.close()
#Multiple plots with different ranges.
p = plot_parametric((sin(x), cos(x), (x, -3, 3)), (x, sin(x), (x, -5, 5)))
p.save(tmp_file('%s_parametric_multiple_ranges' % name))
p._backend.close()
#depth of recursion specified.
p = plot_parametric(x, sin(x), depth=13)
p.save(tmp_file('%s_recursion_depth' % name))
p._backend.close()
#No adaptive sampling.
p = plot_parametric(cos(x), sin(x), adaptive=False, nb_of_points=500)
p.save(tmp_file('%s_adaptive' % name))
p._backend.close()
#3d parametric plots
p = plot3d_parametric_line(sin(x), cos(x), x)
p.save(tmp_file('%s_3d_line' % name))
p._backend.close()
p = plot3d_parametric_line(
(sin(x), cos(x), x, (x, -5, 5)), (cos(x), sin(x), x, (x, -3, 3)))
p.save(tmp_file('%s_3d_line_multiple' % name))
p._backend.close()
p = plot3d_parametric_line(sin(x), cos(x), x, nb_of_points=30)
p.save(tmp_file('%s_3d_line_points' % name))
p._backend.close()
# 3d surface single plot.
p = plot3d(x * y)
p.save(tmp_file('%s_surface' % name))
p._backend.close()
# Multiple 3D plots with same range.
p = plot3d(-x * y, x * y, (x, -5, 5))
p.save(tmp_file('%s_surface_multiple' % name))
p._backend.close()
# Multiple 3D plots with different ranges.
p = plot3d(
(x * y, (x, -3, 3), (y, -3, 3)), (-x * y, (x, -3, 3), (y, -3, 3)))
p.save(tmp_file('%s_surface_multiple_ranges' % name))
p._backend.close()
# Single Parametric 3D plot
p = plot3d_parametric_surface(sin(x + y), cos(x - y), x - y)
p.save(tmp_file('%s_parametric_surface' % name))
p._backend.close()
# Multiple Parametric 3D plots.
p = plot3d_parametric_surface(
(x*sin(z), x*cos(z), z, (x, -5, 5), (z, -5, 5)),
(sin(x + y), cos(x - y), x - y, (x, -5, 5), (y, -5, 5)))
p.save(tmp_file('%s_parametric_surface' % name))
p._backend.close()
###
# Examples from the 'colors' notebook
###
p = plot(sin(x))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_line_arity2' % name))
p._backend.close()
p = plot(x*sin(x), x*cos(x), (x, 0, 10))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_param_line_arity1' % name))
p[0].line_color = lambda a, b: a
p.save(tmp_file('%s_colors_param_line_arity2a' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_param_line_arity2b' % name))
p._backend.close()
p = plot3d_parametric_line(sin(x) + 0.1*sin(x)*cos(7*x),
cos(x) + 0.1*cos(x)*cos(7*x),
0.1*sin(7*x),
(x, 0, 2*pi))
p[0].line_color = lambda a: sin(4*a)
p.save(tmp_file('%s_colors_3d_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_3d_line_arity2' % name))
p[0].line_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_3d_line_arity3' % name))
p._backend.close()
p = plot3d(sin(x)*y, (x, 0, 6*pi), (y, -5, 5))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_surface_arity1' % name))
p[0].surface_color = lambda a, b: b
p.save(tmp_file('%s_colors_surface_arity2' % name))
p[0].surface_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_surface_arity3a' % name))
p[0].surface_color = lambda a, b, c: sqrt((a - 3*pi)**2 + b**2)
p.save(tmp_file('%s_colors_surface_arity3b' % name))
p._backend.close()
p = plot3d_parametric_surface(x * cos(4 * y), x * sin(4 * y), y,
(x, -1, 1), (y, -1, 1))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_param_surf_arity1' % name))
p[0].surface_color = lambda a, b: a*b
p.save(tmp_file('%s_colors_param_surf_arity2' % name))
p[0].surface_color = lambda a, b, c: sqrt(a**2 + b**2 + c**2)
p.save(tmp_file('%s_colors_param_surf_arity3' % name))
p._backend.close()
###
# Examples from the 'advanced' notebook
###
# XXX: This raises the warning "The evaluation of the expression is
# problematic. We are trying a failback method that may still work. Please
# report this as a bug." It has to use the fallback because using evalf()
# is the only way to evaluate the integral. We should perhaps just remove
# that warning.
with warnings.catch_warnings(record=True) as w:
i = Integral(log((sin(x)**2 + 1)*sqrt(x**2 + 1)), (x, 0, y))
p = plot(i, (y, 1, 5))
p.save(tmp_file('%s_advanced_integral' % name))
p._backend.close()
# Make sure no other warnings were raised
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert "The evaluation of the expression is problematic" in str(w[0].message)
s = Sum(1/x**y, (x, 1, oo))
p = plot(s, (y, 2, 10))
p.save(tmp_file('%s_advanced_inf_sum' % name))
p._backend.close()
p = plot(Sum(1/x, (x, 1, y)), (y, 2, 10), show=False)
p[0].only_integers = True
p[0].steps = True
p.save(tmp_file('%s_advanced_fin_sum' % name))
p._backend.close()
###
# Test expressions that can not be translated to np and generate complex
# results.
###
plot(sin(x) + I*cos(x)).save(tmp_file())
plot(sqrt(sqrt(-x))).save(tmp_file())
plot(LambertW(x)).save(tmp_file())
plot(sqrt(LambertW(x))).save(tmp_file())
#Characteristic function of a StudentT distribution with nu=10
plot((meijerg(((1 / 2,), ()), ((5, 0, 1 / 2), ()), 5 * x**2 * exp_polar(-I*pi)/2)
+ meijerg(((1/2,), ()), ((5, 0, 1/2), ()),
5*x**2 * exp_polar(I*pi)/2)) / (48 * pi), (x, 1e-6, 1e-2)).save(tmp_file())
def test_matplotlib():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
try:
plot_and_save('test')
finally:
# clean up
TmpFileManager.cleanup()
else:
skip("Matplotlib not the default backend")
# Tests for exception handling in experimental_lambdify
def test_experimental_lambify():
x = Symbol('x')
f = lambdify([x], Max(x, 5))
# XXX should f be tested? If f(2) is attempted, an
    # error is raised because a complex number produced during wrapping of
    # the arg is compared with an int.
assert Max(2, 5) == 5
assert Max(5, 7) == 7
x = Symbol('x-3')
f = lambdify([x], x + 1)
assert f(1) == 2
@disable_print
def test_append_issue_7140():
x = Symbol('x')
p1 = plot(x)
p2 = plot(x**2)
p3 = plot(x + 2)
# append a series
p2.append(p1[0])
assert len(p2._series) == 2
with raises(TypeError):
p1.append(p2)
with raises(TypeError):
p1.append(p2._series)
| bsd-3-clause |
beepee14/scikit-learn | sklearn/feature_selection/__init__.py | 244 | 1088 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
| bsd-3-clause |
shikhardb/scikit-learn | sklearn/tests/test_common.py | 4 | 15884 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_classification
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.cross_validation import train_test_split
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
check_dtype_object,
check_parameters_default_constructible,
check_estimator_sparse_data,
check_estimators_dtypes,
check_transformer,
check_clustering,
check_clusterer_compute_labels_predict,
check_regressors_int,
check_regressors_train,
check_regressors_pickle,
check_transformer_pickle,
check_transformers_unfitted,
check_estimators_empty_data_messages,
check_estimators_nan_inf,
check_estimators_unfitted,
check_classifiers_one_label,
check_classifiers_train,
check_classifiers_classes,
check_classifiers_input_shapes,
check_classifiers_pickle,
check_class_weight_classifiers,
check_class_weight_auto_classifiers,
check_class_weight_auto_linear_classifier,
check_estimators_overwrite_params,
check_estimators_partial_fit_n_features,
check_sparsify_coefficients,
check_classifier_data_not_an_array,
check_regressor_data_not_an_array,
check_transformer_data_not_an_array,
check_transformer_n_iter,
check_fit_score_takes_y,
check_non_transformer_estimators_n_iter,
check_regressors_no_decision_function,
check_pipeline_consistency,
CROSS_DECOMPOSITION)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.endswith("HMM") or name.startswith("_"):
continue
yield check_estimators_dtypes, name, Estimator
yield check_fit_score_takes_y, name, Estimator
yield check_dtype_object, name, Estimator
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages, name, Estimator
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency, name, Estimator
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf, name, Estimator
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params, name, Estimator
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients, name, Estimator
yield check_estimator_sparse_data, name, Estimator
def test_transformers():
# test if transformers do something sensible on training set
# also test all shapes / shape errors
transformers = all_estimators(type_filter='transformer')
for name, Transformer in transformers:
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
yield check_transformer_pickle, name, Transformer
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array, name, Transformer
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer']:
# basic tests
yield check_transformer, name, Transformer
yield check_transformers_unfitted, name, Transformer
def test_clustering():
# test if clustering algorithms do something sensible
# also test all shapes / shape errors
clustering = all_estimators(type_filter='cluster')
for name, Alg in clustering:
        # test whether any classifier overwrites its init parameters during fit
yield check_clusterer_compute_labels_predict, name, Alg
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering, name, Alg
yield check_estimators_partial_fit_n_features, name, Alg
def test_classifiers():
# test if classifiers can cope with non-consecutive classes
classifiers = all_estimators(type_filter='classifier')
for name, Classifier in classifiers:
        # test classifiers can handle non-array data
yield check_classifier_data_not_an_array, name, Classifier
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label, name, Classifier
yield check_classifiers_classes, name, Classifier
yield check_classifiers_pickle, name, Classifier
yield check_estimators_partial_fit_n_features, name, Classifier
# basic consistency testing
yield check_classifiers_train, name, Classifier
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
# test if classifiers can cope with y.shape = (n_samples, 1)
yield check_classifiers_input_shapes, name, Classifier
# test if NotFittedError is raised
yield check_estimators_unfitted, name, Classifier
def test_regressors():
regressors = all_estimators(type_filter='regressor')
# TODO: test with intercept
# TODO: test with multiple responses
for name, Regressor in regressors:
# basic testing
yield check_regressors_train, name, Regressor
yield check_regressor_data_not_an_array, name, Regressor
yield check_estimators_partial_fit_n_features, name, Regressor
yield check_regressors_no_decision_function, name, Regressor
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_regressors_pickle, name, Regressor
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int, name, Regressor
# Test if NotFittedError is raised
yield check_estimators_unfitted, name, Regressor
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_classifiers():
# test that class_weight works and that the semantics are consistent
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
classifiers = [c for c in classifiers
if 'class_weight' in c[1]().get_params().keys()]
for name, Classifier in classifiers:
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
continue
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
continue
yield check_class_weight_classifiers, name, Classifier
def test_class_weight_auto_classifiers():
# Test that class_weight="auto" improves f1-score
# This test is broken; its success depends on:
# * a rare fortuitous RNG seed for make_classification; and
# * the use of binary F1 over a seemingly arbitrary positive class for two
# datasets, and weighted average F1 for the third.
# Its expectations need to be clarified and reimplemented.
raise SkipTest('This test requires redefinition')
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
classifiers = [c for c in classifiers
if 'class_weight' in c[1]().get_params().keys()]
for n_classes, weights in zip([2, 3], [[.8, .2], [.8, .1, .1]]):
# create unbalanced dataset
X, y = make_classification(n_classes=n_classes, n_samples=200,
n_features=10, weights=weights,
random_state=0, n_informative=n_classes)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
for name, Classifier in classifiers:
if (name != "NuSVC"
# the sparse version has a parameter that doesn't do anything
and not name.startswith("RidgeClassifier")
                # RidgeClassifier behaves unexpectedly
# FIXME!
and not name.endswith("NB")):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
yield (check_class_weight_auto_classifiers, name, Classifier,
X_train, y_train, X_test, y_test, weights)
def test_class_weight_auto_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
            # Contrary to RidgeClassifierCV, LogisticRegressionCV uses actual
            # CV folds and fits a model for each CV iteration before averaging
            # the coefs. Therefore it is not expected to behave exactly like
            # the other linear models.
continue
yield check_class_weight_auto_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
    # Test that all non-transformer estimators that have a max_iter
    # attribute report an n_iter attribute of at least 1 after fitting.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
                    # Multitask models related to ENet cannot handle
                    # mono-output y.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
| bsd-3-clause |
robertnishihara/ray | python/ray/tune/examples/pbt_dcgan_mnist/common.py | 1 | 7732 | import ray
import os
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
from torch.autograd import Variable
from torch.nn import functional as F
from scipy.stats import entropy
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# Training parameters
dataroot = ray.utils.get_user_temp_dir() + os.sep
workers = 2
batch_size = 64
image_size = 32
# Number of channels in the training images. For color images this is 3
nc = 1
# Size of z latent vector (i.e. size of generator input)
nz = 100
# Size of feature maps in generator
ngf = 32
# Size of feature maps in discriminator
ndf = 32
# Beta1 hyperparam for Adam optimizers
beta1 = 0.5
# iterations of actual training in each Trainable _train
train_iterations_per_step = 5
MODEL_PATH = os.path.expanduser("~/.ray/models/mnist_cnn.pt")
def get_data_loader():
dataset = dset.MNIST(
root=dataroot,
download=True,
transform=transforms.Compose([
transforms.Resize(image_size),
transforms.ToTensor(),
transforms.Normalize((0.5, ), (0.5, )),
]))
# Create the dataloader
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=True, num_workers=workers)
return dataloader
# __GANmodel_begin__
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find("BatchNorm") != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0)
# Generator Code
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d(nz, ngf * 4, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
nn.Tanh())
def forward(self, input):
return self.main(input)
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.main = nn.Sequential(
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2), nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4), nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf * 4, 1, 4, 1, 0, bias=False), nn.Sigmoid())
def forward(self, input):
return self.main(input)
# __GANmodel_end__
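# Wiring sketch (not part of the original example): how the networks above are
# typically instantiated and initialized with weights_init. The device choice
# and the BCE criterion are illustrative assumptions; the actual setup lives in
# the accompanying Tune trainable.
def build_gan_sketch():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    netG = Generator().to(device)
    netD = Discriminator().to(device)
    # Apply the normal/constant initialization above to all Conv/BatchNorm layers.
    netG.apply(weights_init)
    netD.apply(weights_init)
    criterion = nn.BCELoss()
    return netG, netD, criterion, device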
# __INCEPTION_SCORE_begin__
class Net(nn.Module):
"""
    LeNet for MNIST classification, used for inception_score
"""
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def inception_score(imgs, mnist_model_ref, batch_size=32, splits=1):
N = len(imgs)
dtype = torch.FloatTensor
dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)
cm = ray.get(mnist_model_ref) # Get the mnist model from Ray object store.
up = nn.Upsample(size=(28, 28), mode="bilinear").type(dtype)
def get_pred(x):
x = up(x)
x = cm(x)
        return F.softmax(x, dim=1).data.cpu().numpy()
preds = np.zeros((N, 10))
for i, batch in enumerate(dataloader, 0):
batch = batch.type(dtype)
batchv = Variable(batch)
batch_size_i = batch.size()[0]
preds[i * batch_size:i * batch_size + batch_size_i] = get_pred(batchv)
# Now compute the mean kl-div
split_scores = []
for k in range(splits):
part = preds[k * (N // splits):(k + 1) * (N // splits), :]
py = np.mean(part, axis=0)
scores = []
for i in range(part.shape[0]):
pyx = part[i, :]
scores.append(entropy(pyx, py))
split_scores.append(np.exp(np.mean(scores)))
return np.mean(split_scores), np.std(split_scores)
# __INCEPTION_SCORE_end__
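# Minimal usage sketch for inception_score. Assumptions: Ray has already been
# initialized (ray.init()) elsewhere and a LeNet checkpoint trained on MNIST
# exists at MODEL_PATH; neither is guaranteed by this module itself.
def inception_score_demo(num_images=64):
    mnist_model = Net()
    mnist_model.load_state_dict(torch.load(MODEL_PATH, map_location="cpu"))
    mnist_model.eval()
    # Put the classifier in the Ray object store so it can be shared by workers.
    mnist_model_ref = ray.put(mnist_model)
    with torch.no_grad():
        fake = Generator()(torch.randn(num_images, nz, 1, 1))
    return inception_score(fake, mnist_model_ref, batch_size=32, splits=1)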
def train(netD, netG, optimG, optimD, criterion, dataloader, iteration, device,
mnist_model_ref):
real_label = 1
fake_label = 0
for i, data in enumerate(dataloader, 0):
if i >= train_iterations_per_step:
break
netD.zero_grad()
real_cpu = data[0].to(device)
b_size = real_cpu.size(0)
label = torch.full((b_size, ), real_label, device=device)
output = netD(real_cpu).view(-1)
errD_real = criterion(output, label)
errD_real.backward()
D_x = output.mean().item()
noise = torch.randn(b_size, nz, 1, 1, device=device)
fake = netG(noise)
label.fill_(fake_label)
output = netD(fake.detach()).view(-1)
errD_fake = criterion(output, label)
errD_fake.backward()
D_G_z1 = output.mean().item()
errD = errD_real + errD_fake
optimD.step()
netG.zero_grad()
label.fill_(real_label)
output = netD(fake).view(-1)
errG = criterion(output, label)
errG.backward()
D_G_z2 = output.mean().item()
optimG.step()
is_score, is_std = inception_score(fake, mnist_model_ref)
# Output training stats
if iteration % 10 == 0:
print("[%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z))"
": %.4f / %.4f \tInception score: %.4f" %
(iteration, len(dataloader), errD.item(), errG.item(), D_x,
D_G_z1, D_G_z2, is_score))
return errG.item(), errD.item(), is_score
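# Sketch (an illustration, not the original Trainable) of how train() is driven
# for one reporting step. The optimizers and the Ray object ref holding the
# MNIST classifier are assumed to be created elsewhere, e.g. inside the PBT
# Trainable of the accompanying tune script.
def training_step_sketch(netG, netD, optimG, optimD, iteration, mnist_model_ref,
                         device="cpu"):
    dataloader = get_data_loader()
    criterion = nn.BCELoss()
    return train(netD, netG, optimG, optimD, criterion, dataloader,
                 iteration, device, mnist_model_ref)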
def plot_images(dataloader):
# Plot some training images
real_batch = next(iter(dataloader))
plt.figure(figsize=(8, 8))
plt.axis("off")
plt.title("Original Images")
plt.imshow(
np.transpose(
vutils.make_grid(real_batch[0][:64], padding=2,
normalize=True).cpu(), (1, 2, 0)))
plt.show()
def demo_gan(checkpoint_paths):
img_list = []
fixed_noise = torch.randn(64, nz, 1, 1)
for netG_path in checkpoint_paths:
loadedG = Generator()
loadedG.load_state_dict(torch.load(netG_path)["netGmodel"])
with torch.no_grad():
fake = loadedG(fixed_noise).detach().cpu()
img_list.append(vutils.make_grid(fake, padding=2, normalize=True))
fig = plt.figure(figsize=(8, 8))
plt.axis("off")
ims = [[plt.imshow(np.transpose(i, (1, 2, 0)), animated=True)]
for i in img_list]
ani = animation.ArtistAnimation(
fig, ims, interval=1000, repeat_delay=1000, blit=True)
ani.save("./generated.gif", writer="imagemagick", dpi=72)
plt.show()
| apache-2.0 |
soylentdeen/BlurryApple | Control/tiptilt.py | 1 | 1849 | import scipy
import numpy
import pyfits
import matplotlib.pyplot as pyplot
from scipy.linalg import *
import datetime
import os
import re
datadir = '/home/deen/Data/GRAVITY/InteractionMatrices/'
#datafiles = os.listdir(datadir)
#datafiles = ["HODM_HighSNR_IM.fits", "HODM_HighSNR_IM_1.fits", "HODM_HighSNR_IM_2.fits", "HODM_HighSNR_IM_3.fits", "HODM_HighSNR_IM_4.fits", "HODM_HighSNR_IM_5.fits", "HODM_HighSNR_IM_6.fits", "HODM_HighSNR_IM_7.fits", "HODM_HighSNR_IM_8.fits"]
datafiles = ["HODM_rapid_016.fits","HODM_rapid_017.fits","HODM_rapid_018.fits","HODM_rapid_019.fits","HODM_rapid_020.fits","HODM_rapid_021.fits","HODM_rapid_022.fits","HODM_rapid_023.fits"]
fig = pyplot.figure(0)
fig.clear()
ax1=fig.add_axes([0.1, 0.15, 0.8, 0.4])
ax2=fig.add_axes([0.1, 0.55, 0.8, 0.4])
dates = []
amplitudes = []
tips = []
tilts = []
for df in datafiles:
#if ((df.find('TT') == -1) & (df.find("rapid") != -1)):
if True:#if ((df.find('TT') == -1) & (df.find("IM") != -1) & (df.find("fits") != -1)):
measdf = datadir+df
meas = pyfits.getdata(measdf)
head = pyfits.getheader(measdf)
dates.append(datetime.datetime.strptime(head["DATE"], "%Y-%m-%dT%H:%M:%S.%f"))
amplitudes.append(head["AMPLITUDE"])
meas = scipy.matrix(meas)
U,S,V = svd(meas)
tips.append(V[1,:])
tilts.append(V[2,:])
dates = numpy.array(dates)
chronology = dates.argsort()
for i in chronology:
#ax1.plot(tips[i], label = dates[i].strftime("Day %j"))
ax1.plot(tips[i], label = "A = "+str(amplitudes[i]))
ax2.plot(tilts[i])
box = ax1.get_position()
ax1.set_position([box.x0, box.y0+box.height*0.2, box.width, box.height*0.80])
ax1.legend(ncol=4, loc='upper center', bbox_to_anchor=(0.5, -0.05))
ax1.text(2, 0.25, 'Tip')
ax2.text(2, 0.25, 'Tilt')
fig.show()
fig.savefig("tiptilt_evolution.png")
| gpl-2.0 |
eric-haibin-lin/mxnet | example/neural_collaborative_filtering/core/load.py | 4 | 2606 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from collections import namedtuple
import pandas as pd
RatingData = namedtuple('RatingData',
['items', 'users', 'ratings', 'min_date', 'max_date'])
def describe_ratings(ratings):
info = RatingData(items=len(ratings['item_id'].unique()),
users=len(ratings['user_id'].unique()),
ratings=len(ratings),
min_date=ratings['timestamp'].min(),
max_date=ratings['timestamp'].max())
print("{ratings} ratings on {items} items from {users} users"
" from {min_date} to {max_date}"
.format(**(info._asdict())))
return info
def process_movielens(ratings, sort=True):
ratings['timestamp'] = pd.to_datetime(ratings['timestamp'], unit='s')
if sort:
ratings.sort_values(by='timestamp', inplace=True)
describe_ratings(ratings)
return ratings
def load_ml_1m(filename, sort=True):
names = ['user_id', 'item_id', 'rating', 'timestamp']
ratings = pd.read_csv(filename, sep='::', names=names, engine='python')
return process_movielens(ratings, sort=sort)
def load_ml_20m(filename, sort=True):
ratings = pd.read_csv(filename)
ratings['timestamp'] = pd.to_datetime(ratings['timestamp'], unit='s')
names = {'userId': 'user_id', 'movieId': 'item_id'}
ratings.rename(columns=names, inplace=True)
return process_movielens(ratings, sort=sort)
DATASETS = [k.replace('load_', '') for k in locals().keys() if "load_" in k]
def get_dataset_name(filename):
for dataset in DATASETS:
if dataset in filename.replace('-', '_').lower():
return dataset
raise NotImplementedError
def implicit_load(filename, sort=True):
func = globals()["load_" + get_dataset_name(filename)]
return func(filename, sort=sort)
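# Usage sketch: pick the right loader from a ratings file name and load it.
# The path below is an illustrative assumption; it presumes the MovieLens 20M
# ratings.csv has been downloaded locally.
def _example_usage(path="ml-20m/ratings.csv"):
    print("detected dataset:", get_dataset_name(path))
    ratings = implicit_load(path, sort=True)
    return ratings.head()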
| apache-2.0 |
stochasticHydroTools/RotationalDiffusion | single_non_sphere/non_sphere.py | 1 | 8764 | from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('..')
from quaternion_integrator.quaternion import Quaternion
num_blobs = 7
A = 0.265*np.sqrt(3./2.) # radius of blob in um
VISCOSITY = 8.9e-4
TOTAL_WEIGHT = 1.*0.0000000002*(9.8*1e6) # weight of entire boomerang particle
WEIGHT = [TOTAL_WEIGHT/num_blobs for i in range(num_blobs)] # weight of individual blobs
KT = 300.*1.3806488e-5
REPULSION_STRENGTH = 7.5 * KT
DEBYE_LENGTH = 0.5*A
max_height = KT/TOTAL_WEIGHT*12 + A + 4.*DEBYE_LENGTH
class InvalidProbability(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def non_sphere_GB(location, orientation):
''' Return exp(-U/kT) for the given location and orientation.'''
r_vectors = get_boomerang_r_vectors(location, orientation)
# Add gravity to potential.
#for k in range(len(r_vectors)):
# if r_vectors[k][2] < A:
# return 0.0
U = 0.
for k in range(len(r_vectors)):
if r_vectors[k][2] < A:
return 0.0
U += WEIGHT[k] * r_vectors[k][2]
h = r_vectors[k][2]
# Add repulsion to potential.
U += ( REPULSION_STRENGTH * np.exp(-1.*(h -A)/DEBYE_LENGTH) / (h-A) )
#U *= REPULSION_STRENGTH
#U += np.sum(WEIGHT[0]*r_vectors[:,2])
return np.exp(-1. * U/KT)
def get_boomerang_r_vectors(location, orientation, blob_distance = .525):
'''Get the vectors of the 7 blobs used to discretize the boomerang.
7 O
6 O
5 O
O-O-O-O
4 3 2 1
The location is the location of the Blob at the apex.
Initial configuration is in the
x-y plane, with arm 1-2-3 pointing in the positive x direction, and arm
4-5-6 pointing in the positive y direction.
    Separation between blobs is set by the blob_distance argument, 0.525 um by default.
Donev: Even if you hard code a value (which I recommend against --
you will learn some new programming skills trying to figure out how to pass this as an argument.
Ask Floren -- python supports optional arguments with default values. He has had to do something like this.
But even if hard-coded, write
const=0.525
and then use const in the code. This way you can change it with one-line.
It is a really really bad idea to hard-code values like this..
'''
num_blobs = 7
initial_configuration = [ np.array([3*blob_distance, 0., 0.]),
np.array([2*blob_distance, 0., 0.]),
np.array([blob_distance, 0., 0.]),
np.array([0., 0., 0.]),
np.array([0., blob_distance, 0.]),
np.array([0., 2*blob_distance, 0.]),
np.array([0., 3*blob_distance, 0.])]
rotation_matrix = orientation.rotation_matrix()
rotated_configuration = [np.dot(rotation_matrix,vec) + location for vec in initial_configuration]
#rotated_configuration = []
#for vec in initial_configuration:
# rotated_configuration.append(np.dot(rotation_matrix, vec)
# + location)
return rotated_configuration
def generate_non_sphere_partition(partition_steps):
partitionZ = 0.
#for i in range(100):
# new_location = [0., 0., np.random.uniform(A, max_height)]
# partitionZ += non_sphere_GB(location,new_location)
orientation = Quaternion([0,0,0,0])
new_location = np.array([0., 0., 0.])
for i in range(partition_steps):
orientation.random_orientation()
new_location[2] = np.random.uniform(A, max_height)
sample = non_sphere_GB(new_location, orientation)
if sample > partitionZ:
partitionZ = sample
return partitionZ
def non_sphere_rejection(partitionZ):
# generate heights and their corresponding probabilities until a height passes unrejected
orientation = Quaternion([0.,0.,0.,0.])
new_location = np.array([0., 0., 0.])
while True:
orientation.random_orientation()
new_location[2] = np.random.uniform(A, max_height-A)
acceptance_prob = non_sphere_GB(new_location, orientation) / partitionZ
if acceptance_prob > 1:
raise InvalidProbability('Acceptance Probability is greater than 1')
if np.random.uniform(0., 1.) < acceptance_prob: # the rejection part of the algorithm.
return [new_location, orientation]
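# Usage sketch for the rejection sampler above: estimate the partition bound
# once, then draw equilibrium (location, orientation) samples and keep the
# heights. The step counts below are illustrative assumptions, not the values
# used for the production runs.
def sample_heights_sketch(n_samples=1000, partition_steps=10000):
    partitionZ = generate_non_sphere_partition(partition_steps)
    heights = np.zeros(n_samples)
    for i in range(n_samples):
        location, orientation = non_sphere_rejection(partitionZ)
        heights[i] = location[2]
    return heights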
# calculate num_points points of the Gibbs-Boltzmann distribution directly:
# P(h) = exp(-U(h)/KT) / integral(exp(-U(h)/KT) dh)
# with the normalization integral evaluated using the trapezoidal rule
# Donev: Explain to me in person what this does
def analytical_distribution_non_sphere(num_points):
# heights are sampled evenly from the chosen bounds, using linspace
# because linspace includes starting value A, the first index in x is ignored
# if x[0] is included, then in the calculation of potential energy U, h-A = 0
# and an exception will be thrown
# x = np.linspace(A, max_height, num_points)
# orientations = []
# for i in range(num_points):
# theta = np.random.normal(0., 1., 4)
# orientations.append(Quaternion(theta/np.linalg.norm(theta)))
# y = []
# deltaX = x[1] - x[0]
# numerator, denominator = 0., 0.
#
# # add the bounds to the integral value
# # ignore x[0] = A
# integral = 0.5*(non_sphere_GB([0., 0., x[1]], orientations[0]) +
# non_sphere_GB([0., 0., max_height], orientations[num_points-1]))
# # iterate over the rest of the heights
# for k in range(2, num_points):
# integral += non_sphere_GB([0., 0., x[k]], orientations[k])
# # multiply by the change in x to complete the integral calculation
# integral *= deltaX
#
# # now that we have the partition function that the integral represents
# # we can calculate all the y positions of the distribution
# # again ignore x[0] = A
# j = 0
# for h in x[1:]:
# numerator = non_sphere_GB([0., 0., h], orientations[j])
# y.append(numerator/integral)
# j+=1
x = np.linspace(A, max_height, num_points)
y = np.zeros(num_points-1, dtype = float)
num_angles = 1000
deltaX = x[1] - x[0]
integral = .0
firstBar, lastBar = 0., 0.
orientation = Quaternion([0,0,0,0]) # create a quaternion object
for i in range(num_angles):
orientation.random_orientation()
firstBar += non_sphere_GB([0, 0, x[1]], orientation)
firstBar /= num_angles
for i in range(num_angles):
orientation.random_orientation()
lastBar += non_sphere_GB([0, 0, x[num_points-1]], orientation)
lastBar /= num_angles
integral += (firstBar + lastBar) *.5
sample_GB = np.zeros(num_angles, dtype = float)
for i in range(2, num_points-1):
for j in range(num_angles):
orientation.random_orientation()
sample_GB[j] = non_sphere_GB(np.array([0, 0, x[i]]), orientation)
integral += np.average(sample_GB)
integral *= deltaX
for i in range(x[1:].size):
numerator = 0.
for j in range(num_angles):
orientation.random_orientation()
numerator += non_sphere_GB([0., 0., x[i+1]], orientation)
numerator /= num_angles
y[i] = (numerator/integral)
return x[1:], y
# generate the histogram of the heights by reading them in from the given file
# and plot the analytical distribution curve given by x and y
# bar width h chosen to be approximately n_steps^(-1/5)
# so for 1,000,000 steps, 357 bars are used for max_height ~ 22.5 um
def plot_distribution(locationsFile, analytical_x, analytical_y, n_steps, color):
heights = np.loadtxt(locationsFile, float)
# the hist function returned a 3rd item and I'm not sure how best to handle it yet
# so there is a throwaway variable trash
numBars = int(max_height // (n_steps**(-1/5.)))
binValue, xBinLocations, trash = plt.hist(heights, numBars, normed=1, facecolor=color, alpha=0.75)
# add error bars to histogram Nx = # samples in bin h = bin width
# N = total number of samples generated
# error bar length = (4 * sqrt(Nx)) / (h*N)
binWidth = xBinLocations[1] - xBinLocations[0]
xError, yError, confidence = [], [], []
for i in range(binValue.size):
xError.append( (xBinLocations[i+1]+xBinLocations[i]) / 2) # center bin i
yError.append(binValue[i]) # height of bin i
numSamples = binWidth * binValue[i] * n_steps # samples in bin i
confidence.append( (4 * np.sqrt(numSamples)) / (binWidth * n_steps))
#plt.errorbar(xError,yError,yerr=confidence,fmt='r.')
# p, q = [], []
# skip = 1000
# size = 100000
# for i in range(0, size-skip, skip):
# average = 0.
# for j in range(i,i+skip):
# average += analytical_y[j]
# #print j
# average /= float(skip)
# #print("%f %f" % (analytical_x[i], average))
# p.append(analytical_x[i])
# q.append(average)
plt.plot(analytical_x, analytical_y, 'b.-', linewidth=1.5)
#plt.plot(p, q, 'b-', linewidth=2)
plt.title('Probability distribution of the height z of a single boomerang near a wall\n' +
'Green: histogram of sampled heights Blue: GB distribution')
plt.xlabel('z (microns)')
plt.ylabel('P(z)')
plt.axis([0, 12.5, 0, .35])
plt.show()
| gpl-3.0 |
massmutual/scikit-learn | examples/covariance/plot_covariance_estimation.py | 250 | 5070 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
  the asymptotically optimal regularization parameter (minimizing an MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
cactusbin/nyt | matplotlib/examples/pylab_examples/pie_demo2.py | 6 | 1620 | """
Make pie charts of varying size - see
http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.pie for the docstring.
This example shows basic pie charts with labels and optional features,
like autolabeling the percentage, offsetting a slice with "explode"
and adding a shadow, in different sizes.
"""
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
# Some data
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
fracs = [15, 30, 45, 10]
explode=(0, 0.05, 0, 0)
# Make square figures and axes
the_grid = GridSpec(2, 2)
plt.subplot(the_grid[0, 0], aspect=1)
plt.pie(fracs, labels=labels, autopct='%1.1f%%', shadow=True)
plt.subplot(the_grid[0, 1], aspect=1)
plt.pie(fracs, explode=explode, labels=labels, autopct='%.0f%%', shadow=True)
plt.subplot(the_grid[1, 0], aspect=1)
patches, texts, autotexts = plt.pie(fracs, labels=labels,
autopct='%.0f%%',
shadow=True, radius=0.5)
# Make the labels on the small plot easier to read.
for t in texts:
t.set_size('smaller')
for t in autotexts:
t.set_size('x-small')
autotexts[0].set_color('y')
plt.subplot(the_grid[1, 1], aspect=1)
patches, texts, autotexts = plt.pie(fracs, explode=explode,
labels=labels, autopct='%.0f%%',
shadow=False, radius=0.5)
# Turn off shadow for tiny plot
# with exploded slice.
for t in texts:
t.set_size('smaller')
for t in autotexts:
t.set_size('x-small')
autotexts[0].set_color('y')
plt.show()
| unlicense |
Titan-C/scikit-learn | sklearn/preprocessing/tests/test_data.py | 12 | 75601 | # Authors:
#
# Giorgio Patrini
#
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils import gen_batches
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import _handle_zeros_in_scale
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import QuantileTransformer
from sklearn.preprocessing.data import quantile_transform
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.exceptions import DataConversionWarning
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_predict
from sklearn.svm import SVR
from sklearn import datasets
iris = datasets.load_iris()
# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
X_2d = rng.randn(n_samples, n_features) * scales + offsets
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def _check_dim_1axis(a):
if isinstance(a, list):
return np.array(a).shape[0]
return a.shape[0]
def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,
n_samples_seen):
if batch_stop != n:
assert_equal((i + 1) * chunk_size, n_samples_seen)
else:
assert_equal(i * chunk_size + (batch_stop - batch_start),
n_samples_seen)
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
assert_equal(interact.powers_.shape, (interact.n_output_features_,
interact.n_input_features_))
def test_polynomial_feature_names():
X = np.arange(30).reshape(10, 3)
poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)
feature_names = poly.get_feature_names()
assert_array_equal(['1', 'x0', 'x1', 'x2', 'x0^2', 'x0 x1',
'x0 x2', 'x1^2', 'x1 x2', 'x2^2'],
feature_names)
poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)
feature_names = poly.get_feature_names(["a", "b", "c"])
assert_array_equal(['a', 'b', 'c', 'a^2', 'a b', 'a c', 'b^2',
'b c', 'c^2', 'a^3', 'a^2 b', 'a^2 c',
'a b^2', 'a b c', 'a c^2', 'b^3', 'b^2 c',
'b c^2', 'c^3'], feature_names)
# test some unicode
poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)
feature_names = poly.get_feature_names(
[u"\u0001F40D", u"\u262E", u"\u05D0"])
assert_array_equal([u"1", u"\u0001F40D", u"\u262E", u"\u05D0"],
feature_names)
def test_standard_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_almost_equal(scaler.mean_, X.ravel())
assert_almost_equal(scaler.scale_, np.ones(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.std(axis=0),
np.zeros_like(n_features))
else:
assert_almost_equal(scaler.mean_, X.mean())
assert_almost_equal(scaler.scale_, X.std())
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_almost_equal(scaler.mean_, 1.)
assert_almost_equal(scaler.scale_, 1.)
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), .0)
assert_equal(scaler.n_samples_seen_, X.shape[0])
def test_scale_1d():
# 1-d inputs
X_list = [1., 3., 5., 0.]
X_arr = np.array(X_list)
for X in [X_list, X_arr]:
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(), 0.0)
assert_array_almost_equal(X_scaled.std(), 1.0)
assert_array_equal(scale(X, with_mean=False, with_std=False), X)
@skip_if_32bit
def test_standard_scaler_numerical_stability():
# Test numerical stability of scaling
    # np.log(1e-5) is used because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
n_features = 5
n_samples = 4
X = rng.randn(n_samples, n_features)
    X[:, 0] = 0.0  # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_equal(scaler.n_samples_seen_, n_samples)
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
    X[:, 0] = 1.0  # first feature is a constant, non-zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_handle_zeros_in_scale():
s1 = np.array([0, 1, 2, 3])
s2 = _handle_zeros_in_scale(s1, copy=True)
assert_false(s1[0] == s2[0])
assert_array_equal(s1, np.array([0, 1, 2, 3]))
assert_array_equal(s2, np.array([1, 1, 2, 3]))
def test_minmax_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler()
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MinMaxScaler().fit(X[batch0])
scaler_incr = MinMaxScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std until the end of partial fits, and
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert_equal(scaler_batch.var_, scaler_incr.var_) # Nones
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0),
scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0),
scaler_incr.scale_) # no constants
# Test std until the end of partial fits, and
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit_numerical_stability():
    # Test if the incremental computation introduces significant errors
    # for large datasets with values of large magnitude
rng = np.random.RandomState(0)
n_features = 2
n_samples = 100
offsets = rng.uniform(-1e15, 1e15, size=n_features)
scales = rng.uniform(1e3, 1e6, size=n_features)
X = rng.randn(n_samples, n_features) * scales + offsets
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for chunk in X:
scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
    # Regardless of abs values, the incremental and batch results must agree
    # to 6 significant digits
tol = 10 ** (-6)
assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
# NOTE Be aware that for much larger offsets std is very unstable (last
# assert) while mean is OK.
# Sparse input
size = (100, 3)
scale = 1e20
X = rng.randint(0, 2, size).astype(np.float64) * scale
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
for X in [X_csr, X_csc]:
# with_mean=False is required with sparse input
scaler = StandardScaler(with_mean=False).fit(X)
scaler_incr = StandardScaler(with_mean=False)
for chunk in X:
# chunk = sparse.csr_matrix(data_chunks)
scaler_incr = scaler_incr.partial_fit(chunk)
        # Regardless of magnitude, the incremental and batch results must
        # agree to 6 significant digits
tol = 10 ** (-6)
assert_true(scaler.mean_ is not None)
assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
def test_partial_fit_sparse_input():
# Check that sparsity is not destroyed
X = np.array([[1.], [0.], [0.], [5.]])
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
for X in [X_csr, X_csc]:
X_null = null_transform.partial_fit(X).transform(X)
assert_array_equal(X_null.data, X.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_null.data)
assert_array_equal(X_orig.data, X.data)
def test_standard_scaler_trasform_with_partial_fit():
# Check some postconditions after applying partial_fit and transform
X = X_2d[:100, :]
scaler_incr = StandardScaler()
for i, batch in enumerate(gen_batches(X.shape[0], 1)):
X_sofar = X[:(i + 1), :]
chunks_copy = X_sofar.copy()
scaled_batch = StandardScaler().fit_transform(X_sofar)
scaler_incr = scaler_incr.partial_fit(X[batch])
scaled_incr = scaler_incr.transform(X_sofar)
assert_array_almost_equal(scaled_batch, scaled_incr)
assert_array_almost_equal(X_sofar, chunks_copy) # No change
right_input = scaler_incr.inverse_transform(scaled_incr)
assert_array_almost_equal(X_sofar, right_input)
zero = np.zeros(X.shape[1])
epsilon = np.nextafter(0, 1)
        assert_array_less(zero, scaler_incr.var_ + epsilon)  # i.e. less than or equal
assert_array_less(zero, scaler_incr.scale_ + epsilon)
        # (i + 1) because the scaler has already been fitted
assert_equal((i + 1), scaler_incr.n_samples_seen_)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
scaler = MinMaxScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(X_scaled.min(axis=0),
np.zeros(n_features))
assert_array_almost_equal(X_scaled.max(axis=0),
np.zeros(n_features))
else:
assert_array_almost_equal(X_scaled.min(axis=0), .0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# Function interface
X_1d = X_1row.ravel()
min_ = X_1d.min()
max_ = X_1d.max()
assert_array_almost_equal((X_1d - min_) / (max_ - min_),
minmax_scale(X_1d, copy=True))
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
assert_raises(ValueError, StandardScaler().fit, X_csc)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
    # test that scaler converts integer input to floating point
    # for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
X_csc_copy = X_csc.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csc)
assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
assert_raises(ValueError, scale, X_csc, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csc)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
assert_raises(ValueError, scaler.transform, X_csc)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
X_transformed_csc = sparse.csc_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csc)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [[np.nan, 5, 6, 7, 8]]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [[np.inf, 5, 6, 7, 8]]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_robust_scaler_2d_arrays():
# Test robust scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_transform_one_row_csr():
# Check RobustScaler on transforming csr matrix with one row
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
single_row = np.array([[0.1, 1., 2., 0., -1.]])
scaler = RobustScaler(with_centering=False)
scaler = scaler.fit(X)
row_trans = scaler.transform(sparse.csr_matrix(single_row))
row_expected = single_row / scaler.scale_
assert_array_almost_equal(row_trans.toarray(), row_expected)
row_scaled_back = scaler.inverse_transform(row_trans)
assert_array_almost_equal(single_row, row_scaled_back.toarray())
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_iris_quantiles():
X = iris.data
scaler = RobustScaler(quantile_range=(10, 90))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(10, 90), axis=0)
q_range = q[1] - q[0]
assert_array_almost_equal(q_range, 1)
def test_quantile_transform_iris():
X = iris.data
# uniform output distribution
transformer = QuantileTransformer(n_quantiles=30)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# normal output distribution
transformer = QuantileTransformer(n_quantiles=30,
output_distribution='normal')
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure it is possible to take the inverse of a sparse matrix
# which contain negative value; this is the case in the iris dataset
X_sparse = sparse.csc_matrix(X)
X_sparse_tran = transformer.fit_transform(X_sparse)
X_sparse_tran_inv = transformer.inverse_transform(X_sparse_tran)
assert_array_almost_equal(X_sparse.A, X_sparse_tran_inv.A)
def test_quantile_transform_check_error():
X = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X = sparse.csc_matrix(X)
X_neg = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[-2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X_neg = sparse.csc_matrix(X_neg)
assert_raises_regex(ValueError, "Invalid value for 'n_quantiles': 0.",
QuantileTransformer(n_quantiles=0).fit, X)
assert_raises_regex(ValueError, "Invalid value for 'subsample': 0.",
QuantileTransformer(subsample=0).fit, X)
assert_raises_regex(ValueError, "The number of quantiles cannot be"
" greater than the number of samples used. Got"
" 1000 quantiles and 10 samples.",
QuantileTransformer(subsample=10).fit, X)
transformer = QuantileTransformer(n_quantiles=10)
assert_raises_regex(ValueError, "QuantileTransformer only accepts "
"non-negative sparse matrices.",
transformer.fit, X_neg)
transformer.fit(X)
assert_raises_regex(ValueError, "QuantileTransformer only accepts "
"non-negative sparse matrices.",
transformer.transform, X_neg)
X_bad_feat = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
assert_raises_regex(ValueError, "X does not have the same number of "
"features as the previously fitted data. Got 2"
" instead of 3.",
transformer.transform, X_bad_feat)
assert_raises_regex(ValueError, "X does not have the same number of "
"features as the previously fitted data. Got 2"
" instead of 3.",
transformer.inverse_transform, X_bad_feat)
transformer = QuantileTransformer(n_quantiles=10,
output_distribution='rnd')
# check that an error is raised at fit time
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.fit, X)
# check that an error is raised at transform time
transformer.output_distribution = 'uniform'
transformer.fit(X)
X_tran = transformer.transform(X)
transformer.output_distribution = 'rnd'
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.transform, X)
# check that an error is raised at inverse_transform time
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.inverse_transform, X_tran)
def test_quantile_transform_sparse_ignore_zeros():
X = np.array([[0, 1],
[0, 0],
[0, 2],
[0, 2],
[0, 1]])
X_sparse = sparse.csc_matrix(X)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
    # dense case -> a warning is raised
assert_warns_message(UserWarning, "'ignore_implicit_zeros' takes effect"
" only with sparse matrix. This parameter has no"
" effect.", transformer.fit, X)
X_expected = np.array([[0, 0],
[0, 0],
[0, 1],
[0, 1],
[0, 0]])
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
# consider the case where sparse entries are missing values and user-given
# zeros are to be considered
X_data = np.array([0, 0, 1, 0, 2, 2, 1, 0, 1, 2, 0])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0., 0.5],
[0., 0.],
[0., 1.],
[0., 1.],
[0., 0.5],
[0., 0.],
[0., 0.5],
[0., 1.],
[0., 0.]])
assert_almost_equal(X_expected, X_trans.A)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
X_data = np.array([-1, -1, 1, 0, 0, 0, 1, -1, 1])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0, 1],
[0, 0.375],
[0, 0.375],
[0, 0.375],
[0, 1],
[0, 0],
[0, 1]])
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
# check in conjunction with subsampling
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5,
subsample=8,
random_state=0)
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
def test_quantile_transform_dense_toy():
X = np.array([[0, 2, 2.6],
[25, 4, 4.1],
[50, 6, 2.3],
[75, 8, 9.5],
[100, 10, 0.1]])
transformer = QuantileTransformer(n_quantiles=5)
transformer.fit(X)
    # using a uniform output, each entry of X should be mapped between 0 and 1
# and equally spaced
X_trans = transformer.fit_transform(X)
X_expected = np.tile(np.linspace(0, 1, num=5), (3, 1)).T
assert_almost_equal(np.sort(X_trans, axis=0), X_expected)
X_test = np.array([
[-1, 1, 0],
[101, 11, 10],
])
X_expected = np.array([
[0, 0, 0],
[1, 1, 1],
])
assert_array_almost_equal(transformer.transform(X_test), X_expected)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
def test_quantile_transform_subsampling():
    # Test that subsampling the input yields consistent results. We check
# that the computed quantiles are almost mapped to a [0, 1] vector where
# values are equally spaced. The infinite norm is checked to be smaller
# than a given threshold. This is repeated 5 times.
# dense support
n_samples = 1000000
n_quantiles = 1000
X = np.sort(np.random.sample((n_samples, 1)), axis=0)
ROUND = 5
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert_true(inf_norm < 1e-2)
inf_norm_arr.append(inf_norm)
    # each random subsampling yields a unique approximation to the expected
# linspace CDF
assert_equal(len(np.unique(inf_norm_arr)), len(inf_norm_arr))
# sparse support
# TODO: rng should be seeded once we drop support for older versions of
# scipy (< 0.13) that don't support seeding.
X = sparse.rand(n_samples, 1, density=.99, format='csc')
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert_true(inf_norm < 1e-1)
inf_norm_arr.append(inf_norm)
    # each random subsampling yields a unique approximation to the expected
# linspace CDF
assert_equal(len(np.unique(inf_norm_arr)), len(inf_norm_arr))
def test_quantile_transform_sparse_toy():
X = np.array([[0., 2., 0.],
[25., 4., 0.],
[50., 0., 2.6],
[0., 0., 4.1],
[0., 6., 0.],
[0., 8., 0.],
[75., 0., 2.3],
[0., 10., 0.],
[0., 0., 9.5],
[100., 0., 0.1]])
X = sparse.csc_matrix(X)
transformer = QuantileTransformer(n_quantiles=10)
transformer.fit(X)
X_trans = transformer.fit_transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
transformer_dense = QuantileTransformer(n_quantiles=10).fit(
X.toarray())
X_trans = transformer_dense.transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer_dense.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
def test_quantile_transform_axis1():
X = np.array([[0, 25, 50, 75, 100],
[2, 4, 6, 8, 10],
[2.6, 4.1, 2.3, 9.5, 0.1]])
X_trans_a0 = quantile_transform(X.T, axis=0, n_quantiles=5)
X_trans_a1 = quantile_transform(X, axis=1, n_quantiles=5)
assert_array_almost_equal(X_trans_a0, X_trans_a1.T)
def test_quantile_transform_bounds():
    # Lower and upper bounds are manually mapped. We check that in the case
    # of a constant feature and a binary feature, the bounds are properly mapped.
X_dense = np.array([[0, 0],
[0, 0],
[1, 0]])
X_sparse = sparse.csc_matrix(X_dense)
# check sparse and dense are consistent
X_trans = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_dense)
assert_array_almost_equal(X_trans, X_dense)
X_trans_sp = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_sparse)
assert_array_almost_equal(X_trans_sp.A, X_dense)
assert_array_almost_equal(X_trans, X_trans_sp.A)
# check the consistency of the bounds by learning on 1 matrix
# and transforming another
X = np.array([[0, 1],
[0, 0.5],
[1, 0]])
X1 = np.array([[0, 0.1],
[0, 0.5],
[1, 0.1]])
transformer = QuantileTransformer(n_quantiles=3).fit(X)
X_trans = transformer.transform(X1)
assert_array_almost_equal(X_trans, X1)
# check that values outside of the range learned will be mapped properly.
X = np.random.random((1000, 1))
transformer = QuantileTransformer()
transformer.fit(X)
assert_equal(transformer.transform(-10), transformer.transform(np.min(X)))
assert_equal(transformer.transform(10), transformer.transform(np.max(X)))
assert_equal(transformer.inverse_transform(-10),
transformer.inverse_transform(
np.min(transformer.references_)))
assert_equal(transformer.inverse_transform(10),
transformer.inverse_transform(
np.max(transformer.references_)))
def test_quantile_transform_and_inverse():
# iris dataset
X = iris.data
transformer = QuantileTransformer(n_quantiles=1000, random_state=0)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
def test_robust_scaler_invalid_range():
for range_ in [
(-1, 90),
(-2, -3),
(10, 101),
(100.5, 101),
(90, 50),
]:
scaler = RobustScaler(quantile_range=range_)
assert_raises_regex(ValueError, 'Invalid quantile range: \(',
scaler.fit, iris.data)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# null scale
X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
# Check RobustScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
# Check MaxAbsScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# function interface
X_trans = maxabs_scale(X)
assert_array_almost_equal(X_trans, X_expected)
# sparse data
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_trans_csr = scaler.fit_transform(X_csr)
X_trans_csc = scaler.fit_transform(X_csc)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans_csr.A, X_expected)
assert_array_almost_equal(X_trans_csc.A, X_expected)
X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)
X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)
assert_array_almost_equal(X, X_trans_csr_inv.A)
assert_array_almost_equal(X, X_trans_csc_inv.A)
def test_maxabs_scaler_large_negative_value():
# Check MaxAbsScaler on toy data with a large negative value
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_maxabs_scaler_transform_one_row_csr():
# Check MaxAbsScaler on transforming csr matrix with one row
X = sparse.csr_matrix([[0.5, 1., 1.]])
scaler = MaxAbsScaler()
scaler = scaler.fit(X)
X_trans = scaler.transform(X)
X_expected = sparse.csr_matrix([[1., 1., 1.]])
assert_array_almost_equal(X_trans.toarray(), X_expected.toarray())
X_scaled_back = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_scaled_back.toarray())
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_maxabs_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
scaler = MaxAbsScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),
np.ones(n_features))
else:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MaxAbsScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# function interface
X_1d = X_1row.ravel()
max_abs = np.abs(X_1d).max()
assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
def test_maxabs_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d[:100, :]
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
        # Test max_abs_ at the end of the process
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler()
scaler_incr_csr = MaxAbsScaler()
scaler_incr_csc = MaxAbsScaler()
for batch in gen_batches(n, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
X_csr = sparse.csr_matrix(X[batch])
scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
X_csc = sparse.csc_matrix(X[batch])
scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csc.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csc.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Test max_abs_ after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MaxAbsScaler().fit(X[batch0])
scaler_incr = MaxAbsScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Test max_abs_ until the end of partial fits
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
rs = np.random.RandomState(0)
X_dense = rs.randn(10, 5)
X_sparse = sparse.csr_matrix(X_dense)
ones = np.ones((10))
for X in (X_dense, X_sparse):
for dtype in (np.float32, np.float64):
for norm in ('l1', 'l2'):
X = X.astype(dtype)
X_norm = normalize(X, norm=norm)
assert_equal(X_norm.dtype, dtype)
X_norm = toarray(X_norm)
if norm == 'l1':
row_sums = np.abs(X_norm).sum(axis=1)
else:
X_norm_squared = X_norm**2
row_sums = X_norm_squared.sum(axis=1)
assert_array_almost_equal(row_sums, ones)
# Test return_norm
X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]])
for norm in ('l1', 'l2', 'max'):
_, norms = normalize(X_dense, norm=norm, return_norm=True)
if norm == 'l1':
assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0]))
elif norm == 'l2':
assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127]))
else:
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
X_sparse = sparse.csr_matrix(X_dense)
for norm in ('l1', 'l2'):
assert_raises(NotImplementedError, normalize, X_sparse,
norm=norm, return_norm=True)
_, norms = normalize(X_sparse, norm='max', return_norm=True)
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
binarizer = Binarizer(copy=False)
X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
X_bin = binarizer.transform(X_float)
if init is not list:
assert_true(X_bin is X_float)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_cv_pipeline_precomputed():
# Cross-validate a regression on four coplanar points with the same
# value. Use precomputed kernel to ensure Pipeline with KernelCenterer
# is treated as a _pairwise operation.
X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]])
y_true = np.ones((4,))
K = X.dot(X.T)
kcent = KernelCenterer()
pipeline = Pipeline([("kernel_centerer", kcent), ("svr", SVR())])
# did the pipeline set the _pairwise attribute?
assert_true(pipeline._pairwise)
# test cross-validation, score should be almost perfect
# NB: this test is pretty vacuous -- it's mainly to test integration
# of Pipeline and KernelCenterer
y_pred = cross_val_predict(pipeline, K, y_true, cv=2)
assert_array_almost_equal(y_true, y_pred)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
error_msg = "unknown categorical feature present \[2\] during transform."
assert_raises_regex(ValueError, error_msg, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def test_transform_selected_copy_arg():
# transformer that alters X
def _mutating_transformer(X):
X[0, 0] = X[0, 0] + 1
return X
original_X = np.asarray([[1, 2], [3, 4]])
expected_Xtr = [[2, 2], [3, 4]]
X = original_X.copy()
Xtr = _transform_selected(X, _mutating_transformer, copy=True,
selected='all')
assert_array_equal(toarray(X), toarray(original_X))
assert_array_equal(toarray(Xtr), expected_Xtr)
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]]))
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
def test_fit_cold_start():
X = iris.data
X_2d = X[:, :2]
# Scalers that have a partial_fit method
scalers = [StandardScaler(with_mean=False, with_std=False),
MinMaxScaler(),
MaxAbsScaler()]
for scaler in scalers:
scaler.fit_transform(X)
# with a different shape, this may break the scaler unless the internal
# state is reset
scaler.fit_transform(X_2d)
def test_quantile_transform_valid_axis():
X = np.array([[0, 25, 50, 75, 100],
[2, 4, 6, 8, 10],
[2.6, 4.1, 2.3, 9.5, 0.1]])
assert_raises_regex(ValueError, "axis should be either equal to 0 or 1"
". Got axis=2", quantile_transform, X.T, axis=2)
| bsd-3-clause |
jorik041/scikit-learn | examples/hetero_feature_union.py | 288 | 6236 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to sklearn feature
    matrices (where the first index corresponds to sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/test_msgpack/test_unpack.py | 9 | 1993 | from io import BytesIO
import sys
from pandas.msgpack import Unpacker, packb, OutOfData, ExtType
import pandas.util.testing as tm
import nose
class TestUnpack(tm.TestCase):
def test_unpack_array_header_from_file(self):
f = BytesIO(packb([1, 2, 3, 4]))
unpacker = Unpacker(f)
assert unpacker.read_array_header() == 4
assert unpacker.unpack() == 1
assert unpacker.unpack() == 2
assert unpacker.unpack() == 3
assert unpacker.unpack() == 4
self.assertRaises(OutOfData, unpacker.unpack)
def test_unpacker_hook_refcnt(self):
if not hasattr(sys, 'getrefcount'):
raise nose.SkipTest('no sys.getrefcount()')
result = []
def hook(x):
result.append(x)
return x
basecnt = sys.getrefcount(hook)
up = Unpacker(object_hook=hook, list_hook=hook)
assert sys.getrefcount(hook) >= basecnt + 2
up.feed(packb([{}]))
up.feed(packb([{}]))
assert up.unpack() == [{}]
assert up.unpack() == [{}]
assert result == [{}, [{}], {}, [{}]]
del up
assert sys.getrefcount(hook) == basecnt
def test_unpacker_ext_hook(self):
class MyUnpacker(Unpacker):
def __init__(self):
super(MyUnpacker, self).__init__(ext_hook=self._hook,
encoding='utf-8')
def _hook(self, code, data):
if code == 1:
return int(data)
else:
return ExtType(code, data)
unpacker = MyUnpacker()
unpacker.feed(packb({'a': 1}, encoding='utf-8'))
assert unpacker.unpack() == {'a': 1}
unpacker.feed(packb({'a': ExtType(1, b'123')}, encoding='utf-8'))
assert unpacker.unpack() == {'a': 123}
unpacker.feed(packb({'a': ExtType(2, b'321')}, encoding='utf-8'))
assert unpacker.unpack() == {'a': ExtType(2, b'321')}
| gpl-3.0 |
izaid/dynd-python | dynd/benchmarks/benchrun.py | 8 | 5322 | """
Benchrun is a Python script for defining and running performance benchmarks.
It allows you to run a benchmark for different versions of the code and for
different values of an input parameter, and automatically generates tables
that compare the results.
A benchmark is defined by creating a subclass of Benchmark.
The subclass should define a method run() that executes the code
to be timed and returns the elapsed time in seconds (as a float),
or None if the benchmark should be skipped.
This file was originally taken from https://code.google.com/p/benchrun/ under the MIT License,
but has been modified since.
"""
from __future__ import print_function
import math
import sys
if sys.platform=='win32':
from time import clock
else:
from time import time as clock
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/302478
def combinations(*seqin):
def rloop(seqin,comb):
if seqin:
for item in seqin[0]:
newcomb = comb + [item]
for item in rloop(seqin[1:],newcomb):
yield item
else:
yield comb
return rloop(seqin,[])
def _mean(n = 10):
def wrap(func):
def wrapper(*args, **kwds):
results = [func(*args, **kwds) for i in range(n)]
return math.fsum(results) / n
return wrapper
return wrap
def mean(callable_or_value):
if callable(callable_or_value):
return _mean()(callable_or_value)
return _mean(callable_or_value)
def _median(n = 10):
def wrap(func):
def wrapper(*args, **kwds):
results = sorted(func(*args, **kwds) for i in range(n))
i = n // 2
if n % 2 == 1:
return results[i]
return (results[i - 1] + results[i]) / 2.0
return wrapper
return wrap
def median(callable_or_value):
if callable(callable_or_value):
return _median()(callable_or_value)
return _median(callable_or_value)
class Benchmark:
sort_by = []
reference = None
def __init__(self):
self.pnames = []
self.pvalues = []
self.results = []
self.results_dict = {}
for pname in self.parameters:
value = getattr(self, pname)
self.pnames.append(pname)
self.pvalues.append(value)
self.pcombos = list(combinations(*self.pvalues))
if self.reference:
self.reference_param = self.reference[0]
self.reference_value = self.reference[1]
def time_all(self):
"""Run benchmark for all versions and parameters."""
for params in self.pcombos:
args = dict(zip(self.pnames, params))
t = self.run(**args)
self.results.append(tuple(params) + (t,))
self.results_dict[tuple(params)] = t
def sort_results(self):
sort_keys = []
for name in self.sort_by:
sort_keys += [self.pnames.index(name)]
for i, name in enumerate(self.pnames):
if i not in sort_keys:
sort_keys += [i]
def key(v):
return list(v[i] for i in sort_keys)
self.results.sort(key=key)
def get_factor(self, pvalues, time):
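        # Speed-up factor relative to the configured reference parameter value:
        # reference_time / time.  Returns None for the reference row itself,
        # or when either timing is unavailable.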
if not self.reference or not time:
return None
pvalues = list(pvalues)
i = self.pnames.index(self.reference_param)
if pvalues[i] == self.reference_value:
return None
else:
pvalues[i] = self.reference_value
ref = self.results_dict[tuple(pvalues)]
if ref == None:
return None
return ref / time
def print_result(self):
"""Run benchmark for all versions and parameters and print results
in tabular form to the standard output."""
self.time_all()
self.sort_results()
print("=" * 78)
print()
print(self.__class__.__name__)
print(self.__doc__, "\n")
colwidth = 15
reftimes = {}
ts = "seconds"
if self.reference:
ts += " (x faster than " + (str(self.reference_value)) + ")"
print(" ", " ".join([str(r).ljust(colwidth) for r in self.pnames + [ts]]))
print("-"*79)
rows = []
for vals in self.results:
pvalues = vals[:-1]
time = vals[-1]
if time == None:
stime = "(n/a)"
else:
stime = "%.8f" % time
factor = self.get_factor(pvalues, time)
if factor != None:
stime += (" (%.2f)" % factor)
vals = pvalues + (stime,)
row = [str(val).ljust(colwidth) for val in vals]
print(" ", " ".join(row))
print()
def plot_result(self, loglog = False):
import matplotlib
import matplotlib.pyplot
self.time_all()
self.sort_results()
if loglog:
from matplotlib.pyplot import loglog as plot
else:
from matplotlib.pyplot import plot
plot(*zip(*self.results), label = self.__class__.__name__, marker = "o", linestyle = '--', linewidth = 2)
matplotlib.pyplot.xlabel(self.pnames[0])
matplotlib.pyplot.ylabel("seconds")
matplotlib.pyplot.legend(loc = 2, markerscale = 0)
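

# A minimal example benchmark, given purely as an illustration of the
# convention described in the module docstring: subclass Benchmark, list the
# swept parameter names in `parameters`, expose the candidate values as class
# attributes, and return the elapsed time in seconds from run().  The class
# name and parameter values are hypothetical, not part of the original
# benchrun API.
class ExampleSumBenchmark(Benchmark):
    """Time Python's built-in sum() over lists of increasing length."""
    parameters = ['n']
    n = [10 ** 3, 10 ** 4, 10 ** 5]

    @median
    def run(self, n):
        data = list(range(n))
        start = clock()
        sum(data)
        return clock() - start


if __name__ == '__main__':
    # Prints a timing table with one row per value of n.
    ExampleSumBenchmark().print_result()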
| bsd-2-clause |
jones139/OpenSeizureDetector | fitbit_version/sd/sd/main.py | 2 | 7082 | from __future__ import print_function
import datetime
import os
import sys
import time
import uuid
import logging
import logging.handlers
logger = logging.getLogger(__name__)
import requests
#from .conversation import Conversation
from .config import Config, ConfigError
from .tracker import FitbitClient
from .utils import a2x
from . import dongle as dgl
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import Image
FitBitUUID = uuid.UUID('{ADAB0000-6E7D-4601-BDA2-BFFAA68956BA}')
def syncAllTrackers(config):
logger.debug('%s initialising', os.path.basename(sys.argv[0]))
dongle = dgl.FitBitDongle(config.logSize)
if not dongle.setup():
logger.error("No dongle connected, aborting")
return
fitbit = FitbitClient(dongle)
if not fitbit.disconnect():
logger.error("Dirty state, not able to start synchronisation.")
fitbit.exhaust()
return
if not fitbit.getDongleInfo():
logger.warning('Failed to get connected Fitbit dongle information')
logger.info('Discovering trackers to synchronize')
trackers = [t for t in fitbit.discover(FitBitUUID)]
logger.info('%d trackers discovered', len(trackers))
for tracker in trackers:
logger.debug('Discovered tracker with ID %s',
a2x(tracker.id, delim=""))
for tracker in trackers:
trackerid = a2x(tracker.id, delim="")
if (trackerid=="3C971E6292CA"):
logger.info('Attempting to synchronize tracker %s', trackerid)
logger.debug('Establishing link with tracker')
if not (fitbit.establishLink(tracker) and fitbit.toggleTxPipe(True)
and fitbit.initializeAirlink(tracker)):
logger.warning('Unable to connect with tracker %s. Skipping',
trackerid)
tracker.status = 'Unable to establish a connection.'
yield tracker
continue
# create output directory if necessary
dirname = os.path.expanduser(os.path.join(config.dumpDir,
trackerid))
if not os.path.exists(dirname):
logger.debug("Creating non-existent directory for dumps %s",
dirname)
os.makedirs(dirname)
# Collect several dumps in rapid succession.
dumparr = []
for ndump in range(0,300):
logger.info('Getting data from tracker')
dump = fitbit.getDump(fitbit.MICRODUMP)
if dump is None:
logger.error("Error downloading the dump from tracker")
tracker.status = "Failed to download the dump"
yield tracker
continue
filename = os.path.join(dirname, 'dump-%d-%d.txt' %
(int(time.time()),ndump))
#dump.toFile(filename)
dumparr.append(dump.data)
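            # Once all dumps are collected, the raw payload bytes are stacked
            # row-wise (one row per dump) and rendered as a grayscale image.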
dumparr_np = np.array(dumparr)
print(dumparr_np.shape)
img = Image.fromarray(dumparr_np.astype(np.uint8))
img.save("image.png")
dumparr_np = dumparr_np/255.
print(dumparr_np)
imgplot = plt.imshow(dumparr_np)
plt.show()
logger.debug('Disconnecting from tracker')
if not (fitbit.terminateAirlink() and fitbit.toggleTxPipe(False) and fitbit.ceaseLink()):
logger.warning('Error while disconnecting from tracker %s',
trackerid)
tracker.status += " (Error disconnecting)"
yield tracker
PERMISSION_DENIED_HELP = """
To be able to run the fitbit utility as a non-privileged user, you first
should install a 'udev rule' that lowers the permissions needed to access the
fitbit dongle. In order to do so, as root, create the file
/etc/udev/rules.d/99-fitbit.rules with the following content (in one line):
SUBSYSTEM=="usb", ATTR{idVendor}=="%(VID)x", ATTR{idProduct}=="%(PID)x", SYMLINK+="fitbit", MODE="0666"
The dongle must then be removed and reinserted to receive the new permissions.""" % {
'VID': dgl.FitBitDongle.VID, 'PID': dgl.FitBitDongle.PID}
def sync(config):
statuses = []
try:
for tracker in syncAllTrackers(config):
statuses.append("Tracker: %s: %s" % (a2x(tracker.id, ''),
tracker.status))
except dgl.PermissionDeniedException:
print(PERMISSION_DENIED_HELP)
return
print('\n'.join(statuses))
def main():
""" This is the entry point """
# Set the null handler to avoid complaining about no handler presents
logging.getLogger("sd").addHandler(logging.NullHandler())
try:
config = Config()
config.parseSystemConfig()
config.parseUserConfig()
# This gives us the config file name
config.parseArgs()
if config.rcConfigName:
config.load(config.rcConfigName)
# We need to re-apply our arguments as last
config.applyArgs()
except ConfigError as e:
print(e, file=sys.stderr)
sys.exit(os.EX_CONFIG)
# --- All logging actions before this line are not active ---
# This means that the whole Config parsing is not logged because we don't
# know which logLevel we should use.
if config.syslog:
# Syslog messages must have the time/name first.
format = ('%(asctime)s ' + 'sd' + ': '
'%(levelname)s: %(module)s: %(message)s')
# TODO: Make address into a config option.
handler = logging.handlers.SysLogHandler(
address='/dev/log',
facility=logging.handlers.SysLogHandler.LOG_DAEMON)
handler.setFormatter(logging.Formatter(fmt=format))
core_logger = logging.getLogger('sd')
core_logger.handlers = []
core_logger.addHandler(handler)
core_logger.setLevel(config.logLevel)
else:
format = '%(asctime)s:%(levelname)s: %(message)s'
logging.basicConfig(format=format, level=config.logLevel)
# --- All logger actions from now on will be effective ---
logger.debug("Configuration: %s", config)
#ui = InteractiveUI(config.hardcoded_ui)
try:
sync(config)
except:
logger.critical("# A serious error happened, which is probably due to a")
logger.critical("# programming error. Please open a new issue with the following")
logger.critical("# information on the galileo bug tracker:")
logger.critical("# https://bitbucket.org/benallard/galileo/issues/new")
if hasattr(dgl, 'log'):
logger.critical('# Last communications:')
for comm in dgl.log.getData():
dir, dat = comm
logger.critical('# %s %s' % ({dgl.IN: '<', dgl.OUT: '>'}.get(dir, '-'), a2x(dat or [])))
logger.critical("#", exc_info=True)
sys.exit(os.EX_SOFTWARE)
| gpl-3.0 |
dandanvidi/kvivo_max | scripts/catalytic_rates.py | 3 | 14326 | import pandas as pd
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.manipulation.modify import convert_to_irreversible
import numpy as np
from model_addons import add_to_model
from copy import deepcopy
class rates(object):
def __init__(self):
self.model = create_cobra_model_from_sbml_file("../data/iJO1366.xml")
# Modify model
convert_to_irreversible(self.model)
self.rxns = dict([(r.id, r) for r in self.model.reactions])
self.genes = dict([(g.id, g) for g in self.model.genes])
add_to_model(self.model)
self.include_specific_isozmyes()
self.gc = pd.DataFrame.from_csv("../data/growth_conditions.csv")
flux = pd.DataFrame.from_csv('../data/flux[mmol_gCDW_h].csv')
self.v = self._convert_mmol_gCDW_h_to_mmol_gCDW_s(flux)
# PPKr_reverse reaction is used for ATP generation from ADP
# in the FBA model. Nevertheless, acording to EcoCyc, it is used to
# to generate polyP (inorganic phosphate) chains from ATP and it is not
# part of the oxidative phosphorilation, thus removed from rate calculations
if 'PPKr_reverse' in self.v.index:
self.v.drop('PPKr_reverse', axis=0, inplace=True)
self.enzymatic_reactions = self._enzymatic_reactions()
self.homomeric_reactions = self.reactions_by_homomeric_enzymes()
proteins_copies_fL = pd.DataFrame.from_csv('../data/meta_abundance[copies_fL].csv')
self.proteins_mmol_gCDW = self._convert_copies_fL_to_mmol_gCDW(proteins_copies_fL)
self.E = self.map_expression_by_reaction()
self.kapp = self.get_kapp() # per subunit
self.SA = self.get_specific_activity()
self.kcat = pd.DataFrame.from_csv("../data/kcat_data.csv")
self.p_per_as = (self.kcat['polypeptides per complex']
/ self.kcat['catalytic sites per complex'])
self.kmax = self.get_kmax(self.kapp)
self.SAmax = self.get_maximum_specific_activity(self.SA)
def include_specific_isozmyes(self):
'''
Possible add-ons to the list of unique homomeric enzymes
            obtained by the method "reactions_by_homomeric_enzymes".
            These reactions are known to have only one active isozyme
            across all tested conditions and were therefore added manually
'''
pairs = [
('METS','b3829'),# metE - cobalamin-independent homocysteine transmethylase - The aerobic enzyme - the other isoenzyme operates under anaerobic conditions
('HCO3E','b0126'),# can - carbonic anhydrase
('PFK','b3916'), # 6-phosphofructokinase - pfkB accounts for above 90% of enzymatic activity (EcoCyc)
('RPI','b2914'), # ribose-5-phosphate isomerase A
('RPE', 'b3386') # ribulose-5-phosphate 3-epimerase - other isozyme is according to predicted activity.
]
for (r,g) in pairs:
self.rxns[r]._genes = [self.model.genes.get_by_id(g)]
self.rxns[r].gene_reaction_rule = '('+g+')'
def _enzymatic_reactions(self):
'''
Returns a list of cobra Reaction objects catalyzed by enzymes.
'''
reactions = filter(lambda r:len(r.genes)>=1, self.model.reactions)
genes = [list(r.genes) for r in reactions]
return dict(zip(reactions,genes))
def reactions_by_unique_enzyme(self):
'''
Returns a list of reactions (as cobra REACTION objects)
in the model catalyzed by unique enzymes. Enzymes can either
            be homomeric or heteromeric complexes.
'''
one_enzyme_reac = filter(lambda r: 'or' not in r.gene_reaction_rule,
self.enzymatic_reactions.keys())
genes = [list(r.genes) for r in one_enzyme_reac]
return dict(zip(one_enzyme_reac,genes))
def reactions_by_homomeric_enzymes(self):
'''
Returns a list of reactions (as cobra REACTION objects)
in the model catalyzed by unique enzymes which are composed
of a single polypeptide chain, i.e., unique homomeric enzymes.
'''
homomers = filter(lambda r: len(r.genes)==1,
self.enzymatic_reactions.keys())
genes = [list(r.genes)[0] for r in homomers]
return dict(zip(homomers,genes))
def _convert_copies_fL_to_mmol_gCDW(self, expression_data):
'''
            Converts the units of proteomics data (usually reported in
copies per fL of cytoplasm) to units of mmol per gCDW.
This unit conversion is performed to match flux units from
metabolic models (usually given in mmol/gCDW/h)
'''
rho = 1100 # average cell density gr/liter
DW_fraction = 0.3 # fraction of DW of cells
Avogadro = 6.02214129 # Avogadro's number "exponent-less"
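        # Unit conversion note: dividing by (Avogadro * 1e5) turns copies/fL into
        # mmol per liter of cell volume (the 1e5 combines the 1e23 exponent of
        # Avogadro's number with the fL->L and mol->mmol factors); dividing by
        # (rho * DW_fraction) then yields mmol per gCDW. Abundances below
        # 10 copies/fL are treated as missing.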
expression_data[expression_data<10] = np.nan
expression_data /= (Avogadro*1e5)
expression_data /= (rho * DW_fraction)
return expression_data
def _convert_mmol_gCDW_h_to_mmol_gCDW_s(self, flux_data):
'''
            Converts the units of flux data (usually reported in
mmol/gCDW/h) to units of mmol/gCDW per second.
This unit conversion is performed to allow calculation of
            turnover rates in units of s^-1, as traditionally expected.
'''
flux_data /= 3600
return flux_data
def _convert_mmol_gCDW_to_mg_gCDW(self, expression_data):
genes = set(self.genes.keys()) & (set(expression_data.index))
mass = [self.genes[g].MW for g in genes]
MW = pd.Series(index=genes, data=mass)
return expression_data.loc[MW.index].mul(MW, axis=0)
def map_expression_by_reaction(self):
gc = self.v.columns & self.proteins_mmol_gCDW.columns
tmp = {k.id:v.id for k,v in self.reactions_by_homomeric_enzymes().iteritems()}
E = pd.DataFrame(index=tmp.keys(),columns=gc)
for i in E.index:
if tmp[i] in self.proteins_mmol_gCDW.index:
E.loc[i] = self.proteins_mmol_gCDW.loc[tmp[i]]
E.dropna(how='all', inplace=True)
return E
def get_kapp(self):
'''
Calculates the catalytic rate of a single subunit of a homomeric
enzyme for a given reaction, by dividing the flux through the
reaction by the abundance of the polypeptide chain that comprises
the enzyme.
Arguments:
flux [mmol/gCDW/s]
proteomics [mmol/gCDW]
Returns:
pandas dataframe with catalytic rates per polypeptide chain
in units of s^-1. Rows are reactions, columns are conditions
'''
rate = self.v.div(self.E)
rate.replace([0, np.inf, -np.inf], np.nan, inplace=True)
rate.dropna(how='all', inplace=True)
return rate
def get_kmax(self, kapp, minimal_conditions=5):
'''
Take the maximum rate of a given enzyme-reaction pair
across all conditions.
Arguments:
catalytic rate of enzyme-reaction pairs across conditions
as a pandas dataframe. Rows are reactions, columns are conditions
Returns:
Maximal rate for each enzyme-reaction pair, the condition in
which it was found, the metabolic pathway associated with
the reaction and the carbon source on which the cells were grown.
Notice that maximal rates are given per polypeptide chain and
            per active site in two separate columns.
Rate units are s^-1.
'''
kapp.dropna(thresh=minimal_conditions, inplace=True)
kmax = pd.DataFrame(index=kapp.index)
subsystems =[self.rxns[r].subsystem for r in kmax.index]
genes = [list(self.rxns[r].genes)[0].id for r in kmax.index]
names = [list(self.rxns[r].genes)[0].name for r in kmax.index]
kmax.index.name = 'reaction'
kmax['bnumber'] = genes
kmax['primary gene name (uniprot)'] = names
kmax['kmax per chain [s^-1]'] = kapp.max(axis=1)
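        # convert per-chain rates to per-active-site rates by scaling kapp with
        # the number of polypeptide chains per catalytic site (p_per_as)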
tmp = self.kapp.loc[kmax.index].mul(self.p_per_as[kmax.index], axis=0)
kmax['kmax per active site [s-1]'] = tmp.max(axis=1)
kmax['subsystem'] = subsystems
kmax['condition'] = kapp.idxmax(axis=1)
return kmax
def get_specific_activity(self):
'''
Calculates the specific activity in units of umol/mg/min
for all reactions in the model. The sum of all associated
polypeptide chains is used as the molecular weight of the enzyme
and the flux through the reaction is divided by this weight.
Notice that if a reaction can be carried by several different enzymes,
i.e., isoenzymes, the returned values are a weighted average of the
rate of the enzymes by their mass.
Arguments:
flux [mmol/gCDW/s]
proteomics [mmol/gCDW]
Returns:
            pandas dataframe with specific activities of enzymes
in units of umol/mg/min. Rows are reactions, columns are conditions
'''
weighted_mass = self._convert_mmol_gCDW_to_mg_gCDW(self.proteins_mmol_gCDW)
reactions = map(lambda x: x.id, self.enzymatic_reactions)
SA = pd.DataFrame(index=reactions, columns=self.gc.index)
for r in self.enzymatic_reactions:
genes = map(lambda x: x.id, r.genes)
try:
SA.loc[r.id] = self.v.loc[r.id] / weighted_mass.loc[genes].sum()
except KeyError:
continue
SA.replace([0, np.inf, -np.inf], np.nan, inplace=True)
SA.dropna(how='all', inplace=True)
return SA * 1000 * 60
def get_maximum_specific_activity(self, specific_activity, minimal_conditions=5):
'''
Take the maximum rate of a given enzyme-reaction pair
across all conditions.
Arguments:
specific activities of enzyme-reaction pairs across conditions
as a pandas dataframe. Rows are reactions, columns are conditions
Returns:
Maximal specific activity for each enzyme-reaction pair,
the condition in which it was found, the metabolic pathway
associated with the reaction and the carbon source on which
the cells were grown.
Notice that maximal specific activities are given for the sum
of all associated enzymes, thus represent the weighted average
            of the specific activities of the isoenzymes. Being a weighted
            average, the values underestimate the maximal
potential rate.
'''
specific_activity.dropna(thresh=minimal_conditions, inplace=True)
SAmax = pd.DataFrame(index=specific_activity.index)
reactions = map(self.model.reactions.get_by_id, SAmax.index)
subsystems = map(lambda r: r.subsystem, reactions)
SAmax['max specific activity [umol/mg/min]'] = specific_activity.max(axis=1)
SAmax['subsystem'] = subsystems
SAmax['condition'] = specific_activity.idxmax(axis=1)
return SAmax
def get_second_max(self):
'''
Finds the second maximal kapp value by reaction
Arguments:
self
Returns:
Pandas Series with reactions as index and snd max as values
'''
rate = self.kapp.mul(self.p_per_as, axis=0)
rate.dropna(how='all', inplace=True)
second = pd.Series(index=rate.index)
for r in rate.index:
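            # the second-largest entry of the sorted per-condition rates;
            # assumes at least two finite values per reaction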
array = sorted(rate.loc[r])
second[r] = array[-2]
return second
def _perform_pFBA(self, model, cs='glc', gr=1, ur=10):
from cobra.flux_analysis.parsimonious import optimize_minimal_flux
rxns = dict([(r.id, r) for r in model.reactions])
        rxns['EX_glc_e'].lower_bound = 0 # close the default glucose uptake before setting the actual carbon source
try:
rxns['EX_' + cs + '_e'].lower_bound = -ur # redefine sole carbon source uptake reaction in mmol/gr/h
except:
print cs, ur
rxns['EX_glc_e'].lower_bound = -ur
rxns['Ec_biomass_iJO1366_core_53p95M'].upper_bound = gr
print "solving pFBA",
optimize_minimal_flux(model, already_irreversible=True)
flux_dist = pd.DataFrame(model.solution.x_dict.items()).set_index(0)
return flux_dist
def _overwrite_pFBA_file(self):
reactions = [r.id for r in self.model.reactions]
fluxes = pd.DataFrame(index=reactions, columns=self.gc.index)
for c in self.gc.iterrows():
gr = c[1]['growth rate [h-1]']
cs = c[1]['media_key']
ur = c[1]['uptake rate [mmol gCDW-1 h-1]']
if np.isnan(ur):
ur = 18.5
model = deepcopy(self.model)
fluxes[c[0]] = self._perform_pFBA(model, cs, gr, ur)
print "- %s" %c[0]
fluxes.index.name = 'reaction'
''' export results '''
fluxes.to_csv('../data/flux[mmol_gCDW_h].csv')
if __name__ == "__main__":
R = rates()
kcat = R.kcat['kcat per active site [s-1]'].dropna()
kmax = R.kmax['kmax per active site [s-1]'].dropna()
index = kcat.index & kmax.index
kcat = kcat[index]
kmax = kmax[index]
from scipy import stats
pearson = stats.pearsonr(np.log(kcat), np.log(kmax))
spearman = stats.spearmanr(kcat, kmax)
kendal = stats.kendalltau(kcat, kmax)
| mit |
mtpain/metacorps | projects/common/analysis.py | 1 | 12228 | import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from collections import OrderedDict, Counter
from copy import deepcopy
from datetime import datetime, timedelta
from urllib.parse import urlparse
from .export_project import ProjectExporter
from app.models import IatvCorpus
DEFAULT_FACET_WORDS = [
'attack',
'hit',
'beat',
'grenade',
'slap',
'knock',
'jugular',
'smack',
'strangle',
'slug',
]
def get_project_data_frame(project_name):
'''
    Convenience method for loading a project as a pandas DataFrame.
    The argument may be a project name, a year (an int is expanded to
    'Viomet Sep-Nov <year>'), or a path/URL to a previously exported CSV.
    In the future we may want to match some other unique element of a
    title, or create some other kind of wrapper to search all Project
    names in the metacorps database.
    Arguments:
        project_name (str or int): project name, year, or path/URL of an
            exported CSV to load as a DataFrame
'''
if type(project_name) is int:
project_name = str('Viomet Sep-Nov ' + str(project_name))
    def is_url(s): return urlparse(s).hostname is not None
if is_url(project_name) or os.path.exists(project_name):
ret = pd.read_csv(project_name, na_values='',
parse_dates=['start_localtime'])
return ret
return ProjectExporter(project_name).export_dataframe()
def _select_range_and_pivot_subj_obj(date_range, counts_df, subj_obj):
rng_sub = counts_df[
date_range[0] <= counts_df.start_localtime
][
counts_df.start_localtime <= date_range[1]
]
rng_sub_sum = rng_sub.groupby(['network', subj_obj]).agg(sum)
ret = rng_sub_sum.reset_index().pivot(
index='network', columns=subj_obj, values='counts'
)
return ret
def _count_daily_subj_obj(df, sub_obj):
subs = df[['start_localtime', 'network', 'subjects', 'objects']]
subs.subjects = subs.subjects.map(lambda s: s.strip().lower())
subs.objects = subs.objects.map(lambda s: s.strip().lower())
try:
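        # keep rows whose subject/object mentions Clinton or Trump, excluding
        # compound entries containing '/' and mentions of their campaigns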
trcl = subs[
(subs[sub_obj].str.contains('hillary clinton') |
subs[sub_obj].str.contains('donald trump')) &
subs[sub_obj].str.contains('/').map(lambda b: not b) &
subs[sub_obj].str.contains('campaign').map(lambda b: not b)
]
except KeyError:
raise RuntimeError('sub_obj must be "subjects" or "objects"')
c = trcl.groupby(['start_localtime', 'network', sub_obj]).size()
ret_df = c.to_frame()
ret_df.columns = ['counts']
ret_df.reset_index(inplace=True)
# cleanup anything like 'republican nominee'
    ret_df.loc[
        ret_df[sub_obj].str.contains('donald trump'), sub_obj
    ] = 'donald trump'
    ret_df.loc[
        ret_df[sub_obj].str.contains('hillary clinton'), sub_obj
    ] = 'hillary clinton'
return ret_df
def _count_by_start_localtime(df,
column_list=['program_name',
'network',
'facet_word']):
'''
Count the number of instances grouped by column_list. Adds a 'counts'
column.
Arguments:
df (pandas.DataFrame): Analyzer.df attribute from Analyzer class
column_list (list): list of columns on which to groupby then count
Returns:
(pandas.DataFrame) counts per start_localtime of tuples with types
given in column_list
'''
all_cols = ['start_localtime'] + column_list
subs = df[all_cols]
c = subs.groupby(all_cols).size()
ret_df = c.to_frame()
ret_df.columns = ['counts']
ret_df.reset_index(inplace=True)
return ret_df
def shows_per_date(date_index, iatv_corpus, by_network=False):
'''
Arguments:
date_index (pandas.DatetimeIndex): Full index of dates covered by
data
iatv_corpus (app.models.IatvCorpus): Obtained, e.g., using
`iatv_corpus = IatvCorpus.objects.get(name='Viomet Sep-Nov 2016')`
by_network (bool): whether or not to do a faceted daily count
by network
Returns:
(pandas.Series) if by_network is False, (pandas.DataFrame)
if by_network is true.
'''
if type(iatv_corpus) is str:
iatv_corpus = IatvCorpus.objects(name=iatv_corpus)[0]
docs = iatv_corpus.documents
n_dates = len(date_index)
if not by_network:
# get all date/show name tuples & remove show re-runs from same date
prog_dates = set(
[
(d.program_name, d.start_localtime.date())
for d in docs
]
)
# count total number of shows on each date
# note we count the second entry of the tuples, which is just the
# date, excluding program name
shows_per_date = Counter(el[1] for el in prog_dates)
spd_series = pd.Series(
index=date_index,
data={'counts': np.zeros(n_dates)}
).sort_index()
for date in shows_per_date:
spd_series.loc[date] = shows_per_date[date]
return spd_series
else:
# get all date/network/show name tuples
# & remove show re-runs from same date
prog_dates = set(
[
(d.program_name, d.network, d.start_localtime.date())
for d in docs
]
)
# count total number of shows on each date for each network
        # note we count the (network, date) part of each tuple,
        # excluding program name
shows_per_network_per_date = Counter(el[1:] for el in prog_dates)
n_dates = len(date_index)
spd_frame = pd.DataFrame(
index=date_index,
data={
'MSNBCW': np.zeros(n_dates),
'CNNW': np.zeros(n_dates),
'FOXNEWSW': np.zeros(n_dates)
}
)
for tup in shows_per_network_per_date:
            spd_frame.loc[tup[1], tup[0]] = shows_per_network_per_date[tup]
return spd_frame
def daily_metaphor_counts(df, date_index, by=None):
'''
Given an Analyzer.df, creates a pivot table with date_index as index. Will
    group by the column names given in by. Counts are first computed at the
    data's original hourly resolution and then aggregated to daily totals.
Arguments:
df (pandas.DataFrame)
by (list(str))
date_index (pandas.core.indexes.datetimes.DatetimeIndex): e.g.
`pd.date_range('2016-09-01', '2016-11-30', freq='D')`
'''
# get initial counts by localtime
if by is None:
by = []
counts = _count_by_start_localtime(df, column_list=by)
groupby_spec = [counts.start_localtime.dt.date, *counts[by]]
counts_gb = counts.groupby(groupby_spec).sum().reset_index()
ret = pd.pivot_table(counts_gb, index='start_localtime', values='counts',
columns=by, aggfunc='sum').fillna(0)
return ret
def daily_frequency(df, date_index, iatv_corpus, by=None):
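    '''
    Compute daily metaphorical-violence frequency: daily counts from
    daily_metaphor_counts divided by the number of shows aired per date
    (per network when 'network' is in `by`).
    '''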
if by is not None and 'network' in by:
spd = shows_per_date(date_index, iatv_corpus, by_network=True)
daily = daily_metaphor_counts(df, date_index, by=by)
ret = daily.div(spd, axis='rows')
elif by is None:
spd = shows_per_date(date_index, iatv_corpus)
daily = daily_metaphor_counts(df, date_index, by=by)
ret = daily.div(spd, axis='rows')
ret.columns = ['freq']
else:
spd = shows_per_date(date_index, iatv_corpus)
daily = daily_metaphor_counts(df, date_index, by=by)
ret = daily.div(spd, axis='rows')
return ret
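# Illustrative usage sketch (project/corpus name as used elsewhere in this module):
#   df = get_project_data_frame('Viomet Sep-Nov 2016')
#   date_index = pd.date_range('2016-09-01', '2016-11-30', freq='D')
#   freq = daily_frequency(df, date_index, 'Viomet Sep-Nov 2016', by=['network'])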
class SubjectObjectData:
'''
Container for timeseries of instances of a specified subject, object, or
subject-object pair. For example, we may look for all instances where
Donald Trump is the subject of metaphorical violence, irrespective of the
object. We also may want to see where he is the object, no matter who
is the subject. Or, we may want to search for pairs, say, all instances
where Hillary Clinton is the subject of metaphorical violence and
Donald Trump is the object of metaphorical violence, or vice-versa.
from_analyzer_df is currently the most likely constructor to be used
'''
def __init__(self, data_frame, subj, obj, partition_infos=None):
self.data_frame = data_frame
self.subject = subj
self.object = obj
self.partition_infos = partition_infos
self.partition_data_frame = None
@classmethod
def from_analyzer_df(cls, analyzer_df, subj=None, obj=None,
subj_contains=True, obj_contains=True,
date_range=None):
'''
Given an Analyzer instance's DataFrame, calculate the frequency of
metaphorical violence with a given subject, object,
or a subject-object pair.
Returns:
(SubjectObjectData) an initialized class. The data_frame attribute
will be filled with by-network counts of the specified subj/obj
configuration.
'''
if date_range is None:
            date_range = pd.date_range('2016-09-01', '2016-11-30', freq='D')
pre = analyzer_df.fillna('')
def _match_checker(df, subj, obj, subj_contains, obj_contains):
'''
Returns list of booleans for selecting subject and object matches
'''
if subj is None and obj is None:
raise RuntimeError('subj and obj cannot both be None')
if subj is not None:
if subj_contains:
retSubj = list(df.subjects.str.contains(subj))
else:
retSubj = list(df.subjects == subj)
if obj is None:
ret = retSubj
# TODO could probably combine these, but now not clear how
if obj is not None:
if obj_contains:
retObj = list(df.objects.str.contains(obj))
else:
retObj = list(df.objects == obj)
if subj is None:
ret = retObj
else:
ret = [rs and ro for rs, ro in zip(retSubj, retObj)]
return ret
chooser = _match_checker(pre, subj, obj, subj_contains, obj_contains)
pre = pre[
chooser
]
# then do counts or frequencies as normal, since you have just
# subset the relevant rows.
counts_df = pd.DataFrame(
index=date_range, data=0.0,
columns=pd.Index(['MSNBCW', 'CNNW', 'FOXNEWSW'], name='network')
)
# there might be columns missing, so we have to insert into above zeros
to_insert_df = daily_metaphor_counts(pre, date_range, by=['network'])
for network in ['MSNBCW', 'CNNW', 'FOXNEWSW']:
if network in to_insert_df.columns:
for row in to_insert_df.itertuples():
                    counts_df.loc[row.Index, network] = getattr(row, network)
return cls(counts_df, subj, obj)
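    # Illustrative usage sketch (subject/object strings are assumptions for
    # illustration only):
    #   sod = SubjectObjectData.from_analyzer_df(
    #       analyzer_df, subj='hillary clinton', obj='donald trump',
    #       date_range=pd.date_range('2016-09-01', '2016-11-30', freq='D'))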
def partition(self, partition_infos):
pass
def facet_word_count(analyzer_df, facet_word_index, by_network=True):
'''
Count the number of times each facet word has been used. If by_network is
True, compute the usage of each word by network.
Arguments:
analyzer_df (pandas.DataFrame): dataframe of the IatvCorpus annotations
by_network (bool): group each partition's word counts by network?
Returns:
(pandas.DataFrame) or (pandas.Series) of counts depending on by_network
'''
if by_network:
return analyzer_df.groupby(
['network', 'facet_word']
).size().unstack(level=0)[
['MSNBCW', 'CNNW', 'FOXNEWSW']
].loc[facet_word_index].fillna(0.0)
else:
return analyzer_df.groupby(
['facet_word']
).size().loc[facet_word_index].fillna(0.0)
| bsd-3-clause |
shahankhatch/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 78 | 34552 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
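# Illustrative example: for X = [[0.], [1.], [3.]], l1_cross_distances returns
# D = [[1.], [3.], [2.]] and ij = [[0, 1], [0, 2], [1, 2]].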
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The presentation implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
            Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
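        # reduced (concentrated) likelihood criterion:
        # -(sum of sigma2) * det(R)^(1/n_samples); the optimizer maximizes this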
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| bsd-3-clause |
lamblin/pylearn2 | pylearn2/scripts/plot_monitor.py | 3 | 9384 | #!/usr/bin/env python
"""
usage:
plot_monitor.py model_1.pkl model_2.pkl ... model_n.pkl
Loads any number of .pkl files produced by train.py. Extracts
all of their monitoring channels and prompts the user to select
a subset of them to be plotted.
"""
from __future__ import print_function
__authors__ = "Ian Goodfellow, Harm Aarts"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import gc
import numpy as np
import sys
from theano.compat.six.moves import input, xrange
from pylearn2.utils import serial
from theano.printing import _TagGenerator
from pylearn2.utils.string_utils import number_aware_alphabetical_key
from pylearn2.utils import contains_nan, contains_inf
import argparse
channels = {}
def unique_substring(s, other, min_size=1):
"""
.. todo::
WRITEME
"""
size = min(len(s), min_size)
while size <= len(s):
for pos in xrange(0,len(s)-size+1):
rval = s[pos:pos+size]
fail = False
for o in other:
if o.find(rval) != -1:
fail = True
break
if not fail:
return rval
size += 1
# no unique substring
return s
def unique_substrings(l, min_size=1):
"""
.. todo::
WRITEME
"""
return [unique_substring(s, [x for x in l if x is not s], min_size)
for s in l]
def main():
"""
.. todo::
WRITEME
"""
parser = argparse.ArgumentParser()
parser.add_argument("--out")
parser.add_argument("model_paths", nargs='+')
parser.add_argument("--yrange", help='The y-range to be used for plotting, e.g. 0:1')
options = parser.parse_args()
model_paths = options.model_paths
if options.out is not None:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
print('generating names...')
model_names = [model_path.replace('.pkl', '!') for model_path in
model_paths]
model_names = unique_substrings(model_names, min_size=10)
model_names = [model_name.replace('!','') for model_name in
model_names]
print('...done')
for i, arg in enumerate(model_paths):
try:
model = serial.load(arg)
except Exception:
if arg.endswith('.yaml'):
                print(arg + " is a yaml config file; "
                      "you need to load a trained model.", file=sys.stderr)
quit(-1)
raise
this_model_channels = model.monitor.channels
        if len(model_paths) > 1:
postfix = ":" + model_names[i]
else:
postfix = ""
for channel in this_model_channels:
channels[channel+postfix] = this_model_channels[channel]
del model
gc.collect()
while True:
# Make a list of short codes for each channel so user can specify them
# easily
tag_generator = _TagGenerator()
codebook = {}
sorted_codes = []
for channel_name in sorted(channels,
key = number_aware_alphabetical_key):
code = tag_generator.get_tag()
codebook[code] = channel_name
codebook['<'+channel_name+'>'] = channel_name
sorted_codes.append(code)
x_axis = 'example'
print('set x_axis to example')
if len(channels.values()) == 0:
print("there are no channels to plot")
break
# If there is more than one channel in the monitor ask which ones to
# plot
prompt = len(channels.values()) > 1
if prompt:
# Display the codebook
for code in sorted_codes:
print(code + '. ' + codebook[code])
print()
print("Put e, b, s or h in the list somewhere to plot " +
"epochs, batches, seconds, or hours, respectively.")
response = input('Enter a list of channels to plot ' + \
'(example: A, C,F-G, h, <test_err>) or q to quit' + \
' or o for options: ')
if response == 'o':
print('1: smooth all channels')
print('any other response: do nothing, go back to plotting')
response = input('Enter your choice: ')
if response == '1':
for channel in channels.values():
k = 5
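                        # smooth with a trailing moving average over up to k+1 points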
new_val_record = []
for i in xrange(len(channel.val_record)):
new_val = 0.
count = 0.
for j in xrange(max(0, i-k), i+1):
new_val += channel.val_record[j]
count += 1.
new_val_record.append(new_val / count)
channel.val_record = new_val_record
continue
if response == 'q':
break
#Remove spaces
response = response.replace(' ','')
#Split into list
codes = response.split(',')
final_codes = set([])
for code in codes:
if code == 'e':
x_axis = 'epoch'
continue
elif code == 'b':
x_axis = 'batche'
elif code == 's':
x_axis = 'second'
elif code == 'h':
x_axis = 'hour'
elif code.startswith('<'):
assert code.endswith('>')
final_codes.add(code)
elif code.find('-') != -1:
#The current list element is a range of codes
rng = code.split('-')
if len(rng) != 2:
print("Input not understood: "+code)
quit(-1)
found = False
for i in xrange(len(sorted_codes)):
if sorted_codes[i] == rng[0]:
found = True
break
if not found:
print("Invalid code: "+rng[0])
quit(-1)
found = False
for j in xrange(i,len(sorted_codes)):
if sorted_codes[j] == rng[1]:
found = True
break
if not found:
print("Invalid code: "+rng[1])
quit(-1)
final_codes = final_codes.union(set(sorted_codes[i:j+1]))
else:
#The current list element is just a single code
final_codes = final_codes.union(set([code]))
# end for code in codes
else:
            final_codes = set(sorted_codes)
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
styles = list(colors)
styles += [color+'--' for color in colors]
styles += [color+':' for color in colors]
fig = plt.figure()
ax = plt.subplot(1,1,1)
# plot the requested channels
for idx, code in enumerate(sorted(final_codes)):
channel_name= codebook[code]
channel = channels[channel_name]
y = np.asarray(channel.val_record)
if contains_nan(y):
print(channel_name + ' contains NaNs')
if contains_inf(y):
                print(channel_name + ' contains infinite values')
if x_axis == 'example':
x = np.asarray(channel.example_record)
elif x_axis == 'batche':
x = np.asarray(channel.batch_record)
elif x_axis == 'epoch':
try:
x = np.asarray(channel.epoch_record)
except AttributeError:
# older saved monitors won't have epoch_record
x = np.arange(len(channel.batch_record))
elif x_axis == 'second':
x = np.asarray(channel.time_record)
elif x_axis == 'hour':
x = np.asarray(channel.time_record) / 3600.
else:
assert False
ax.plot( x,
y,
styles[idx % len(styles)],
                     marker = '.', # add point markers to lines
label = channel_name)
plt.xlabel('# '+x_axis+'s')
ax.ticklabel_format( scilimits = (-3,3), axis = 'both')
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc='upper center',
bbox_to_anchor=(0.5,-0.1))
# 0.046 is the size of 1 legend box
fig.subplots_adjust(bottom=0.11 + 0.046 * len(final_codes))
if(options.yrange is not None):
ymin, ymax = map(float, options.yrange.split(':'))
plt.ylim(ymin, ymax)
if options.out is None:
plt.show()
else:
plt.savefig(options.out)
if not prompt:
break
if __name__ == "__main__":
main()
| bsd-3-clause |
sho-87/python-machine-learning | CNN/mnist_lasagne.py | 1 | 10782 | from __future__ import print_function
import sys
import os
import time
import lasagne
import numpy as np
import matplotlib.pyplot as plt
import theano
import theano.tensor as T
from pylab import cm, imshow, show
def load_dataset():
# We first define a download function, supporting both Python 2 and 3.
if sys.version_info[0] == 2:
from urllib import urlretrieve
else:
from urllib.request import urlretrieve
def download(filename, source='http://yann.lecun.com/exdb/mnist/'):
print("Downloading %s" % filename)
urlretrieve(source + filename, filename)
# We then define functions for loading MNIST images and labels.
# For convenience, they also download the requested files if needed.
import gzip
def load_mnist_images(filename):
if not os.path.exists(filename):
download(filename)
# Read the inputs in Yann LeCun's binary format.
with gzip.open(filename, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
# The inputs are vectors now, we reshape them to monochrome 2D images,
# following the shape convention: (examples, channels, rows, columns)
data = data.reshape(-1, 1, 28, 28)
# The inputs come as bytes, we convert them to float32 in range [0,1].
# (Actually to range [0, 255/256], for compatibility to the version
# provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
return data / np.float32(256)
def load_mnist_labels(filename):
if not os.path.exists(filename):
download(filename)
# Read the labels in Yann LeCun's binary format.
with gzip.open(filename, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=8)
# The labels are vectors of integers now, that's exactly what we want.
return data
# We can now download and read the training and test set images and labels.
X_train = load_mnist_images('train-images-idx3-ubyte.gz')
y_train = load_mnist_labels('train-labels-idx1-ubyte.gz')
X_test = load_mnist_images('t10k-images-idx3-ubyte.gz')
y_test = load_mnist_labels('t10k-labels-idx1-ubyte.gz')
# We reserve the last 10000 training examples for validation.
X_train, X_val = X_train[:-10000], X_train[-10000:]
y_train, y_val = y_train[:-10000], y_train[-10000:]
# We just return all the arrays in order, as expected in main().
return X_train, y_train, X_val, y_val, X_test, y_test
def build_cnn(input_var=None):
l_in = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
input_var=input_var)
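    # Two convolution + pooling stages: 32 filters of size 5x5 with ReLU,
    # each followed by 2x2 max-pooling.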
l_c1 = lasagne.layers.Conv2DLayer(
l_in, num_filters=32, filter_size=(5, 5),
nonlinearity=lasagne.nonlinearities.rectify,
W=lasagne.init.GlorotUniform())
l_p1 = lasagne.layers.MaxPool2DLayer(l_c1, pool_size=(2, 2))
l_c2 = lasagne.layers.Conv2DLayer(
l_p1, num_filters=32, filter_size=(5, 5),
nonlinearity=lasagne.nonlinearities.rectify)
l_p2 = lasagne.layers.MaxPool2DLayer(l_c2, pool_size=(2, 2))
# A fully-connected layer of 256 units with 50% dropout on its inputs:
l_fc = lasagne.layers.DenseLayer(
lasagne.layers.dropout(l_p2, p=.5),
num_units=256,
nonlinearity=lasagne.nonlinearities.rectify)
# And, finally, the 10-unit output layer with 50% dropout on its inputs:
l_out = lasagne.layers.DenseLayer(
lasagne.layers.dropout(l_fc, p=.5),
num_units=10,
nonlinearity=lasagne.nonlinearities.softmax)
return l_out
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
# Notice that this function returns only mini-batches of size `batchsize`.
# If the size of the data is not a multiple of `batchsize`, it will not
# return the last (remaining) mini-batch.
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
def main(model='cnn', batch_size=500, num_epochs=500):
# Prepare Theano variables for inputs and targets
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
print("Building model and compiling functions...")
network = build_cnn(input_var)
# Create a loss expression for training, i.e., a scalar objective we want
# to minimize (for our multi-class problem, it is the cross-entropy loss):
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
# We could add some weight decay as well here, see lasagne.regularization.
train_acc = T.mean(T.eq(T.argmax(prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Create update expressions for training
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(
loss, params, learning_rate=0.01, momentum=0.9)
# Create a loss expression for validation/testing. The crucial difference
# here is that we do a deterministic forward pass through the network,
# disabling dropout layers.
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
target_var)
test_loss = test_loss.mean()
# Create an expression for the classification accuracy:
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = theano.function([input_var, target_var], [loss, train_acc], updates=updates)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
# Finally, launch the training loop.
print("Starting training...")
# We iterate over epochs:
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
train_err = 0
train_acc = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(X_train, y_train, batch_size, shuffle=True):
inputs, targets = batch
err, acc = train_fn(inputs, targets)
train_err += err
train_acc += acc
train_batches += 1
# And a full pass over the validation data:
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(X_val, y_val, batch_size, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
print(" training accuracy:\t\t{:.2f} %".format(
train_acc / train_batches * 100))
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
# After training, we compute and print the test error:
test_err = 0
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(X_test, y_test, batch_size, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(
test_acc / test_batches * 100))
# Optionally, you could now dump the network weights to a file like this:
# np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
#
# And load them again later on like this:
# with np.load('model.npz') as f:
# param_values = [f['arr_%d' % i] for i in range(len(f.files))]
# lasagne.layers.set_all_param_values(network, param_values)
all_layers = lasagne.layers.get_all_param_values(network)
return all_layers
# Load the dataset
print("Loading data...")
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
# Run the model
weights = main(batch_size=50, num_epochs=5)
c1_kernels = weights[0]
# Plot single example
image_num = np.random.randint(0, X_train.shape[0])
image_label = y_train[image_num]
image_array = X_train[image_num][0]
image_2d = np.reshape(image_array, (28, 28))
image_4d = image_2d.reshape(1, 1, 28, 28)
imshow(image_2d, cmap=cm.gray)
show()
print("Label: {}".format(image_label))
# Plot layer 1 kernels
fig = plt.figure()
fig.suptitle("Kernels")
for j in range(len(c1_kernels)):
ax = fig.add_subplot(6, 6, j+1)
ax.matshow(c1_kernels[j][0], cmap=cm.gray)
plt.xticks(np.array([]))
plt.yticks(np.array([]))
plt.tight_layout()
fig.subplots_adjust(top=0.90)
plt.show()
# Plot activation maps
x = T.tensor4().astype(theano.config.floatX)
conv_out = T.nnet.conv2d(input=x, filters=c1_kernels)
get_activity = theano.function([x], conv_out)
activation = get_activity(image_4d)
fig = plt.figure()
fig.suptitle("Feature Maps")
for j in range(len(c1_kernels)):
ax = fig.add_subplot(6, 6, j+1)
ax.matshow(activation[0][j], cmap=cm.gray)
plt.xticks(np.array([]))
plt.yticks(np.array([]))
plt.tight_layout()
fig.subplots_adjust(top=0.90)
plt.show()
| mit |
a2takashi/kaggle-challenge | Titanic/pre_process.py | 1 | 5701 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 23 22:32:16 2016
@author: takashi
"""
import numpy as np
import pandas as pd
pd.set_option("expand_frame_repr", False)
pd.set_option( "max_columns", 16)
pd.set_option( "max_rows", 8)
FILE_PATH_TRAIN_DATA = "train.csv"
FILE_PATH_PREDICT_DATA = "test.csv"
#KEYS_FOR_ML = ["Survived", "Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
KEYS_FOR_ML = ["Survived", "Pclass", "Sex", "Age", "SibSp", "Parch", "Fare"]
KEYS_FOR_NORM = [ "Age", "SibSp", "Parch", "Fare"]
BINARIZED_CAT_DICT = {"Sex": ["male", "female"],
"Pclass": [ 1, 2, 3],
"Embarked": ["C","Q","S"],
}
def load_titanic_csv(csv_file_path):
"""
Import a Titanic csv file as Pandas DataFrame instance.
Parameters:
csv_file_path: file path of data for Kaggle Titanic Compe
Returns:
raw_df:
"""
raw_df = pd.read_csv(csv_file_path, header=0, index_col="PassengerId")
if not "Survived" in raw_df.keys():
raw_df["Survived"] = np.NaN
return raw_df
def txt_w_border(txt):
BORDER_CHARS = 15 * "#"
ret_txt = " ".join(["\n", BORDER_CHARS, txt, BORDER_CHARS])
return ret_txt
def print_info(df, start=0, end=4, details=1):
"""
Print some information of the input dataframe.
    Parameters:
        df: Dataframe to print its info
        start: start index for sample
        end: last index for sample
        details: verbosity level; higher values also print statistics and samples
    Returns:
        None (information is printed to stdout)
"""
print txt_w_border("Info")
print df.info()
if details>1:
print txt_w_border("Statistics")
print df.describe(percentiles=[])
if details>2:
try:
sample_df = df[start:end].copy()
print txt_w_border("Samples, " + str(start) + " to " + str(end))
except:
sample_df = df.head()
print txt_w_border("Samples, df.head")
print sample_df
return None
def filter_cols(df):
"""
    Pick up the columns listed in KEYS_FOR_ML.
    Parameters:
        df: Dataframe
Returns:
filt_col_df: Filtered dataframe
"""
comm_keys = list( set(df.keys()) & set(KEYS_FOR_ML) )
filt_col_df = df.copy()[comm_keys]
return filt_col_df
def binarize(df):
"""
    Binarize categorical columns of the dataframe.
Parameters:
df: Dataframe
Returns:
        out_df: Binarized dataframe
"""
comm_keys = list( set(df.keys()) & set(BINARIZED_CAT_DICT.keys()) )
ret_df = df.copy()
for key in comm_keys:
val = BINARIZED_CAT_DICT[key]
ret_df[key] = ret_df[key].astype("category")
ret_df[key] = ret_df[key].cat.set_categories(val)
ret_df = pd.get_dummies(ret_df, columns=comm_keys, drop_first=True).astype("float")
return ret_df
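# Illustration of the binarization above (hypothetical values): with the
# category lists in BINARIZED_CAT_DICT and drop_first=True, a "Sex" column of
# ["male", "female"] collapses to a single float indicator "Sex_female", and
# "Pclass" to the two indicators "Pclass_2" and "Pclass_3".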
def normalize(df):
"""
    Normalize (z-score) the numeric columns listed in KEYS_FOR_NORM.
    NaN values are ignored when computing the mean and standard deviation
    and are filled later in the pipeline (see pre_proc_per_df).
Parameters:
df: Dataframe
    Returns:
        out_df: Normalized dataframe
"""
comm_keys = list( set(df.keys()) & set(KEYS_FOR_NORM) )
ret_df = df.copy()
t = ret_df[comm_keys]
ret_df[comm_keys] = (t - t.mean()) / t.std()
return ret_df
def add_null_flag_cols(df, del_single_cat_cols=False):
keys_of_null_cols = [ k + "_null" for k in df.keys()]
ret_df = df.copy()
ret_df[keys_of_null_cols] = ret_df[df.keys()].isnull()
if del_single_cat_cols:
ret_df = pd.get_dummies(ret_df, columns=keys_of_null_cols, drop_first=True)
return ret_df
def merge_SibSp_Parch_to_FamSize(df):
ret_df = df.copy()
ret_df["FamSize"] = ret_df["SibSp"] + ret_df["Parch"]
ret_df.drop(["SibSp","Parch"], axis=1, inplace=True)
return ret_df
def pre_proc_per_df(df, del_single_cat_cols=False):
ret_df = df.copy()
# print txt_w_border("Merging SibSp and Parch to FamSize")
# ret_df = merge_SibSp_Parch_to_FamSize(ret_df)
print txt_w_border("Filtering " + str(KEYS_FOR_ML))
ret_df = filter_cols(ret_df)
print txt_w_border("Adding null flag columns")
ret_df = add_null_flag_cols(ret_df, del_single_cat_cols)
print txt_w_border("Binarizing " + str(BINARIZED_CAT_DICT.keys()))
ret_df = binarize(ret_df)
print txt_w_border("Nomarizing " + str(KEYS_FOR_NORM))
ret_df = normalize(ret_df)
key_of_fill = 0.
print txt_w_border("Filling null with " + str(key_of_fill))
ret_df = ret_df.fillna(key_of_fill)
return ret_df
def pre_proc_all():
print txt_w_border("Importing " + FILE_PATH_TRAIN_DATA)
raw_train_df = load_titanic_csv(FILE_PATH_TRAIN_DATA)
print txt_w_border("Importing " + FILE_PATH_PREDICT_DATA)
raw_test_df = load_titanic_csv(FILE_PATH_PREDICT_DATA)
raw_all_df = pd.concat([raw_train_df, raw_test_df])
t_df = pre_proc_per_df(raw_all_df, del_single_cat_cols=True)
def split_train_target(df, tf_surv):
temp_df = df[df.Survived_null_True==tf_surv]
target_df = temp_df[["Survived"]].copy()
train_df = temp_df.drop(["Survived", "Survived_null_True"], axis=1)
return train_df, target_df
train_df, train_target_df = split_train_target(t_df, tf_surv=False)
test_df, test_target_df = split_train_target(t_df, tf_surv=True)
return train_df, train_target_df, test_df, test_target_df
if __name__=="__main__":
train_df, train_target_df, test_df, test_target_df = pre_proc_all()
| mit |
ycaihua/scikit-learn | sklearn/externals/joblib/__init__.py | 35 | 4382 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://packages.python.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is often rerun over and
  over, for instance when prototyping computation-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanism to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
__version__ = '0.8.4'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
rew4332/tensorflow | tensorflow/examples/skflow/iris_custom_model.py | 5 | 2554 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib import learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
# Convert the target to a one-hot tensor of shape (length of features, 3) and
  # with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
# Create three fully connected layers respectively of size 10, 20, and 10 with
# each layer having a dropout probability of 0.1.
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.9}
features = layers.stack(features, layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
# Create two tensors respectively for prediction and loss.
prediction, loss = (
tf.contrib.learn.models.logistic_regression(features, target)
)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
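# The (predictions, loss, train_op) triple returned above is the contract that
# the tf.contrib.learn Estimator used below relies on; predictions are exposed
# both as hard class labels ('class') and as class probabilities ('prob').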
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = classifier.predict(x_test)
score = metrics.accuracy_score(y_test, y_predicted['class'])
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
pstjohn/decay-methods | high_throughput/outliers_all.py | 1 | 2056 | import numpy as np
from methods.HTExperiment import Experiment
zhang = Experiment('zhang').split()
library = zhang.scaled_blfit_p.join(zhang.names_p)
well_breakdown = library.well.str.extract(
'(?P<well_row>[A-Za-z]{1})(?P<well_col>\d{1,2})')
library = library.drop('well', 1).join(well_breakdown)
library = library[library.Gene_ID.str.contains(r'0\d{1,6}$')]
def prune_outliers(df, max_dist=6, keys=None):
""" Function to return a dataframe in which none of the elements of
'keys' fall between -max_dist < df.key < max_dist """
    if keys is None: keys = ['decay', 'amplitude', 'period']
return df[((df.loc[:,keys] < max_dist) &
(df.loc[:,keys] > -max_dist)).all(1)]
library = prune_outliers(library, max_dist=8).loc[
:, ['amplitude', 'decay', 'period', 'phase', 'Gene_ID']]
control = prune_outliers(zhang.scaled_blfit_c, max_dist=8)
# means = library.groupby('Gene_ID').mean()
from sklearn.covariance import EllipticEnvelope, MinCovDet
from scipy.stats import chi2
x_train = control.loc[:, ['amplitude', 'decay', 'period', 'phase']]
S = MinCovDet().fit(x_train)
control_mean = S.location_
inv_cov = np.linalg.pinv(S.covariance_)
# control_mean = x_train.mean()
# inv_cov = np.linalg.pinv(x_train.cov())
def hotelling_tsquared(x):
""" Function to test the perturbed population x (all with the same
    Gene ID) against the control population in x_train, assuming equal
covariances """
pert_mean = x.drop(['Gene_ID'], 1).mean()
mean_diff = control_mean - pert_mean
T2 = mean_diff.dot(len(x)*inv_cov).dot(mean_diff)
return 1 - chi2.cdf(T2, 2)
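# The statistic above is Hotelling's T^2 with a plug-in (robust) covariance:
# T^2 = n * (mean_c - mean_p)' Sigma^{-1} (mean_c - mean_p), where n is the
# number of wells sharing the Gene ID; it is then referred to a chi-squared
# distribution as coded above.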
pvals = library.groupby('Gene_ID').apply(hotelling_tsquared)
pert_means = library.groupby('Gene_ID').mean()
alpha = 0.01
print " Total genes considered: {0:d}".format(len(pvals))
print " Number of insignificant hits: {0:d}".format(sum(pvals > alpha))
print " Number of significant hits: {0:d}".format(sum(pvals <= alpha))
print "Percentage of significant hits: {0:0.2f}".format(100*float(sum(pvals <= alpha))/len(pvals))
| gpl-3.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/io/sas/sasreader.py | 6 | 2448 | """
Read SAS sas7bdat or xport files.
"""
from pandas import compat
def read_sas(filepath_or_buffer, format=None, index=None, encoding=None,
chunksize=None, iterator=False):
"""
Read SAS files stored as either XPORT or SAS7BDAT format files.
Parameters
----------
filepath_or_buffer : string or file-like object
Path to the SAS file.
format : string {'xport', 'sas7bdat'} or None
If None, file format is inferred. If 'xport' or 'sas7bdat',
uses the corresponding format.
index : identifier of index column, defaults to None
Identifier of column that should be used as index of the DataFrame.
encoding : string, default is None
Encoding for text data. If None, text data are stored as raw bytes.
chunksize : int
Read file `chunksize` lines at a time, returns iterator.
iterator : bool, defaults to False
If True, returns an iterator for reading the file incrementally.
Returns
-------
DataFrame if iterator=False and chunksize=None, else SAS7BDATReader
or XportReader
"""
if format is None:
buffer_error_msg = ("If this is a buffer object rather "
"than a string name, you must specify "
"a format string")
if not isinstance(filepath_or_buffer, compat.string_types):
raise ValueError(buffer_error_msg)
try:
fname = filepath_or_buffer.lower()
if fname.endswith(".xpt"):
format = "xport"
elif fname.endswith(".sas7bdat"):
format = "sas7bdat"
else:
raise ValueError("unable to infer format of SAS file")
except:
pass
if format.lower() == 'xport':
from pandas.io.sas.sas_xport import XportReader
reader = XportReader(filepath_or_buffer, index=index,
encoding=encoding,
chunksize=chunksize)
elif format.lower() == 'sas7bdat':
from pandas.io.sas.sas7bdat import SAS7BDATReader
reader = SAS7BDATReader(filepath_or_buffer, index=index,
encoding=encoding,
chunksize=chunksize)
else:
raise ValueError('unknown SAS format')
if iterator or chunksize:
return reader
data = reader.read()
reader.close()
return data
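# Illustrative usage (the file names below are hypothetical):
# df = read_sas('airline.sas7bdat', encoding='iso-8859-1')
# it = read_sas('airline.xpt', chunksize=10000)  # iterator of DataFrame chunks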
| mit |
macks22/gensim | gensim/sklearn_api/tfidf.py | 1 | 1952 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit learn interface for gensim for easy use of gensim with scikit-learn
Follows scikit-learn API conventions
"""
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
import gensim
from gensim.models import TfidfModel
class TfIdfTransformer(TransformerMixin, BaseEstimator):
"""
Base Tf-Idf module
"""
def __init__(self, id2word=None, dictionary=None, wlocal=gensim.utils.identity,
wglobal=gensim.models.tfidfmodel.df2idf, normalize=True):
"""
Sklearn wrapper for Tf-Idf model.
"""
self.gensim_model = None
self.id2word = id2word
self.dictionary = dictionary
self.wlocal = wlocal
self.wglobal = wglobal
self.normalize = normalize
def fit(self, X, y=None):
"""
Fit the model according to the given training data.
"""
self.gensim_model = TfidfModel(corpus=X, id2word=self.id2word, dictionary=self.dictionary,
wlocal=self.wlocal, wglobal=self.wglobal, normalize=self.normalize)
return self
def transform(self, docs):
"""
Return the transformed documents after multiplication with the tf-idf matrix.
"""
if self.gensim_model is None:
raise NotFittedError(
"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
)
# input as python lists
check = lambda x: [x] if isinstance(x[0], tuple) else x
docs = check(docs)
X = [[] for _ in range(0, len(docs))]
for k, v in enumerate(docs):
transformed_doc = self.gensim_model[v]
X[k] = transformed_doc
return X
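# Usage sketch (added for illustration; the tiny corpus below is made up and
# not part of the original module):
if __name__ == '__main__':
    from gensim.corpora import Dictionary
    _texts = [["human", "interface", "computer"],
              ["survey", "user", "computer", "system"]]
    _dct = Dictionary(_texts)
    _corpus = [_dct.doc2bow(t) for t in _texts]
    _tfidf = TfIdfTransformer().fit(_corpus)
    print(_tfidf.transform(_corpus))  # list of sparse (token_id, weight) vectors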
| lgpl-2.1 |
bloyl/mne-python | tutorials/stats-source-space/60_cluster_rmANOVA_spatiotemporal.py | 18 | 12326 | """
======================================================================
Repeated measures ANOVA on source data with spatio-temporal clustering
======================================================================
This example illustrates how to make use of the clustering functions
for arbitrary, self-defined contrasts beyond standard t-tests. In this
case we will tests if the differences in evoked responses between
stimulation modality (visual VS auditory) depend on the stimulus
location (left vs right) for a group of subjects (simulated here
using one subject's data). For this purpose we will compute an
interaction effect using a repeated measures ANOVA. The multiple
comparisons problem is addressed with a cluster-level permutation test
across space and time.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
# Denis Engemannn <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as plt
import mne
from mne.stats import (spatio_temporal_cluster_test, f_threshold_mway_rm,
f_mway_rm, summarize_clusters_stc)
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
subjects_dir = data_path + '/subjects'
src_fname = subjects_dir + '/fsaverage/bem/fsaverage-ico-5-src.fif'
tmin = -0.2
tmax = 0.3 # Use a lower tmax to reduce multiple comparisons
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for all channels, removing a bad one
# ------------------------------------------------
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
# we'll load all four conditions that make up the 'two ways' of our ANOVA
event_id = dict(l_aud=1, r_aud=2, l_vis=3, r_vis=4)
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
# Equalize trial counts to eliminate bias (which would otherwise be
# introduced by the abs() performed below)
epochs.equalize_event_counts(event_id)
###############################################################################
# Transform to source space
# -------------------------
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE, sLORETA, or eLORETA)
inverse_operator = read_inverse_operator(fname_inv)
# we'll only use one hemisphere to speed up this example
# instead of a second vertex array we'll pass an empty array
sample_vertices = [inverse_operator['src'][0]['vertno'], np.array([], int)]
# Let's average and compute inverse, then resample to speed things up
conditions = []
for cond in ['l_aud', 'r_aud', 'l_vis', 'r_vis']: # order is important
evoked = epochs[cond].average()
evoked.resample(50, npad='auto')
condition = apply_inverse(evoked, inverse_operator, lambda2, method)
# Let's only deal with t > 0, cropping to reduce multiple comparisons
condition.crop(0, None)
conditions.append(condition)
tmin = conditions[0].tmin
tstep = conditions[0].tstep * 1000 # convert to milliseconds
###############################################################################
# Transform to common cortical space
# ----------------------------------
#
# Normally you would read in estimates across several subjects and morph them
# to the same cortical space (e.g. fsaverage). For example purposes, we will
# simulate this by just having each "subject" have the same response (just
# noisy in source space) here.
#
# We'll only consider the left hemisphere in this tutorial.
n_vertices_sample, n_times = conditions[0].lh_data.shape
n_subjects = 7
print('Simulating data for %d subjects.' % n_subjects)
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X = randn(n_vertices_sample, n_times, n_subjects, 4) * 10
for ii, condition in enumerate(conditions):
X[:, :, :, ii] += condition.lh_data[:, :, np.newaxis]
###############################################################################
# It's a good idea to spatially smooth the data, and for visualization
# purposes, let's morph these to fsaverage, which is a grade 5 ICO source space
# with vertices 0:10242 for each hemisphere. Usually you'd have to morph
# each subject's data separately, but here since all estimates are on
# 'sample' we can use one morph matrix for all the heavy lifting.
# Read the source space we are morphing to (just left hemisphere)
src = mne.read_source_spaces(src_fname)
fsave_vertices = [src[0]['vertno'], []]
morph_mat = mne.compute_source_morph(
src=inverse_operator['src'], subject_to='fsaverage',
spacing=fsave_vertices, subjects_dir=subjects_dir, smooth=20).morph_mat
morph_mat = morph_mat[:, :n_vertices_sample] # just left hemi from src
n_vertices_fsave = morph_mat.shape[0]
# We have to change the shape for the dot() to work properly
X = X.reshape(n_vertices_sample, n_times * n_subjects * 4)
print('Morphing data.')
X = morph_mat.dot(X) # morph_mat is a sparse matrix
X = X.reshape(n_vertices_fsave, n_times, n_subjects, 4)
###############################################################################
# Now we need to prepare the group matrix for the ANOVA statistic. To make the
# clustering function work correctly with the ANOVA function X needs to be a
# list of multi-dimensional arrays (one per condition) of shape: samples
# (subjects) x time x space.
#
# First we permute dimensions, then split the array into a list of conditions
# and discard the empty dimension resulting from the split using numpy squeeze.
X = np.transpose(X, [2, 1, 0, 3])
X = [np.squeeze(x) for x in np.split(X, 4, axis=-1)]
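# Each entry of X now has shape (n_subjects, n_times, n_vertices_fsave), i.e.
# samples x time x space, which is the layout the clustering function expects.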
###############################################################################
# Prepare function for arbitrary contrast
# ---------------------------------------
# As our ANOVA function is a multi-purpose tool we need to apply a few
# modifications to integrate it with the clustering function. This
# includes reshaping data, setting default arguments and processing
# the return values. For this reason we'll write a tiny dummy function.
#
# We will tell the ANOVA how to interpret the data matrix in terms of
# factors. This is done via the factor levels argument which is a list
# of the number factor levels for each factor.
factor_levels = [2, 2]
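# With the condition order used above (l_aud, r_aud, l_vis, r_vis) and the
# f_mway_rm convention that the last factor rotates fastest, factor A is the
# stimulus modality (auditory vs. visual) and factor B is the stimulus
# location (left vs. right).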
###############################################################################
# Finally we will pick the interaction effect by passing 'A:B'.
# (this notation is borrowed from the R formula language).
# As an aside, note that in this particular example, we cannot use the A*B
# notation which returns both the main and the interaction effect. The reason
# is that the clustering function expects ``stat_fun`` to return a 1-D array.
# To get clusters for both, you must create a loop.
effects = 'A:B'
# Tell the ANOVA not to compute p-values which we don't need for clustering
return_pvals = False
# a few more convenient bindings
n_times = X[0].shape[1]
n_conditions = 4
###############################################################################
# A ``stat_fun`` must deal with a variable number of input arguments.
#
# Inside the clustering function each condition will be passed as flattened
# array, necessitated by the clustering procedure. The ANOVA however expects an
# input array of dimensions: subjects X conditions X observations (optional).
#
# The following function catches the list input and swaps the first and the
# second dimension, and finally calls ANOVA.
#
# .. note:: For further details on this ANOVA function consider the
# corresponding
# :ref:`time-frequency tutorial <tut-timefreq-twoway-anova>`.
def stat_fun(*args):
# get f-values only.
return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
effects=effects, return_pvals=return_pvals)[0]
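# Each element of args arrives as an array of shape (n_subjects, n_tests)
# where n_tests = n_times * n_vertices; stacking and swapping axes yields the
# subjects x conditions x observations layout f_mway_rm expects, and indexing
# with [0] keeps only the F-values.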
###############################################################################
# Compute clustering statistic
# ----------------------------
#
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial adjacency matrix (instead of spatio-temporal).
# as we only have one hemisphere we only need half the adjacency
print('Computing adjacency.')
adjacency = mne.spatial_src_adjacency(src[:1])
# Now let's actually do the clustering. Please relax, on a small
# notebook and one single thread only this will take a couple of minutes ...
pthresh = 0.0005
f_thresh = f_threshold_mway_rm(n_subjects, factor_levels, effects, pthresh)
# To speed things up a bit we will ...
n_permutations = 128 # ... run fewer permutations (reduces sensitivity)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
spatio_temporal_cluster_test(X, adjacency=adjacency, n_jobs=1,
threshold=f_thresh, stat_fun=stat_fun,
n_permutations=n_permutations,
buffer_size=None)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
###############################################################################
# Visualize the clusters
# ----------------------
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# The brighter the color, the stronger the interaction between
# stimulus modality and stimulus location
brain = stc_all_cluster_vis.plot(subjects_dir=subjects_dir, views='lat',
time_label='temporal extent (ms)',
clim=dict(kind='value', lims=[0, 1, 40]))
brain.save_image('cluster-lh.png')
brain.show_view('medial')
###############################################################################
# Finally, let's investigate interaction effect by reconstructing the time
# courses:
inds_t, inds_v = [(clusters[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds)][0] # first cluster
times = np.arange(X[0].shape[1]) * tstep * 1e3
plt.figure()
colors = ['y', 'b', 'g', 'purple']
event_ids = ['l_aud', 'r_aud', 'l_vis', 'r_vis']
for ii, (condition, color, eve_id) in enumerate(zip(X, colors, event_ids)):
# extract time course at cluster vertices
condition = condition[:, :, inds_v]
# normally we would normalize values across subjects but
# here we use data from the same subject so we're good to just
# create average time series across subjects and vertices.
mean_tc = condition.mean(axis=2).mean(axis=0)
std_tc = condition.std(axis=2).std(axis=0)
plt.plot(times, mean_tc.T, color=color, label=eve_id)
plt.fill_between(times, mean_tc + std_tc, mean_tc - std_tc, color='gray',
alpha=0.5, label='')
ymin, ymax = mean_tc.min() - 5, mean_tc.max() + 5
plt.xlabel('Time (ms)')
plt.ylabel('Activation (F-values)')
plt.xlim(times[[0, -1]])
plt.ylim(ymin, ymax)
plt.fill_betweenx((ymin, ymax), times[inds_t[0]],
times[inds_t[-1]], color='orange', alpha=0.3)
plt.legend()
plt.title('Interaction between stimulus-modality and location.')
plt.show()
| bsd-3-clause |
anntzer/scipy | scipy/special/_precompute/lambertw.py | 12 | 2001 | """Compute a Pade approximation for the principal branch of the
Lambert W function around 0 and compare it to various other
approximations.
"""
import numpy as np
try:
import mpmath
import matplotlib.pyplot as plt # type: ignore[import]
except ImportError:
pass
def lambertw_pade():
derivs = [mpmath.diff(mpmath.lambertw, 0, n=n) for n in range(6)]
p, q = mpmath.pade(derivs, 3, 2)
return p, q
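# Note: mpmath.pade returns numerator and denominator coefficients in
# ascending order of powers, which is why main() reverses them before calling
# np.polyval (which expects the highest-degree coefficient first).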
def main():
print(__doc__)
with mpmath.workdps(50):
p, q = lambertw_pade()
p, q = p[::-1], q[::-1]
print("p = {}".format(p))
print("q = {}".format(q))
x, y = np.linspace(-1.5, 1.5, 75), np.linspace(-1.5, 1.5, 75)
x, y = np.meshgrid(x, y)
z = x + 1j*y
lambertw_std = []
for z0 in z.flatten():
lambertw_std.append(complex(mpmath.lambertw(z0)))
lambertw_std = np.array(lambertw_std).reshape(x.shape)
fig, axes = plt.subplots(nrows=3, ncols=1)
# Compare Pade approximation to true result
p = np.array([float(p0) for p0 in p])
q = np.array([float(q0) for q0 in q])
pade_approx = np.polyval(p, z)/np.polyval(q, z)
pade_err = abs(pade_approx - lambertw_std)
axes[0].pcolormesh(x, y, pade_err)
# Compare two terms of asymptotic series to true result
asy_approx = np.log(z) - np.log(np.log(z))
asy_err = abs(asy_approx - lambertw_std)
axes[1].pcolormesh(x, y, asy_err)
# Compare two terms of the series around the branch point to the
# true result
p = np.sqrt(2*(np.exp(1)*z + 1))
series_approx = -1 + p - p**2/3
series_err = abs(series_approx - lambertw_std)
im = axes[2].pcolormesh(x, y, series_err)
fig.colorbar(im, ax=axes.ravel().tolist())
plt.show()
fig, ax = plt.subplots(nrows=1, ncols=1)
pade_better = pade_err < asy_err
im = ax.pcolormesh(x, y, pade_better)
t = np.linspace(-0.3, 0.3)
ax.plot(-2.5*abs(t) - 0.2, t, 'r')
fig.colorbar(im, ax=ax)
plt.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
kkk669/mxnet | docs/conf.py | 20 | 6941 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
import sys, os, re, subprocess
import mock
from recommonmark import parser
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
libpath = os.path.join(curr_path, '../python/')
sys.path.insert(0, libpath)
sys.path.insert(0, curr_path)
# -- mock out modules
MOCK_MODULES = ['scipy', 'scipy.sparse', 'sklearn']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# General information about the project.
project = u'mxnet'
author = u'%s developers' % project
copyright = u'2015-2017, %s' % author
github_doc_root = 'https://github.com/dmlc/mxnet/tree/master/docs/'
doc_root = 'http://mxnet.io/'
# add markdown parser
source_parsers = {
'.md': parser.CommonMarkParser,
'.Rmd': parser.CommonMarkParser
}
# Version information.
# from mxnet import libinfo
# version = libinfo.__version__
# release = libinfo.__version__
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'breathe',
'mxdoc'
]
# Use breathe to include doxygen documents
breathe_projects = {'mxnet' : 'doxygen/xml/'}
breathe_default_project = 'mxnet'
autodoc_member_order = 'bysource'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
# source_suffix = '.rst'
source_suffix = ['.rst', '.md', '.Rmd']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Version and release are passed from CMake.
#version = None
# The full version, including alpha/beta/rc tags.
#release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['virtualenv']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
suppress_warnings = [
'image.nonlocal_uri',
]
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'mxnet-theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_static']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': 'relations.html'
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'formatdoc'
| apache-2.0 |
LiaoPan/scikit-learn | sklearn/linear_model/coordinate_descent.py | 42 | 73973 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. For
        ``l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = center_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
normalize)
mean_dot = X_mean * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_std[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
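# Note on the grid above: alpha_max is the smallest penalty for which all
# coefficients are exactly zero, and the returned alphas are log-spaced
# between eps * alpha_max and alpha_max, ordered from largest to smallest.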
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
    Note that in certain cases, the Lars solver may be significantly
    faster at implementing this functionality. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/plot_lasso_coordinate_descent_path.py for an example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
if Xy is not None:
Xy = check_array(Xy, 'csc', dtype=np.float64, order='F', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_mean' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_mean'] / params['X_std']
else:
X_sparse_scaling = np.zeros(n_features)
# X should be normalized and fit already.
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False, fit_intercept=False,
copy=False)
if alphas is None:
# No need to normalize of fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=np.float64)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=np.float64)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
else:
coef_ = np.asfortranarray(coef_init)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
precompute = check_array(precompute, 'csc', dtype=np.float64, order='F')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like")
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2 +
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
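        (for example, a = 1 and b = 0.5 give alpha = 1.5 and l1_ratio = 2/3)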
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float
Constant that multiplies the penalty terms. Defaults to 1.0
See the notes for the exact mathematical meaning of this
parameter.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit model with coordinate descent.
Parameters
        ----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if self.precompute == 'auto':
warnings.warn("Setting precompute to 'auto', was found to be "
"slower even when n_samples > n_features. Hence "
"it will be removed in 0.18.",
DeprecationWarning, stacklevel=2)
X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
order='F', copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
X, y, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_mean=X_mean, X_std=X_std, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_mean, y_mean, X_std)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted coef """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
+ self.intercept_)
else:
return super(ElasticNet, self)._decision_function(X)
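# Editor's note: a short sketch of the a * L1 + b * L2 reparameterization from
# the class docstring (alpha = a + b, l1_ratio = a / (a + b)). The helper name
# and the toy data are illustrative only.
def _example_elastic_net_penalties():  # hypothetical, for illustration only
    import numpy as np
    a, b = 0.7, 0.3                       # desired L1 and L2 penalty weights
    alpha, l1_ratio = a + b, a / (a + b)
    rng = np.random.RandomState(0)
    X = rng.randn(40, 5)
    y = X[:, 0] + 0.1 * rng.randn(40)
    model = ElasticNet(alpha=alpha, l1_ratio=l1_ratio).fit(X, y)
    print(model.coef_, model.intercept_)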
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
and you should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument. For sparse input
this option is always ``True`` to preserve sparsity.
WARNING : The ``'auto'`` option is deprecated and will
be removed in 0.18.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
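# Editor's note: a quick sketch showing that Lasso is ElasticNet with
# l1_ratio=1.0, as stated in the docstring above. Toy data; the helper is not
# part of the module API.
def _example_lasso_equivalence():  # hypothetical, for illustration only
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    y = X[:, 1] + 0.05 * rng.randn(30)
    lasso = Lasso(alpha=0.1).fit(X, y)
    enet = ElasticNet(alpha=0.1, l1_ratio=1.0).fit(X, y)
    print(np.allclose(lasso.coef_, enet.coef_))  # expected to print True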
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_mean'] = X_mean
path_params['X_std'] = X_std
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_mean = np.atleast_1d(y_mean)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_std)
coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
        # Work around for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
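# Editor's note: a plain-numpy sketch of the residue averaging performed above
# for a single output and a handful of alphas; the shapes and values are made
# up for illustration.
def _example_mse_along_path():  # hypothetical, for illustration only
    import numpy as np
    rng = np.random.RandomState(0)
    n_samples, n_alphas = 20, 5
    preds = rng.randn(n_samples, n_alphas)   # stands in for X_test.dot(coefs)
    y_test = rng.randn(n_samples, 1)
    residues = preds - y_test                # broadcasts y_test over alphas
    mses = (residues ** 2).mean(axis=0)      # one MSE per alpha on this fold
    print(mses.shape)                        # (5,)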
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if not np.may_share_memory(reference_to_old_X.data, X.data):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv, X)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv)
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
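# Editor's note: a small sketch of the selection step above, picking the
# (l1_ratio, alpha) pair with the lowest mean MSE. All numbers are invented.
def _example_pick_best_alpha():  # hypothetical, for illustration only
    import numpy as np
    l1_ratios = [0.5, 1.0]
    alpha_grid = np.array([[1.0, 0.1, 0.01], [1.0, 0.1, 0.01]])
    mean_mse = np.array([[3.0, 1.2, 1.5], [2.5, 1.1, 1.4]])
    best = np.unravel_index(np.argmin(mean_mse), mean_mse.shape)
    print(l1_ratios[best[0]], alpha_grid[best])  # l1_ratio 1.0, alpha 0.1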
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
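# Editor's note: a minimal LassoCV usage sketch; the synthetic data and the
# explicit cv=3 (the default number of folds) are illustrative only.
def _example_lasso_cv():  # hypothetical, for illustration only
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(60, 8)
    y = X[:, 0] - 2 * X[:, 3] + 0.1 * rng.randn(60)
    reg = LassoCV(cv=3).fit(X, y)
    print(reg.alpha_)              # penalty chosen by cross-validation
    print(reg.mse_path_.shape)     # (n_alphas, n_folds), here (100, 3)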
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula)
    intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
    mse_path_ : array, shape (n_l1_ratio, n_alphas, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/lasso_path_with_crossvalidation.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
        1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
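# Editor's note: a brief ElasticNetCV sketch using a list of l1_ratio values,
# as suggested in the docstring above; the grid and the data are illustrative.
def _example_elastic_net_cv():  # hypothetical, for illustration only
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(80, 10)
    y = X[:, 0] + 0.5 * X[:, 1] + 0.1 * rng.randn(80)
    reg = ElasticNetCV(l1_ratio=[.1, .5, .7, .9, .95, .99, 1], cv=3).fit(X, y)
    print(reg.l1_ratio_, reg.alpha_)   # mixing parameter and penalty chosen by CV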
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
        ----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = np.asarray(y, dtype=np.float64)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_mean, y_mean, X_std = center_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_mean, y_mean, X_std)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
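# Editor's note: a sketch of the row-wise (joint) sparsity induced by the L1/L2
# mixed norm: a feature is kept or dropped for all tasks at once. Toy data only;
# the helper is not part of the module API.
def _example_multitask_enet():  # hypothetical, for illustration only
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(50, 6)
    W = np.zeros((6, 2))
    W[0] = [1.0, -1.0]
    W[2] = [0.5, 0.5]                        # only features 0 and 2 are informative
    Y = X.dot(W) + 0.01 * rng.randn(50, 2)
    model = MultiTaskElasticNet(alpha=0.1).fit(X, Y)
    # coef_ has shape (n_tasks, n_features); summing |coef_| over tasks shows
    # that uninformative features are shrunk jointly across both tasks
    print(np.abs(model.coef_).sum(axis=0))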
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
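# Editor's note: a minimal MultiTaskLasso sketch mirroring the doctest above,
# spelling out the coef_ shape; values are illustrative.
def _example_multitask_lasso():  # hypothetical, for illustration only
    import numpy as np
    X = np.array([[0., 0.], [1., 1.], [2., 2.]])
    Y = np.array([[0., 0.], [1., 1.], [2., 2.]])
    model = MultiTaskLasso(alpha=0.1).fit(X, Y)
    print(model.coef_.shape)   # (2, 2): one row per task, one column per feature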
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
        is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
        If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see the
:mod:`sklearn.cross_validation` module for the list of possible
objects.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
| bsd-3-clause |
alexvmarch/exa | exa/util/mpl.py | 2 | 6838 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Matplotlib Utilities
###############################
"""
import seaborn as sns
import numpy as np
#from mpl_toolkits.mplot3d import Axes3D
legend = {'legend.frameon': True, 'legend.facecolor': 'white',
'legend.fancybox': True, 'patch.facecolor': 'white',
'patch.edgecolor': 'black'}
axis = {'axes.formatter.useoffset': False}
mpl_legend = {'legend.frameon': True, 'legend.facecolor': 'white',
'legend.edgecolor': 'black'}
mpl_mathtext = {'mathtext.default': 'rm'}
mpl_save = {'savefig.format': 'pdf', 'savefig.bbox': 'tight',
'savefig.transparent': True, 'savefig.pad_inches': 0.1,
'pdf.compression': 9}
mpl_rc = mpl_legend
mpl_rc.update(mpl_mathtext)
mpl_rc.update(mpl_save)
sns.set(context='poster', style='white', palette='colorblind', font_scale=1.3,
font='serif', rc=mpl_rc)
def _gen_projected(nxplot, nyplot, projection, figargs):
total = nxplot * nyplot
fig = sns.mpl.pyplot.figure(**figargs)
kwargs = {'projection': projection}
axs = [fig.add_subplot(nxplot, nyplot, i, **kwargs) for i in range(1, total + 1)]
return fig, axs
def _gen_shared(nxplot, nyplot, sharex, sharey, figargs):
fig, axs = sns.mpl.pyplot.subplots(nxplot, nyplot, sharex=sharex,
sharey=sharey, **figargs)
axs = fig.get_axes()
return fig, axs
def _gen_figure(nxplot=1, nyplot=1, figargs=None, projection=None,
sharex='none', joinx=False, sharey='none', joiny=False,
x=None, nxlabel=None, xlabels=None, nxdecimal=None, xmin=None, xmax=None,
y=None, nylabel=None, ylabels=None, nydecimal=None, ymin=None, ymax=None,
z=None, nzlabel=None, zlabels=None, nzdecimal=None, zmin=None, zmax=None,
r=None, nrlabel=None, rlabels=None, nrdecimal=None, rmin=None, rmax=None,
t=None, ntlabel=None, tlabels=None, fontsize=20):
"""
Returns a figure object with as much customization as provided.
"""
figargs = {} if figargs is None else figargs
if projection is not None:
fig, axs = _gen_projected(nxplot, nyplot, projection, figargs)
else:
fig, axs = _gen_shared(nxplot, nyplot, sharex, sharey, figargs)
adj = {}
if joinx: adj.update({'hspace': 0})
if joiny: adj.update({'wspace': 0})
fig.subplots_adjust(**adj)
data = {}
if projection is None:
data = {'x': x, 'y': y}
elif projection == '3d':
data = {'x': x, 'y': y, 'z': z}
elif projection == 'polar':
data = {'r': r, 't': t}
methods = {}
for ax in axs:
if 'x' in data:
methods['x'] = (ax.set_xlim, ax.set_xticks, ax.set_xticklabels,
nxlabel, xlabels, nxdecimal, xmin, xmax)
if 'y' in data:
methods['y'] = (ax.set_ylim, ax.set_yticks, ax.set_yticklabels,
nylabel, ylabels, nydecimal, ymin, ymax)
if 'z' in data:
methods['z'] = (ax.set_zlim, ax.set_zticks, ax.set_zticklabels,
nzlabel, zlabels, nzdecimal, zmin, zmax)
if 'r' in data:
methods['r'] = (ax.set_rlim, ax.set_rticks, ax.set_rgrids,
nrlabel, rlabels, nrdecimal, rmin, rmax)
if 't' in data:
methods['t'] = (ax.set_thetagrids, ntlabel, tlabels)
for dim, arr in data.items():
if dim == 't':
grids, nlabel, labls = methods[dim]
if ntlabel is not None:
theta = np.arange(0, 2 * np.pi, 2 * np.pi / ntlabel)
if labls is not None:
grids(np.degrees(theta), labls, fontsize=fontsize)
else:
grids(np.degrees(theta), fontsize=fontsize)
else:
lim, ticks, labels, nlabel, labls, decs, mins, maxs = methods[dim]
if arr is not None:
amin = mins if mins is not None else arr.min()
amax = maxs if maxs is not None else arr.max()
lim((amin, amax))
            elif mins is not None and maxs is not None:
                amin, amax = mins, maxs  # use the explicit bounds when no data array is given
if nlabel is not None:
ticks(np.linspace(amin, amax, nlabel))
if decs is not None:
sub = "{{:.{}f}}".format(decs).format
labels([sub(i) for i in np.linspace(amin, amax, nlabel)])
if labls is not None:
labels(labls)
ax.tick_params(axis=dim, labelsize=fontsize)
return fig
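# Editor's note: a small sketch of driving the helper above with Cartesian data;
# the values are illustrative and assume a working matplotlib backend.
def _example_gen_figure():  # hypothetical, not part of the module API
    x = np.linspace(0, 1, 50)
    y = np.sin(2 * np.pi * x)
    fig = _gen_figure(x=x, y=y)        # axis limits are taken from the data
    ax = fig.get_axes()[0]
    ax.plot(x, y)
    return fig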
def _plot_surface(x, y, z, nxlabel, nylabel, nzlabel, method,
figargs, axargs):
fig = _gen_figure(x=x, y=y, z=z, nxlabel=nxlabel,
nylabel=nylabel, nzlabel=nzlabel,
figargs=figargs, projection='3d')
ax = fig.get_axes()[0]
convenience = {'wireframe': ax.plot_wireframe,
'contour': ax.contour,
'contourf': ax.contourf,
'trisurf': ax.plot_trisurf,
'scatter': ax.scatter,
'line': ax.plot}
if method not in convenience.keys():
raise Exception('Method must be in {}.'.format(convenience.keys()))
sx, sy = np.meshgrid(x, y)
if method in ['trisurf', 'scatter', 'line']:
if method == 'line':
axargs = {key: val for key, val in axargs.items() if key != 'cmap'}
convenience[method](sx.flatten(), sy.flatten(), z.flatten(), **axargs)
else:
convenience[method](sx, sy, z, **axargs)
return fig
def _plot_contour(x, y, z, vmin, vmax, cbarlabel, ncbarlabel, ncbardecimal,
nxlabel, nylabel, method, colorbar, figargs, axargs):
fig = _gen_figure(x=x, y=y, nxlabel=nxlabel, nylabel=nylabel, figargs=figargs)
ax = fig.get_axes()[0]
convenience = {'contour': ax.contour,
'contourf': ax.contourf,
'pcolormesh': ax.pcolormesh,
'pcolor': ax.pcolor}
if method not in convenience.keys():
raise Exception('method must be in {}'.format(convenience.keys()))
t = convenience[method](x, y, z, **axargs)
cbar = fig.colorbar(t) if colorbar else None
if cbar is not None and cbarlabel is not None:
cbar.set_label(cbarlabel)
if cbar is not None and ncbarlabel is not None:
newticks = np.linspace(vmin, vmax, ncbarlabel)
cbar.set_ticks(newticks)
if ncbardecimal is not None:
fmt = '{{:.{}f}}'.format(ncbardecimal).format
cbar.set_ticklabels([fmt(i) for i in newticks])
return fig, cbar
| apache-2.0 |
goodfeli/pylearn2 | pylearn2/sandbox/cuda_convnet/bench.py | 44 | 3589 | __authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano.compat.six.moves import xrange
from theano import shared
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from theano.tensor.nnet.conv import conv2d
from theano import function
import time
import matplotlib.pyplot as plt
def make_funcs(batch_size, rows, cols, channels, filter_rows,
num_filters):
rng = np.random.RandomState([2012,10,9])
filter_cols = filter_rows
base_image_value = rng.uniform(-1., 1., (channels, rows, cols,
batch_size)).astype('float32')
base_filters_value = rng.uniform(-1., 1., (channels, filter_rows,
filter_cols, num_filters)).astype('float32')
images = shared(base_image_value)
filters = shared(base_filters_value, name='filters')
# bench.py should always be run in gpu mode so we should not need a gpu_from_host here
output = FilterActs()(images, filters)
output_shared = shared( output.eval() )
cuda_convnet = function([], updates = { output_shared : output } )
cuda_convnet.name = 'cuda_convnet'
images_bc01v = base_image_value.transpose(3,0,1,2)
filters_bc01v = base_filters_value.transpose(3,0,1,2)
filters_bc01v = filters_bc01v[:,:,::-1,::-1]
images_bc01 = shared(images_bc01v)
filters_bc01 = shared(filters_bc01v)
output_conv2d = conv2d(images_bc01, filters_bc01,
border_mode='valid', image_shape = images_bc01v.shape,
filter_shape = filters_bc01v.shape)
output_conv2d_shared = shared(output_conv2d.eval())
baseline = function([], updates = { output_conv2d_shared : output_conv2d } )
baseline.name = 'baseline'
return cuda_convnet, baseline
def bench(f):
for i in xrange(3):
f()
trials = 10
t1 = time.time()
for i in xrange(trials):
f()
t2 = time.time()
return (t2-t1)/float(trials)
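# Editor's note: a tiny usage sketch for the timing helper above; the dummy
# callable stands in for a compiled theano function and is purely illustrative.
def _example_bench():  # hypothetical, not part of the benchmark
    def dummy():
        return sum(xrange(1000))
    avg_seconds = bench(dummy)
    print('average seconds per call: %f' % avg_seconds)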
def get_speedup( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
return bench(baseline) / bench(cuda_convnet)
def get_time_per_10k_ex( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
batch_size = kwargs['batch_size']
return 10000 * bench(cuda_convnet) / float(batch_size)
def make_batch_size_plot(yfunc, yname, batch_sizes, rows, cols, channels, filter_rows, num_filters):
speedups = []
for batch_size in batch_sizes:
speedup = yfunc(batch_size = batch_size,
rows = rows,
cols = cols,
channels = channels,
filter_rows = filter_rows,
num_filters = num_filters)
speedups.append(speedup)
plt.plot(batch_sizes, speedups)
plt.title("cuda-convnet benchmark")
plt.xlabel("Batch size")
plt.ylabel(yname)
plt.show()
make_batch_size_plot(get_speedup, "Speedup factor", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 64,
filter_rows = 7,
num_filters = 64)
"""
make_batch_size_plot(get_time_per_10k_ex, "Time per 10k examples", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 5,
num_filters = 64)
"""
| bsd-3-clause |
joernhees/scikit-learn | examples/ensemble/plot_random_forest_regression_multioutput.py | 46 | 2640 | """
============================================================
Comparing random forests and the multi-output meta estimator
============================================================
An example to compare multi-output regression with random forest and
the :ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator.
This example illustrates the use of the
:ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator
to perform multi-output regression. A random forest regressor is used,
which supports multi-output regression natively, so the results can be
compared.
The random forest regressor will only ever predict values within the
range of observations or closer to zero for each of the targets. As a
result the predictions are biased towards the centre of the circle.
Using a single underlying feature the model learns both the
x and y coordinate as output.
"""
print(__doc__)
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(600, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y += (0.5 - rng.rand(*y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size=400,
random_state=4)
max_depth = 30
regr_multirf = MultiOutputRegressor(RandomForestRegressor(max_depth=max_depth,
random_state=0))
regr_multirf.fit(X_train, y_train)
regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)
regr_rf.fit(X_train, y_train)
# Predict on new data
y_multirf = regr_multirf.predict(X_test)
y_rf = regr_rf.predict(X_test)
# Plot the results
plt.figure()
s = 50
a = 0.4
plt.scatter(y_test[:, 0], y_test[:, 1],
c="navy", s=s, marker="s", alpha=a, label="Data")
plt.scatter(y_multirf[:, 0], y_multirf[:, 1],
c="cornflowerblue", s=s, alpha=a,
label="Multi RF score=%.2f" % regr_multirf.score(X_test, y_test))
plt.scatter(y_rf[:, 0], y_rf[:, 1],
c="c", s=s, marker="^", alpha=a,
label="RF score=%.2f" % regr_rf.score(X_test, y_test))
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Comparing random forests and the multi-output meta estimator")
plt.legend()
plt.show()
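# Editor's note: a hedged sketch (not part of the original example) of what
# MultiOutputRegressor does conceptually -- it fits one clone of the base
# regressor per output column, whereas RandomForestRegressor accepts the 2-d
# y directly. The variable names below are illustrative only.
per_target = [RandomForestRegressor(max_depth=max_depth, random_state=0)
              .fit(X_train, y_train[:, k]) for k in range(y_train.shape[1])]
y_manual = np.column_stack([m.predict(X_test) for m in per_target])
# y_manual should closely match y_multirf computed above.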
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/matplotlib/backends/backend_gtkagg.py | 10 | 4369 | """
Render to gtk from agg
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_gtk import gtk, FigureManagerGTK, FigureCanvasGTK,\
show, draw_if_interactive,\
error_msg_gtk, PIXELS_PER_INCH, backend_version, \
NavigationToolbar2GTK
from matplotlib.backends._gtkagg import agg_to_gtk_drawable
DEBUG = False
class NavigationToolbar2GTKAgg(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKAgg(fig)
class FigureManagerGTKAgg(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKAgg (canvas, self.window)
else:
toolbar = None
return toolbar
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if DEBUG: print('backend_gtkagg.new_figure_manager')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasGTKAgg(figure)
if DEBUG: print('backend_gtkagg.new_figure_manager done')
return FigureManagerGTKAgg(canvas, num)
class FigureCanvasGTKAgg(FigureCanvasGTK, FigureCanvasAgg):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(FigureCanvasAgg.filetypes)
def configure_event(self, widget, event=None):
if DEBUG: print('FigureCanvasGTKAgg.configure_event')
if widget.window is None:
return
try:
del self.renderer
except AttributeError:
pass
w,h = widget.window.get_size()
if w==1 or h==1: return # empty fig
# compute desired figure size in inches
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch, hinch, forward=False)
self._need_redraw = True
self.resize_event()
if DEBUG: print('FigureCanvasGTKAgg.configure_event end')
return True
def _render_figure(self, pixmap, width, height):
if DEBUG: print('FigureCanvasGTKAgg.render_figure')
FigureCanvasAgg.draw(self)
if DEBUG: print('FigureCanvasGTKAgg.render_figure pixmap', pixmap)
#agg_to_gtk_drawable(pixmap, self.renderer._renderer, None)
buf = self.buffer_rgba()
ren = self.get_renderer()
w = int(ren.width)
h = int(ren.height)
pixbuf = gtk.gdk.pixbuf_new_from_data(
buf, gtk.gdk.COLORSPACE_RGB, True, 8, w, h, w*4)
pixmap.draw_pixbuf(pixmap.new_gc(), pixbuf, 0, 0, 0, 0, w, h,
gtk.gdk.RGB_DITHER_NONE, 0, 0)
if DEBUG: print('FigureCanvasGTKAgg.render_figure done')
def blit(self, bbox=None):
if DEBUG: print('FigureCanvasGTKAgg.blit', self._pixmap)
agg_to_gtk_drawable(self._pixmap, self.renderer._renderer, bbox)
x, y, w, h = self.allocation
self.window.draw_drawable (self.style.fg_gc[self.state], self._pixmap,
0, 0, 0, 0, w, h)
if DEBUG: print('FigureCanvasGTKAgg.done')
def print_png(self, filename, *args, **kwargs):
# Do this so we can save the resolution of figure in the PNG file
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(filename, *args, **kwargs)
"""\
Traceback (most recent call last):
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtk.py", line 304, in expose_event
self._render_figure(self._pixmap, w, h)
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtkagg.py", line 77, in _render_figure
pixbuf = gtk.gdk.pixbuf_new_from_data(
ValueError: data length (3156672) is less then required by the other parameters (3160608)
"""
FigureCanvas = FigureCanvasGTKAgg
FigureManager = FigureManagerGTKAgg
| gpl-3.0 |
jseabold/statsmodels | statsmodels/datasets/longley/data.py | 4 | 1924 | """Longley dataset"""
from statsmodels.datasets import utils as du
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
The classic 1967 Longley Data
http://www.itl.nist.gov/div898/strd/lls/data/Longley.shtml
::
Longley, J.W. (1967) "An Appraisal of Least Squares Programs for the
Electronic Computer from the Point of View of the User." Journal of
the American Statistical Association. 62.319, 819-41.
"""
DESCRSHORT = """"""
DESCRLONG = """The Longley dataset contains various US macroeconomic
variables that are known to be highly collinear. It has been used to appraise
the accuracy of least squares routines."""
NOTE = """::
Number of Observations - 16
Number of Variables - 6
Variable name definitions::
TOTEMP - Total Employment
GNPDEFL - GNP deflator
GNP - GNP
UNEMP - Number of unemployed
ARMED - Size of armed forces
POP - Population
YEAR - Year (1947 - 1962)
"""
def load(as_pandas=None):
"""
Load the Longley data and return a Dataset class.
Parameters
----------
as_pandas : bool
Flag indicating whether to return pandas DataFrames and Series
or numpy recarrays and arrays. If True, returns pandas.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information.
"""
return du.as_numpy_dataset(load_pandas(), as_pandas=as_pandas)
def load_pandas():
"""
Load the Longley data and return a Dataset class.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_pandas(data, endog_idx=0)
def _get_data():
data = du.load_csv(__file__, 'longley.csv')
data = data.iloc[:, [1, 2, 3, 4, 5, 6, 7]].astype(float)
return data
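if __name__ == "__main__":
    # Editor's note: a minimal, hedged usage sketch, not part of the original
    # module. It assumes statsmodels' OLS API is importable and simply
    # regresses total employment on the remaining Longley variables.
    import statsmodels.api as sm
    dataset = load_pandas()
    results = sm.OLS(dataset.endog, sm.add_constant(dataset.exog)).fit()
    print(results.summary())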
| bsd-3-clause |
mjgrav2001/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs need cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
amolkahat/pandas | asv_bench/benchmarks/io/excel.py | 5 | 1173 | import numpy as np
from pandas import DataFrame, date_range, ExcelWriter, read_excel
from pandas.compat import BytesIO
import pandas.util.testing as tm
class Excel(object):
params = ['openpyxl', 'xlsxwriter', 'xlwt']
param_names = ['engine']
def setup(self, engine):
N = 2000
C = 5
self.df = DataFrame(np.random.randn(N, C),
columns=['float{}'.format(i) for i in range(C)],
index=date_range('20000101', periods=N, freq='H'))
self.df['object'] = tm.makeStringIndex(N)
self.bio_read = BytesIO()
self.writer_read = ExcelWriter(self.bio_read, engine=engine)
self.df.to_excel(self.writer_read, sheet_name='Sheet1')
self.writer_read.save()
self.bio_read.seek(0)
def time_read_excel(self, engine):
read_excel(self.bio_read)
def time_write_excel(self, engine):
bio_write = BytesIO()
bio_write.seek(0)
writer_write = ExcelWriter(bio_write, engine=engine)
self.df.to_excel(writer_write, sheet_name='Sheet1')
writer_write.save()
from ..pandas_vb_common import setup # noqa: F401
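# Editor's note: a hedged sketch of driving this benchmark by hand, outside
# the asv runner (engine availability and a reader backend are assumed):
#
#   bench = Excel()
#   bench.setup('xlsxwriter')         # any value from Excel.params
#   bench.time_write_excel('xlsxwriter')
#   bench.time_read_excel('xlsxwriter')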
| bsd-3-clause |
ycaihua/scikit-learn | benchmarks/bench_random_projections.py | 397 | 8900 | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
gc.collect()
clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
time_to_fit, time_to_transform = bench_scikit_transformer(X_dense,
transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
| bsd-3-clause |
JeanKossaifi/scikit-learn | sklearn/utils/multiclass.py | 45 | 12390 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
- mix of label indicator matrix and anything else,
(because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not array_equal(clf.classes_, unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
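# Editor's note: a hedged usage sketch for _check_partial_fit_first_call,
# using a hypothetical bare estimator object:
#
#   >>> class _Dummy(object): pass
#   >>> clf = _Dummy()
#   >>> _check_partial_fit_first_call(clf, classes=[0, 1, 2])  # first call
#   True
#   >>> clf.classes_
#   array([0, 1, 2])
#   >>> _check_partial_fit_first_call(clf)                     # later calls
#   False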
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
# An explicit zero was found, combine its weight with the weight
# of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
# If there is an implicit zero and it is not in classes and
# class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
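# Editor's note: a hedged example of class_distribution on a small dense
# multi-output target (the printed values below are approximate):
#
#   >>> y = np.array([[1, 0], [2, 0], [2, 1]])
#   >>> classes, n_classes, priors = class_distribution(y)
#   >>> classes
#   [array([1, 2]), array([0, 1])]
#   >>> n_classes
#   [2, 2]
#   >>> priors    # roughly [array([0.33, 0.67]), array([0.67, 0.33])]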
| bsd-3-clause |
kingtaurus/cs224d | assignment3/codebase_release/rnn.py | 1 | 16026 | import sys
import os
import random
import numpy as np
import matplotlib.pyplot as plt
import math
import time
import itertools
import shutil
import tensorflow as tf
import tree as tr
from utils import Vocab
from collections import OrderedDict
import seaborn as sns
sns.set_style('whitegrid')
def initialize_uninitialized_vars(session):
uninitialized = [ var for var in tf.all_variables()
if not session.run(tf.is_variable_initialized(var)) ]
session.run(tf.initialize_variables(uninitialized))
def variable_summaries(variable, name):
with tf.name_scope("summaries"):
mean = tf.reduce_mean(variable)
tf.summary.scalar('mean/' + name, mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(variable - mean)))
tf.summary.scalar('stddev/' + name, stddev)
tf.summary.scalar('max/' + name, tf.reduce_max(variable))
tf.summary.scalar('min/' + name, tf.reduce_min(variable))
tf.summary.histogram(name, variable)
RESET_AFTER = 50
class Config(object):
"""Holds model hyperparams and data information.
Model objects are passed a Config() object at instantiation.
"""
embed_size = 50
label_size = 2
early_stopping = 2
anneal_threshold = 0.99
anneal_by = 1.5
max_epochs = 30
lr = 0.01
l2 = 0.02
model_name = 'rnn_embed=%d_l2=%f_lr=%f.weights'%(embed_size, l2, lr)
class RNN_Model():
def load_data(self):
"""Loads train/dev/test data and builds vocabulary."""
self.train_data, self.dev_data, self.test_data = tr.simplified_data(700, 100, 200)
# build vocab from training data
self.vocab = Vocab()
train_sents = [t.get_words() for t in self.train_data]
self.vocab.construct(list(itertools.chain.from_iterable(train_sents)))
def inference(self, tree, predict_only_root=False):
"""For a given tree build the RNN models computation graph up to where it
may be used for inference.
Args:
tree: a Tree object on which to build the computation graph for the RNN
Returns:
softmax_linear: Output tensor with the computed logits.
"""
node_tensors = self.add_model(tree.root)
if predict_only_root:
node_tensors = node_tensors[tree.root]
else:
node_tensors = [tensor for node, tensor in node_tensors.items() if node.label!=2]
node_tensors = tf.concat(0, node_tensors)
return self.add_projections(node_tensors)
def add_model_vars(self):
'''
Your model contains the following parameters:
embedding: tensor(vocab_size, embed_size)
W1: tensor(2* embed_size, embed_size)
b1: tensor(1, embed_size)
U: tensor(embed_size, output_size)
bs: tensor(1, output_size)
Hint: Add the tensorflow variables to the graph here and *reuse* them while building
the computation graphs for composition and projection for each tree
Hint: Use a variable_scope "Composition" for the composition layer, and
"Projection" for the linear transformations preceding the softmax.
'''
with tf.variable_scope('Composition'):
### YOUR CODE HERE
#initializer=initializer=tf.random_normal_initializer(0,3)
embedding = tf.get_variable("embedding",
[self.vocab.total_words, self.config.embed_size])
W1 = tf.get_variable("W1", [2 * self.config.embed_size, self.config.embed_size])
b1 = tf.get_variable("b1", [1, self.config.embed_size])
variable_summaries(embedding, embedding.name)
variable_summaries(W1, W1.name)
variable_summaries(b1, b1.name)
### END YOUR CODE
with tf.variable_scope('Projection'):
### YOUR CODE HERE
U = tf.get_variable("U", [self.config.embed_size, self.config.label_size])
bs = tf.get_variable("bs", [1, self.config.label_size])
variable_summaries(U, U.name)
variable_summaries(bs, bs.name)
### END YOUR CODE
def add_model(self, node):
"""Recursively build the model to compute the phrase embeddings in the tree
Hint: Refer to tree.py and vocab.py before you start. Refer to
the model's vocab with self.vocab
Hint: Reuse the "Composition" variable_scope here
Hint: Store a node's vector representation in node.tensor so it can be
used by its parent
Hint: If node is a leaf node, its vector representation is just that of the
word vector (see tf.gather()).
Args:
node: a Node object
Returns:
node_tensors: Dict: key = Node, value = tensor(1, embed_size)
"""
with tf.variable_scope('Composition', reuse=True):
### YOUR CODE HERE
embedding = tf.get_variable("embedding")
W1 = tf.get_variable("W1")
b1 = tf.get_variable("b1")
l2_loss = tf.nn.l2_loss(W1) + tf.nn.l2_loss(b1)
tf.add_to_collection(name="l2_loss", value=l2_loss)
### END YOUR CODE
W_split = tf.split(0, 2, W1)
W_left = W_split[0]
W_right = W_split[1]
node_tensors = OrderedDict()
curr_node_tensor = None
if node.isLeaf:
### YOUR CODE HERE
word_id = self.vocab.encode(node.word)
curr_node_tensor = tf.expand_dims(tf.gather(embedding, word_id),0)
### END YOUR CODE
else:
node_tensors.update(self.add_model(node.left))
node_tensors.update(self.add_model(node.right))
### YOUR CODE HERE
#tf.concat(0,[node_tensors[node.left], node_tensors[node.right]])
#This operation could be done without the split call above
#curr_node_tensor = tf.nn.relu(tf.matmul(child_tensor, W1) + b1)
curr_node_tensor = tf.matmul(node_tensors[node.left], W_left) + tf.matmul(node_tensors[node.right], W_right) + b1
### END YOUR CODE
node_tensors[node] = curr_node_tensor
return node_tensors
def add_projections(self, node_tensors):
"""Add projections to the composition vectors to compute the raw sentiment scores
Hint: Reuse the "Projection" variable_scope here
Args:
node_tensors: tensor(?, embed_size)
Returns:
output: tensor(?, label_size)
"""
logits = None
### YOUR CODE HERE
with tf.variable_scope("Projection", reuse=True):
U = tf.get_variable("U")
bs = tf.get_variable("bs")
logits = tf.matmul(node_tensors, U) + bs
### END YOUR CODE
return logits
def loss(self, logits, labels):
"""Adds loss ops to the computational graph.
Hint: Use sparse_softmax_cross_entropy_with_logits
Hint: Remember to add l2_loss (see tf.nn.l2_loss)
Args:
logits: tensor(num_nodes, output_size)
labels: python list, len = num_nodes
Returns:
loss: tensor 0-D
"""
loss = None
# YOUR CODE HERE
l2_loss = self.config.l2 * tf.get_collection("l2_loss")[0]
objective_loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
loss = objective_loss + l2_loss
tf.summary.scalar("loss_l2", l2_loss)
tf.summary.scalar("loss_objective", tf.reduce_sum(objective_loss))
tf.summary.scalar("loss_total", loss)
# END YOUR CODE
return loss
def training(self, loss):
"""Sets up the training Ops.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train. See
https://www.tensorflow.org/versions/r0.7/api_docs/python/train.html#Optimizer
for more information.
Hint: Use tf.train.GradientDescentOptimizer for this model.
Calling optimizer.minimize() will return a train_op object.
Args:
loss: tensor 0-D
Returns:
train_op: tensorflow op for training.
"""
train_op = None
# YOUR CODE HERE
optimizer = tf.train.GradientDescentOptimizer(self.config.lr)
#optimizer = tf.train.AdamOptimizer(self.config.lr)
train_op = optimizer.minimize(loss)
# END YOUR CODE
return train_op
def predictions(self, y):
"""Returns predictions from sparse scores
Args:
y: tensor(?, label_size)
Returns:
predictions: tensor(?,1)
"""
predictions = None
# YOUR CODE HERE
predictions = tf.argmax(y, dimension=1)
# END YOUR CODE
return predictions
def __init__(self, config):
self.config = config
self.load_data()
self.merged_summaries = None
self.summary_writer = None
def predict(self, trees, weights_path, get_loss = False):
"""Make predictions from the provided model."""
results = []
losses = []
for i in range(int(math.ceil(len(trees)/float(RESET_AFTER)))):
with tf.Graph().as_default(), tf.Session() as sess:
self.add_model_vars()
saver = tf.train.Saver()
saver.restore(sess, weights_path)
for tree in trees[i*RESET_AFTER: (i+1)*RESET_AFTER]:
logits = self.inference(tree, True)
predictions = self.predictions(logits)
root_prediction = sess.run(predictions)[0]
if get_loss:
root_label = tree.root.label
loss = sess.run(self.loss(logits, [root_label]))
losses.append(loss)
results.append(root_prediction)
return results, losses
def run_epoch(self, new_model = False, verbose=True, epoch=0):
step = 0
loss_history = []
while step < len(self.train_data):
with tf.Graph().as_default(), tf.Session() as sess:
self.add_model_vars()
if new_model:
init = tf.global_variables_initializer()
sess.run(init)
new_model = False
else:
saver = tf.train.Saver()
saver.restore(sess, './weights/%s.temp'%self.config.model_name)
for r_step in range(RESET_AFTER):
if step>=len(self.train_data):
break
tree = self.train_data[step]
logits = self.inference(tree)
labels = [l for l in tree.labels if l!=2]
loss = self.loss(logits, labels)
train_op = self.training(loss)
#initialize_uninitialized_vars(sess)
if r_step == 0:
self.merged_summaries = tf.summary.merge_all()
# self.summary_writer = tf.train.SummaryWriter("tree_rnn_log/", sess.graph)
if step == 0 and epoch == 0:
self.summary_writer = tf.summary.FileWriter("tree_rnn_log/", sess.graph)
loss, _, merged = sess.run([loss, train_op, self.merged_summaries])
if step % (RESET_AFTER//2):
self.summary_writer.add_summary(merged, epoch * len(self.train_data) + step)
loss_history.append(loss)
if verbose:
sys.stdout.write('\r{} / {} : loss = {}'.format(
step, len(self.train_data), np.mean(loss_history)))
sys.stdout.flush()
step+=1
saver = tf.train.Saver()
if not os.path.exists("./weights"):
os.makedirs("./weights")
saver.save(sess, './weights/%s.temp'%self.config.model_name, write_meta_graph=False)
train_preds, _ = self.predict(self.train_data, './weights/%s.temp'%self.config.model_name)
val_preds, val_losses = self.predict(self.dev_data, './weights/%s.temp'%self.config.model_name, get_loss=True)
train_labels = [t.root.label for t in self.train_data]
val_labels = [t.root.label for t in self.dev_data]
train_acc = np.equal(train_preds, train_labels).mean()
val_acc = np.equal(val_preds, val_labels).mean()
print()
print('Training acc (only root node): {}'.format(train_acc))
print('Validation acc (only root node): {}'.format(val_acc))
print(self.make_conf(train_labels, train_preds))
print(self.make_conf(val_labels, val_preds))
return train_acc, val_acc, loss_history, np.mean(val_losses)
def train(self, verbose=True):
complete_loss_history = []
train_acc_history = []
val_acc_history = []
prev_epoch_loss = float('inf')
best_val_loss = float('inf')
best_val_epoch = 0
stopped = -1
for epoch in range(self.config.max_epochs):
print('epoch %d'%epoch)
if epoch==0:
train_acc, val_acc, loss_history, val_loss = self.run_epoch(new_model=True, epoch=epoch)
else:
train_acc, val_acc, loss_history, val_loss = self.run_epoch(epoch=epoch)
complete_loss_history.extend(loss_history)
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
#lr annealing
epoch_loss = np.mean(loss_history)
if epoch_loss>prev_epoch_loss*self.config.anneal_threshold:
self.config.lr/=self.config.anneal_by
print('annealed lr to %f'%self.config.lr)
prev_epoch_loss = epoch_loss
#save if model has improved on val
if val_loss < best_val_loss:
shutil.copyfile('./weights/%s.temp'%self.config.model_name, './weights/%s'%self.config.model_name)
best_val_loss = val_loss
best_val_epoch = epoch
# if model has not improved for a while stop
if epoch - best_val_epoch > self.config.early_stopping:
stopped = epoch
#break
if verbose:
sys.stdout.write('\r')
sys.stdout.flush()
print('\n\nstopped at %d\n'%stopped)
return {
'loss_history': complete_loss_history,
'train_acc_history': train_acc_history,
'val_acc_history': val_acc_history,
}
def make_conf(self, labels, predictions):
confmat = np.zeros([2, 2])
for l,p in zip(labels, predictions):
confmat[l, p] += 1
return confmat
def test_RNN():
"""Test RNN model implementation.
You can use this function to test your implementation of the recursive neural
network. When debugging, set max_epochs in the Config object to 1
so you can rapidly iterate.
"""
config = Config()
model = RNN_Model(config)
start_time = time.time()
stats = model.train(verbose=True)
print('Training time: {}'.format(time.time() - start_time))
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.savefig("loss_history.png")
plt.show()
print('Test')
print('=-=-=')
predictions, _ = model.predict(model.test_data, './weights/%s'%model.config.model_name)
labels = [t.root.label for t in model.test_data]
test_acc = np.equal(predictions, labels).mean()
print('Test acc: {}'.format(test_acc))
if __name__ == "__main__":
test_RNN()
| mit |
andrewnc/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
qifeigit/scikit-learn | examples/classification/plot_classifier_comparison.py | 181 | 4699 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LDA(),
QDA()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
mayblue9/bokeh | bokeh/compat/mpl.py | 32 | 2834 | "Supporting objects and functions to convert Matplotlib objects into Bokeh."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from warnings import warn
import matplotlib.pyplot as plt
from .bokeh_exporter import BokehExporter
from .bokeh_renderer import BokehRenderer
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def to_bokeh(fig=None, name=None, server=None, notebook=None, pd_obj=True, xkcd=False):
""" Uses bokeh to display a Matplotlib Figure.
You can store a bokeh plot in a standalone HTML file, as a document in
a Bokeh plot server, or embedded directly into an IPython Notebook
output cell.
Parameters
----------
fig: matplotlib.figure.Figure
The figure to display. If None or not specified, then the current figure
will be used.
name: str (default=None)
If this option is provided, then the Bokeh figure will be saved into
this HTML file, and then a web browser will be used to display it.
server: str (default=None)
Fully specified URL of bokeh plot server. Default bokeh plot server
URL is "http://localhost:5006" or simply "deault"
notebook: bool (default=False)
Return an output value from this function which represents an HTML
object that the IPython notebook can display. You can also use it with
a bokeh plot server just specifying the URL.
pd_obj: bool (default=True)
The implementation assumes you are plotting using pandas.
You have the option to turn it off (False) to plot the datetime x-axis
with other non-pandas interfaces.
xkcd: bool (default=False)
If this option is True, then the Bokeh figure will be saved with a
xkcd style.
"""
if name is not None:
warn("Use standard output_file(...) from bokeh.io")
if server is not None:
warn("Use standard output_server(...) from bokeh.io")
if notebook is not None:
warn("Use standard output_notebook() from bokeh.io")
if fig is None:
fig = plt.gcf()
renderer = BokehRenderer(pd_obj, xkcd)
exporter = BokehExporter(renderer)
exporter.run(fig)
return renderer.fig
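# Editor's note: a hedged usage sketch, not part of the original module; it
# assumes bokeh.plotting.show is available for display:
#
#   import matplotlib.pyplot as plt
#   from bokeh.plotting import show
#   plt.plot([1, 2, 3], [4, 6, 5])
#   show(to_bokeh())    # convert and display the current Matplotlib figure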
| bsd-3-clause |
cedar10b/travelapp | cities.py | 1 | 2476 | # -*- coding: utf-8 -*-
import re
import urllib2
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from sql_functions import *
df = pd.DataFrame([], columns=['city', 'state', 'population'])
# find the largest cities in the US from wikipedia
url = 'https://en.wikipedia.org/wiki/List_of_United_States_cities_by_population'
page = urllib2.urlopen(url)
soup = BeautifulSoup(page)
table = soup.find('table', {'class': 'wikitable sortable'})
entry = {}
for row in table.findAll('tr')[1:]:
city = row.findAll('td')[1].contents[0]
#state = row.findAll('td')[2].a.attrs['title']
state = str(row.findAll('td')[2].a)
state = re.compile('>(.*?)</a>').search(state).group(1)
population = row.findAll('td')[4].contents[0]
population = int(population.replace(',', ''))
city = city.string
df = df.append({'city': city, 'state': state, 'population': population}, ignore_index=True)
df.ix[df.state.values == "Hawai'i", 'state'] = 'Hawaii'
df.ix[df.city.values == u"Winston\u2013Salem", 'city'] = 'Winston-Salem'
# add state abbreviation
state_abbr = pd.DataFrame([], columns=['state', 'state_abbr'])
url = 'http://www.stateabbreviations.us/'
page = urllib2.urlopen(url)
soup = BeautifulSoup(page)
table = soup.find('table', {'class': 'f'})
for row in table.findAll('tr')[2:]:
state = str(row.findAll('td')[0].a)
state = re.compile('>(.*?)</a>').search(state).group(1)
abbr = row.findAll('td')[2].contents[0]
state_abbr = state_abbr.append({'state': state, 'state_abbr': abbr}, ignore_index=True)
df = pd.merge(df, state_abbr, on='state', how='left')
df.ix[df.state == 'District of Columbia', 'state_abbr'] = 'DC'
# drop SF Bay Area cities from the list -- you can drive to these cities
bay_area_cities = ['San Jose', 'San Francisco', 'Sacramento',
'Oakland', 'Stockton', 'Fremont', 'Modesto']
for i in range(len(bay_area_cities)):
df.drop(df[df.city == bay_area_cities[i]].index.values,
inplace=True)
df.set_index(pd.Index(np.arange(df.shape[0])), inplace=True)
# normalize population
# scale saturates above 2,500,000 and below 200,000
# the log is taken and it is normalized from 0 to 1.
trunc_pop = np.where(df.population > 2500000, 2500000, df.population)
trunc_pop = np.where(df.population < 200000, 200000, trunc_pop)
trunc_pop = np.log(trunc_pop)
trunc_pop -= trunc_pop.min()
trunc_pop /= trunc_pop.max()
df['norm_popul'] = trunc_pop
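# Editor's note (worked example, assuming the city list spans both saturation
# bounds): a city of 1,000,000 people maps to
# (log(1e6) - log(2e5)) / (log(2.5e6) - log(2e5)) = log(5)/log(12.5) ~= 0.64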
create_table(df, 'cities')
| mit |
IITM-DONLAB/python-dnn | src/pythonDnn/utils/plotter.py | 1 | 1076 | import matplotlib
import numpy as np
import logging
from pythonDnn.io_modules import create_folder_structure_if_not_exists
logger = logging.getLogger(__name__)
def plot(layer_output,path,layer_idx,batch_idx,img_plot_remaining,max_subplots=100):
#print layer_output;
matplotlib.use('Agg')
import matplotlib.pyplot as plt
num_examples = layer_output.__len__();
for eg_idx in xrange(num_examples):
if img_plot_remaining == 0:
break;
save_path = path%(layer_idx,batch_idx,eg_idx)
logger.debug('Plotting the feature map %d in %s'%(eg_idx,save_path));
eg_output = layer_output[eg_idx];
num_plots = min(max_subplots,eg_output.__len__());
cols = int(np.ceil(num_plots/10.0));
fig,plots = plt.subplots(10,cols);
plots = plots.flatten();
for idx in xrange(num_plots):
plots[idx].imshow(eg_output[idx],interpolation='nearest');
plots[idx].axis('off');
create_folder_structure_if_not_exists(save_path)
# Save the full figure...
fig.savefig(save_path,bbox_inches='tight')
plt.close()
img_plot_remaining-=1;
return img_plot_remaining;
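# Editor's note: a hedged usage sketch -- `path` is expected to carry three
# integer placeholders (layer, batch, example index), e.g.:
#
#   remaining = plot(layer_outputs, 'plots/layer%d/batch%d_eg%d.png',
#                    layer_idx=0, batch_idx=0, img_plot_remaining=10)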
| apache-2.0 |
ky822/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 159 | 10196 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
criffy/aflengine | analysis/misc/progtest.py | 1 | 1173 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 21:24:48 2018
@author: chrisstrods
"""
import pandas as pd
from os.path import dirname, abspath
import re
#load files
d = dirname(dirname(dirname(abspath(__file__))))
progression = pd.read_csv(d+"/bench/progression.csv")
quarters = pd.read_csv(d+"/bench/quarters.csv")
matchprog = progression.loc[progression["matchid"] == "2017PFADEGEE"]
matchq = quarters.loc[quarters["matchid"] == "2017PFADEGEE"]
matchprog["hscore"] = int(re.split('.-',matchprog["score"]))[2]
# total minutes = sum of quarter minutes plus the whole minutes contained in the summed seconds
matchmins = matchq.iloc[0]["minutes"] + \
            matchq.iloc[1]["minutes"] + \
            matchq.iloc[2]["minutes"] + \
            matchq.iloc[3]["minutes"] + \
            int((matchq.iloc[0]["seconds"] + \
            matchq.iloc[1]["seconds"] + \
            matchq.iloc[2]["seconds"] + \
            matchq.iloc[3]["seconds"]) / 60)
matchsecs = (matchq.iloc[0]["seconds"] + \
matchq.iloc[1]["seconds"] + \
matchq.iloc[2]["seconds"] + \
matchq.iloc[3]["seconds"])%60
matchlength = matchmins + (matchsecs / 60)
| gpl-3.0 |
chaluemwut/fbserver | filtercomputation.py | 1 | 5428 | # -*- coding: UTF-8 -*-
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from database import *
from crf import *
import sys, os, math
import numpy as np
import copy
from utilfile import FileUtil
class InstanceFilterData(object):
# print 'instanace'
_instance = None
_lst = []
_lstFilter = []
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(InstanceFilterData, cls).__new__(
cls, *args, **kwargs)
db = Database()
cls._lst = db.select_filter_el()
crfWordSeg = CRFWordSegment()
for x in cls._lst:
if x != '':
segMsg = crfWordSeg.crfpp(x)
# print ' '.join(segMsg)
cls._lstFilter.append(' '.join(segMsg))
return cls._instance
def getFilterData(self):
return self._lstFilter
class CRFWordSegment(object):
def process_ans(self, lst):
b_str = ''
ans_str = ''
for line_data in lst:
try :
data = line_data.split('\t')
b_data = data[3][:-1]
if b_data == 'B':
b_str = b_str + 'B'
else:
b_str = b_str + 'I'
ans_str = ans_str+data[0]
except Exception, e:
b_str = b_str+'B'
ans_str = ans_str+' '
return b_str, ans_str
def crfpp(self, msg):
crf = CRF()
fileUtil = FileUtil()
crf.create_file_input(msg)
os.system('crf_test -m model1 crf.test.data > crf.result')
lst = fileUtil.read_file('crf.result')
# lst = [a for a in lst if a != u'\n']
# str_ans = reduce(lambda x,y:x+y, [a.split('\t')[0] for a in lst])
# ans = reduce(lambda x,y:x+y, [a.split('\t')[3][:-1] for a in lst])
# lst_col3 = [a.split('\t')[3][:-1] for a in lst]
lst_col3, str_ans = self.process_ans(lst)
lst_ans = [n for (n, e) in enumerate(lst_col3) if e == 'B']
result_lst = []
for i in range(len(lst_ans)-1):
a = lst_ans[i]
b = lst_ans[i+1]
result_lst.append(str_ans[a:b])
result_lst.append(str_ans[b:len(str_ans)])
return result_lst
class FilterComputation(object):
def __init__(self):
pass
def findResutl(self, result):
for x in result:
if x > 0.4:
return 'no'
return 'yes'
def debugResult(self, result, document):
for x in range(0, len(result)):
print ' d ',result[x],' -- ',document[x]
def computeCos(self, message, corpus):
try:
crfWordSeg = CRFWordSegment()
lstInput = crfWordSeg.crfpp(message)
inMessage = ' '.join(lstInput)
documents = copy.copy(corpus)
documents.insert(0, inMessage)
tfidf_vectorizer = TfidfVectorizer()
tfidf_matrix = tfidf_vectorizer.fit_transform(documents)
result_lst = cosine_similarity(tfidf_matrix[0:1], tfidf_matrix)
result = result_lst[0]
return result
except Exception as e:
print e
def invFBFilter(self, message):
try:
crfWordSeg = CRFWordSegment()
lstInput = crfWordSeg.crfpp(message)
inMessage = ' '.join(lstInput)
insData = InstanceFilterData()
lst = insData.getFilterData()
documents = copy.copy(lst)
documents.insert(0, inMessage)
tfidf_vectorizer = TfidfVectorizer()
tfidf_matrix = tfidf_vectorizer.fit_transform(documents)
result_lst = cosine_similarity(tfidf_matrix[0:1], tfidf_matrix)
result = result_lst[0]
map = {}
for x in range(0, len(documents)):
key = documents[x]
value = result[x]
map[key] = value
# print map
return map
except Exception as e:
print e
def isFilterMessage(self, message):
try:
crfWordSeg = CRFWordSegment()
lstInput = crfWordSeg.crfpp(message)
inMessage = ' '.join(lstInput)
insData = InstanceFilterData()
lst = insData.getFilterData()
documents = copy.copy(lst)
documents.insert(0, inMessage)
tfidf_vectorizer = TfidfVectorizer()
tfidf_matrix = tfidf_vectorizer.fit_transform(documents)
result_lst = cosine_similarity(tfidf_matrix[0:1], tfidf_matrix)
result = result_lst[0]
# self.debugResult(result, documents)
return self.findResutl(result[1:])
# if b:
# return b, data
# else:
# print '******************* else'
# maxIndex = max(result)
# data = documents[np.where(result == maxIndex)[0][0]]
# print 'data : ',data
# return b, data
except Exception as e:
print e
# u = unicode('ทดสอบ', 'utf-8')
# filter = FilterComputation()
# filter.isFilterMessage(u)
| apache-2.0 |
tdhopper/scikit-learn | sklearn/decomposition/tests/test_pca.py | 199 | 10949 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
# PCA on dense arrays
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
    # the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by RandomizedPCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by RandomizedPCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that RandomizedPCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
| bsd-3-clause |
savitasavadi/ml_lab_ecsc_306 | labwork/lab7/sci-learn/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| apache-2.0 |
keskitalo/healpy | healpy/projaxes.py | 2 | 43390 | #
# This file is part of Healpy.
#
# Healpy is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Healpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Healpy; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# For more information about Healpy, see http://code.google.com/p/healpy
#
from . import projector as P
from . import rotator as R
from . import pixelfunc
import matplotlib
import matplotlib.axes
import numpy as np
import six
from ._healpy_pixel_lib import UNSEEN
pi = np.pi
dtor = pi / 180.0
class SphericalProjAxes(matplotlib.axes.Axes):
"""Define a special Axes to take care of spherical projection.
Parameters
----------
projection : a SphericalProj class or a class derived from it.
type of projection
rot : list or string
define rotation. See rotator.
coord : list or string
define coordinate system. See rotator.
    coordprec : number of digits after floating point for coordinates display.
format : format string for value display.
Notes
-----
Other keywords from Axes (see Axes).
"""
def __init__(self, ProjClass, *args, **kwds):
if not issubclass(ProjClass, P.SphericalProj):
raise TypeError(
"First argument must be a SphericalProj class " "(or derived from)"
)
self.proj = ProjClass(
rot=kwds.pop("rot", None),
coord=kwds.pop("coord", None),
flipconv=kwds.pop("flipconv", None),
**kwds.pop("arrayinfo", {})
)
kwds.setdefault("format", "%g")
kwds.setdefault("coordprec", 2)
kwds["aspect"] = "equal"
super(SphericalProjAxes, self).__init__(*args, **kwds)
self.axis("off")
self.set_autoscale_on(False)
xmin, xmax, ymin, ymax = self.proj.get_extent()
self.set_xlim(xmin, xmax)
self.set_ylim(ymin, ymax)
dx, dy = self.proj.ang2xy(pi / 2.0, 1.0 * dtor, direct=True)
self._segment_threshold = 16.0 * np.sqrt(dx ** 2 + dy ** 2)
self._segment_step_rad = 0.1 * pi / 180
self._do_border = True
self._gratdef = {}
self._gratdef["local"] = False
self._gratdef["dpar"] = 30.0
def set_format(self, f):
"""Set the format string for value display
"""
self._format = f
return f
def set_coordprec(self, n):
"""Set the number of digits after floating point for coord display.
"""
self._coordprec = n
def format_coord(self, x, y):
"""Format the coordinate for display in status bar. Take projection
into account.
"""
format = self._format + " at "
pos = self.get_lonlat(x, y)
if pos is None or np.isnan(pos).any():
return ""
lon, lat = np.around(pos, decimals=self._coordprec)
val = self.get_value(x, y)
if val is None:
format = "%s"
val = ""
elif type(val) is str:
format = "%s @ "
coordsys = self.proj.coordsysstr
if coordsys != "":
res = (format + "(%g, %g) in %s") % (val, lon, lat, coordsys[0:3])
else:
res = (format + "lon=%g, lat=%g") % (val, lon, lat)
return res
def get_lonlat(self, x, y):
"""Get the coordinate in the coord system of the image, in lon/lat in deg.
"""
lon, lat = self.proj.xy2ang(x, y, lonlat=True)
return lon, lat
def get_value(self, x, y):
"""Get the value of the map at position x,y
"""
if len(self.get_images()) < 1:
return None
im = self.get_images()[-1]
arr = im.get_array()
i, j = self.proj.xy2ij(x, y)
if i is None or j is None:
return None
elif arr.mask is not np.ma.nomask and arr.mask[i, j]:
return "UNSEEN"
else:
return arr[i, j]
def projmap(
self,
map,
vec2pix_func,
vmin=None,
vmax=None,
badval=UNSEEN,
badcolor="gray",
bgcolor="white",
cmap=None,
norm=None,
rot=None,
coord=None,
**kwds
):
"""Project a map on the SphericalProjAxes.
Parameters
----------
map : array-like
The map to project.
vec2pix_func : function
The function describing the pixelisation.
vmin, vmax : float, scalars
min and max value to use instead of min max of the map
badval : float
The value of the bad pixels
badcolor : str
Color to use to plot bad values
bgcolor : str
Color to use for background
cmap : a color map
The colormap to use (see matplotlib.cm)
rot : sequence
In the form (lon, lat, psi) (unit: degree):the center of the map is
at (lon, lat) and rotated by angle psi around that direction.
coord : {'G', 'E', 'C', None}
The coordinate system of the map ('G','E' or 'C'), rotate
the map if different from the axes coord syst.
Notes
-----
Other keywords are transmitted to :func:`matplotlib.Axes.imshow`
"""
img = self.proj.projmap(map, vec2pix_func, rot=rot, coord=coord)
w = ~(np.isnan(img) | np.isinf(img) | pixelfunc.mask_bad(img, badval=badval))
try:
if vmin is None:
vmin = img[w].min()
except ValueError:
vmin = 0.0
try:
if vmax is None:
vmax = img[w].max()
except ValueError:
vmax = 0.0
if vmin > vmax:
vmin = vmax
if vmin == vmax:
vmin -= 1.0
vmax += 1.0
cm, nn = get_color_table(vmin, vmax, img[w], cmap=cmap, norm=norm,
badcolor=badcolor, bgcolor=bgcolor)
ext = self.proj.get_extent()
img = np.ma.masked_values(img, badval)
aximg = self.imshow(
img,
extent=ext,
cmap=cm,
norm=nn,
interpolation="nearest",
origin="lower",
vmin=vmin,
vmax=vmax,
**kwds
)
xmin, xmax, ymin, ymax = self.proj.get_extent()
self.set_xlim(xmin, xmax)
self.set_ylim(ymin, ymax)
return img
def projplot(self, *args, **kwds):
"""projplot is a wrapper around :func:`matplotlib.Axes.plot` to take into account the
spherical projection.
You can call this function as::
projplot(theta, phi) # plot a line going through points at coord (theta, phi)
projplot(theta, phi, 'bo') # plot 'o' in blue at coord (theta, phi)
projplot(thetaphi) # plot a line going through points at coord (thetaphi[0], thetaphi[1])
projplot(thetaphi, 'bx') # idem but with blue 'x'
Parameters
----------
theta, phi : float, array-like
Coordinates of point to plot. Can be put into one 2-d array, first line is
then *theta* and second line is *phi*. See *lonlat* parameter for unit.
fmt : str
A format string (see :func:`matplotlib.Axes.plot` for details)
lonlat : bool, optional
If True, theta and phi are interpreted as longitude and latitude
in degree, otherwise, as colatitude and longitude in radian
coord : {'E', 'G', 'C', None}
            The coordinate system of the points, only used if the coordinate
            system of the Axes has been defined and in this case, a rotation
            is performed
rot : None or sequence
rotation to be applied =(lon, lat, psi) : lon, lat will be position of the
new Z axis, and psi is rotation around this axis, all in degree.
if None, no rotation is performed
direct : bool
if True, the rotation to center the projection is not
taken into account
Notes
-----
Other keywords are passed to :func:`matplotlib.Axes.plot`.
See Also
--------
projscatter, projtext
"""
fmt = None
if len(args) < 1:
raise ValueError("No argument given")
if len(args) == 1:
theta, phi = np.asarray(args[0])
elif len(args) == 2:
if type(args[1]) is str:
fmt = args[1]
theta, phi = np.asarray(args[0])
else:
theta, phi = np.asarray(args[0]), np.asarray(args[1])
elif len(args) == 3:
if type(args[2]) is not str:
raise TypeError("Third argument must be a string")
else:
theta, phi = np.asarray(args[0]), np.asarray(args[1])
fmt = args[2]
else:
raise TypeError("Three args maximum")
rot = kwds.pop("rot", None)
if rot is not None:
rot = np.array(np.atleast_1d(rot), copy=1)
rot.resize(3)
rot[1] = rot[1] - 90.0
coord = self.proj.mkcoord(kwds.pop("coord", None))[::-1]
lonlat = kwds.pop("lonlat", False)
vec = R.dir2vec(theta, phi, lonlat=lonlat)
vec = (R.Rotator(rot=rot, coord=coord, eulertype="Y")).I(vec)
x, y = self.proj.vec2xy(vec, direct=kwds.pop("direct", False))
x, y = self._make_segment(
x, y, threshold=kwds.pop("threshold", self._segment_threshold)
)
thelines = []
for xx, yy in zip(x, y):
if fmt is not None:
try: # works in matplotlib 1.3 and earlier
linestyle, marker, color = matplotlib.axes._process_plot_format(fmt)
except: # matplotlib 1.4 and later
linestyle, marker, color = matplotlib.axes._axes._process_plot_format(
fmt
)
kwds.setdefault("linestyle", linestyle)
kwds.setdefault("marker", marker)
if color is not None:
kwds.setdefault("color", color)
l = matplotlib.lines.Line2D(xx, yy, **kwds)
self.add_line(l)
thelines.append(l)
return thelines
def projscatter(self, theta, phi=None, *args, **kwds):
"""Projscatter is a wrapper around :func:`matplotlib.Axes.scatter` to take into account the
spherical projection.
You can call this function as::
projscatter(theta, phi) # plot points at coord (theta, phi)
projplot(thetaphi) # plot points at coord (thetaphi[0], thetaphi[1])
Parameters
----------
theta, phi : float, array-like
Coordinates of point to plot. Can be put into one 2-d array, first line is
then *theta* and second line is *phi*. See *lonlat* parameter for unit.
lonlat : bool, optional
If True, theta and phi are interpreted as longitude and latitude
in degree, otherwise, as colatitude and longitude in radian
coord : {'E', 'G', 'C', None}, optional
            The coordinate system of the points, only used if the coordinate
            system of the axes has been defined and in this case, a rotation
            is performed
rot : None or sequence, optional
rotation to be applied =(lon, lat, psi) : lon, lat will be position of the
new Z axis, and psi is rotation around this axis, all in degree.
if None, no rotation is performed
direct : bool, optional
if True, the rotation to center the projection is not
taken into account
Notes
-----
Other keywords are passed to :func:`matplotlib.Axes.plot`.
See Also
--------
projplot, projtext
"""
save_input_data = hasattr(self.figure, "zoomtool")
if save_input_data:
input_data = (theta, phi, args, kwds.copy())
if phi is None:
theta, phi = np.asarray(theta)
else:
theta, phi = np.asarray(theta), np.asarray(phi)
rot = kwds.pop("rot", None)
if rot is not None:
rot = np.array(np.atleast_1d(rot), copy=1)
rot.resize(3)
rot[1] = rot[1] - 90.0
coord = self.proj.mkcoord(kwds.pop("coord", None))[::-1]
lonlat = kwds.pop("lonlat", False)
vec = R.dir2vec(theta, phi, lonlat=lonlat)
vec = (R.Rotator(rot=rot, coord=coord, eulertype="Y")).I(vec)
x, y = self.proj.vec2xy(vec, direct=kwds.pop("direct", False))
s = self.scatter(x, y, *args, **kwds)
if save_input_data:
if not hasattr(self, "_scatter_data"):
self._scatter_data = []
self._scatter_data.append((s, input_data))
return s
def projtext(self, theta, phi, s, **kwds):
"""Projtext is a wrapper around :func:`matplotlib.Axes.text` to take into account the
spherical projection.
Parameters
----------
theta, phi : float, array-like
Coordinates of point to plot. Can be put into one 2-d array, first line is
then *theta* and second line is *phi*. See *lonlat* parameter for unit.
text : str
The text to be displayed.
lonlat : bool, optional
If True, theta and phi are interpreted as longitude and latitude
in degree, otherwise, as colatitude and longitude in radian
coord : {'E', 'G', 'C', None}, optional
            The coordinate system of the points, only used if the coordinate
            system of the axes has been defined and in this case, a rotation
            is performed
rot : None or sequence, optional
rotation to be applied =(lon, lat, psi) : lon, lat will be position of the
new Z axis, and psi is rotation around this axis, all in degree.
if None, no rotation is performed
direct : bool, optional
if True, the rotation to center the projection is not
taken into account
Notes
-----
Other keywords are passed to :func:`matplotlib.Axes.text`.
See Also
--------
projplot, projscatter
"""
if phi is None:
theta, phi = np.asarray(theta)
else:
theta, phi = np.asarray(theta), np.asarray(phi)
rot = kwds.pop("rot", None)
if rot is not None:
rot = np.array(np.atleast_1d(rot), copy=1)
rot.resize(3)
rot[1] = rot[1] - 90.0
coord = self.proj.mkcoord(kwds.pop("coord", None))[::-1]
lonlat = kwds.pop("lonlat", False)
vec = R.dir2vec(theta, phi, lonlat=lonlat)
vec = (R.Rotator(rot=rot, coord=coord, eulertype="Y")).I(vec)
x, y = self.proj.vec2xy(vec, direct=kwds.pop("direct", False))
return self.text(x, y, s, **kwds)
def _make_segment(self, x, y, threshold=None):
if threshold is None:
threshold = self._segment_threshold
x, y = np.atleast_1d(x), np.atleast_1d(y)
d2 = np.sqrt((np.roll(x, 1) - x) ** 2 + (np.roll(y, 1) - y) ** 2)
w = np.where(d2 > threshold)[0]
# w=w[w!=0]
xx = []
yy = []
if len(w) == 1:
x = np.roll(x, -w[0])
y = np.roll(y, -w[0])
xx.append(x)
yy.append(y)
elif len(w) >= 2:
xx.append(x[0 : w[0]])
yy.append(y[0 : w[0]])
for i in six.moves.xrange(len(w) - 1):
xx.append(x[w[i] : w[i + 1]])
yy.append(y[w[i] : w[i + 1]])
xx.append(x[w[-1] :])
yy.append(y[w[-1] :])
else:
xx.append(x)
yy.append(y)
return xx, yy
def get_parallel_interval(self, vx, vy=None, vz=None):
"""Get the min and max value of theta of the parallel to cover the
field of view.
Input:
- the normalized vector of the direction of the center of the
projection, in the reference frame of the graticule.
Return:
- vmin,vmax : between 0 and pi, vmin<vmax, the interval of theta
for the parallels crossing the field of view
"""
if vy is None and vz is None:
vx, vy, vz = vx
elif vy is None or vz is None:
raise ValueError("Both vy and vz must be given or both not given")
a = np.arccos(vz)
fov = self.proj.get_fov()
vmin = max(0.0, a - fov / 2.0)
vmax = min(pi, a + fov / 2.0)
return vmin, vmax
def get_meridian_interval(self, vx, vy=None, vz=None):
"""Get the min and max value of phi of the meridians to cover the field
of view.
Input:
- the normalized vector of the direction of the center of the
projection, in the reference frame of the graticule.
Return:
- vmin,vmax : the interval of phi for the
meridians crossing the field of view.
"""
if vy is None and vz is None:
vx, vy, vz = vx
elif vy is None or vz is None:
raise ValueError("Both vy and vz must be given or both not given")
fov = self.proj.get_fov()
th = np.arccos(vz)
if th <= fov / 2.0: # test whether north pole is visible
return -np.pi, np.pi
if abs(th - pi) <= fov / 2.0: # test whether south pole is visible
return -np.pi, np.pi
sth = np.sin(th)
phi0 = np.arctan2(vy, vx)
return phi0 - fov / sth / 2.0, phi0 + fov / sth / 2.0
def graticule(
self, dpar=None, dmer=None, coord=None, local=None, verbose=True, **kwds
):
"""Draw a graticule.
Input:
- dpar: angular separation between parallels in degree
- dmer: angular separation between meridians in degree
- coord: coordinate system of the graticule ('G', 'E' or 'C')
- local: if True, no rotation performed at all
"""
gratargs = (dpar, dmer, coord, local)
gratkwds = kwds
if dpar is None:
dpar = self._gratdef["dpar"]
if local is None:
local = self._gratdef["local"]
if dmer is None:
dmer = dpar
dpar = abs(dpar) * dtor
dmer = abs(dmer) * dtor
if not local:
vec = R.dir2vec(self.proj.get_center())
vec0 = R.Rotator(coord=self.proj.mkcoord(coord=coord)).I(vec)
else:
vec = (1, 0, 0)
vec0 = (1, 0, 0)
u_pmin, u_pmax = kwds.pop("pmax", None), kwds.pop("pmin", None)
u_mmin, u_mmax = kwds.pop("mmin", None), kwds.pop("mmax", None)
if u_pmin:
u_pmin = (pi / 2.0 - u_pmin * dtor) % pi
if u_pmax:
u_pmax = (pi / 2.0 - u_pmax * dtor) % pi
if u_mmin:
u_mmin = (((u_mmin + 180.0) % 360) - 180) * dtor
if u_mmax:
u_mmax = (((u_mmax + 180.0) % 360) - 180) * dtor
pmin, pmax = self.get_parallel_interval(vec0)
mmin, mmax = self.get_meridian_interval(vec0)
if u_pmin:
pmin = u_pmin
if u_pmax:
pmax = u_pmax
if u_mmin:
mmin = u_mmin
if u_mmax:
            mmax = u_mmax
if verbose:
print(
"{0} {1} {2} {3}".format(
pmin / dtor, pmax / dtor, mmin / dtor, mmax / dtor
)
)
if not kwds.pop("force", False):
dpar, dmer = self._get_interv_graticule(
pmin, pmax, dpar, mmin, mmax, dmer, verbose=verbose
)
theta_list = np.around(np.arange(pmin, pmax + 0.5 * dpar, dpar) / dpar) * dpar
phi_list = np.around(np.arange(mmin, mmax + 0.5 * dmer, dmer) / dmer) * dmer
theta = np.arange(
pmin, pmax, min((pmax - pmin) / 100.0, self._segment_step_rad)
)
phi = np.arange(mmin, mmax, min((mmax - mmin) / 100.0, self._segment_step_rad))
equator = False
gratlines = []
kwds.setdefault("lw", 1)
kwds.setdefault("color", "k")
for t in theta_list:
if abs(t - pi / 2.0) < 1.0e-10:
fmt = "-"
equator = True
elif abs(t) < 1.0e-10: # special case: north pole
t = 1.0e-10
fmt = "-"
elif abs(t - pi) < 1.0e-10: # special case: south pole
t = pi - 1.0e-10
fmt = "-"
else:
fmt = ":"
gratlines.append(
self.projplot(
phi * 0.0 + t, phi, fmt, coord=coord, direct=local, **kwds
)
)
if not equator and pmin <= pi / 2.0 and pi / 2 <= pmax:
gratlines.append(
self.projplot(
phi * 0.0 + pi / 2.0, phi, "-", coord=coord, direct=local, **kwds
)
)
for p in phi_list:
if abs(p) < 1.0e-10:
fmt = "-"
else:
fmt = ":"
gratlines.append(
self.projplot(
theta, theta * 0.0 + p, fmt, coord=coord, direct=local, **kwds
)
)
# Now the borders (only useful for full sky projection)
if hasattr(self, "_do_border") and self._do_border:
theta = np.arange(0, 181) * dtor
gratlines.append(
self.projplot(theta, theta * 0 - pi, "-k", lw=1, direct=True)
)
gratlines.append(
self.projplot(theta, theta * 0 + 0.9999 * pi, "-k", lw=1, direct=True)
)
phi = np.arange(-180, 180) * dtor
gratlines.append(
self.projplot(phi * 0 + 1.0e-10, phi, "-k", lw=1, direct=True)
)
gratlines.append(
self.projplot(phi * 0 + pi - 1.0e-10, phi, "-k", lw=1, direct=True)
)
if hasattr(self, "_graticules"):
self._graticules.append((gratargs, gratkwds, gratlines))
else:
self._graticules = [(gratargs, gratkwds, gratlines)]
return dpar, dmer
def delgraticules(self):
"""Delete all graticules previously created on the Axes.
"""
if hasattr(self, "_graticules"):
for dum1, dum2, g in self._graticules:
for gl in g:
for l in gl:
if l in self.lines:
self.lines.remove(l)
else:
print("line not in lines")
del self._graticules
def _get_interv_graticule(self, pmin, pmax, dpar, mmin, mmax, dmer, verbose=True):
def set_prec(d, n, nn=2):
arcmin = False
if d / n < 1.0:
d *= 60
arcmin = True
nn = 1
x = d / n
y = nn * x
ex = np.floor(np.log10(y))
z = np.around(y / 10 ** ex) * 10 ** ex / nn
if arcmin:
z = 1.0 / np.around(60.0 / z)
return z
max_n_par = 18
max_n_mer = 36
n_par = (pmax - pmin) / dpar
n_mer = (mmax - mmin) / dmer
if n_par > max_n_par:
dpar = set_prec((pmax - pmin) / dtor, max_n_par / 2) * dtor
if n_mer > max_n_mer:
dmer = set_prec((mmax - mmin) / dtor, max_n_mer / 2, nn=1) * dtor
if dmer / dpar < 0.2 or dmer / dpar > 5.0:
dmer = dpar = max(dmer, dpar)
vdeg = int(np.floor(np.around(dpar / dtor, 10)))
varcmin = (dpar / dtor - vdeg) * 60.0
if verbose:
print(
"The interval between parallels is {0:d} deg {1:.2f}'.".format(
vdeg, varcmin
)
)
vdeg = int(np.floor(np.around(dmer / dtor, 10)))
varcmin = (dmer / dtor - vdeg) * 60.0
if verbose:
print(
"The interval between meridians is {0:d} deg {1:.2f}'.".format(
vdeg, varcmin
)
)
return dpar, dmer
class GnomonicAxes(SphericalProjAxes):
"""Define a gnomonic Axes to handle gnomonic projection.
Input:
- rot=, coord= : define rotation and coordinate system. See rotator.
      - coordprec= : number of digits after floating point for coordinates display.
- format= : format string for value display.
Other keywords from Axes (see Axes).
"""
def __init__(self, *args, **kwds):
kwds.setdefault("coordprec", 3)
super(GnomonicAxes, self).__init__(P.GnomonicProj, *args, **kwds)
self._do_border = False
self._gratdef["local"] = True
self._gratdef["dpar"] = 1.0
def projmap(self, map, vec2pix_func, xsize=200, ysize=None, reso=1.5, **kwds):
self.proj.set_proj_plane_info(xsize=xsize, ysize=ysize, reso=reso)
return super(GnomonicAxes, self).projmap(map, vec2pix_func, **kwds)
class HpxGnomonicAxes(GnomonicAxes):
def projmap(self, map, nest=False, **kwds):
nside = pixelfunc.npix2nside(pixelfunc.get_map_size(map))
f = lambda x, y, z: pixelfunc.vec2pix(nside, x, y, z, nest=nest)
xsize = kwds.pop("xsize", 200)
ysize = kwds.pop("ysize", None)
reso = kwds.pop("reso", 1.5)
return super(HpxGnomonicAxes, self).projmap(
map, f, xsize=xsize, ysize=ysize, reso=reso, **kwds
)
class MollweideAxes(SphericalProjAxes):
"""Define a mollweide Axes to handle mollweide projection.
Input:
- rot=, coord= : define rotation and coordinate system. See rotator.
      - coordprec= : number of digits after floating point for coordinates display.
- format= : format string for value display.
Other keywords from Axes (see Axes).
"""
def __init__(self, *args, **kwds):
kwds.setdefault("coordprec", 2)
super(MollweideAxes, self).__init__(P.MollweideProj, *args, **kwds)
self.set_xlim(-2.01, 2.01)
self.set_ylim(-1.01, 1.01)
def projmap(self, map, vec2pix_func, xsize=800, **kwds):
self.proj.set_proj_plane_info(xsize=xsize)
img = super(MollweideAxes, self).projmap(map, vec2pix_func, **kwds)
self.set_xlim(-2.01, 2.01)
self.set_ylim(-1.01, 1.01)
return img
class HpxMollweideAxes(MollweideAxes):
def projmap(self, map, nest=False, **kwds):
nside = pixelfunc.npix2nside(pixelfunc.get_map_size(map))
f = lambda x, y, z: pixelfunc.vec2pix(nside, x, y, z, nest=nest)
return super(HpxMollweideAxes, self).projmap(map, f, **kwds)
class CartesianAxes(SphericalProjAxes):
"""Define a cylindrical Axes to handle cylindrical projection.
"""
def __init__(self, *args, **kwds):
kwds.setdefault("coordprec", 2)
super(CartesianAxes, self).__init__(P.CartesianProj, *args, **kwds)
self._segment_threshold = 180
self._segment_step_rad = 0.1 * pi / 180
self._do_border = True
def projmap(
self, map, vec2pix_func, xsize=800, ysize=None, lonra=None, latra=None, **kwds
):
self.proj.set_proj_plane_info(
xsize=xsize, ysize=ysize, lonra=lonra, latra=latra
)
return super(CartesianAxes, self).projmap(map, vec2pix_func, **kwds)
class HpxCartesianAxes(CartesianAxes):
def projmap(self, map, nest=False, **kwds):
nside = pixelfunc.npix2nside(pixelfunc.get_map_size(map))
f = lambda x, y, z: pixelfunc.vec2pix(nside, x, y, z, nest=nest)
return super(HpxCartesianAxes, self).projmap(map, f, **kwds)
class OrthographicAxes(SphericalProjAxes):
"""Define an orthographic Axes to handle orthographic projection.
Input:
- rot=, coord= : define rotation and coordinate system. See rotator.
- coordprec= : num of digits after floating point for coordinates display.
- format= : format string for value display.
Other keywords from Axes (see Axes).
"""
def __init__(self, *args, **kwds):
kwds.setdefault("coordprec", 2)
super(OrthographicAxes, self).__init__(P.OrthographicProj, *args, **kwds)
self._segment_threshold = 0.01
self._do_border = False
def projmap(self, map, vec2pix_func, xsize=800, half_sky=False, **kwds):
self.proj.set_proj_plane_info(xsize=xsize, half_sky=half_sky)
img = super(OrthographicAxes, self).projmap(map, vec2pix_func, **kwds)
if half_sky:
ratio = 1.01
else:
ratio = 2.01
self.set_xlim(-ratio, ratio)
self.set_ylim(-1.01, 1.01)
return img
class HpxOrthographicAxes(OrthographicAxes):
def projmap(self, map, nest=False, **kwds):
nside = pixelfunc.npix2nside(len(map))
f = lambda x, y, z: pixelfunc.vec2pix(nside, x, y, z, nest=nest)
return super(HpxOrthographicAxes, self).projmap(map, f, **kwds)
class AzimuthalAxes(SphericalProjAxes):
"""Define an Azimuthal Axes to handle azimuthal equidistant or
Lambert azimuthal equal-area projections.
Input:
- rot=, coord= : define rotation and coordinate system. See rotator.
      - coordprec= : number of digits after floating point for coordinates display.
- format= : format string for value display.
Other keywords from Axes (see Axes).
"""
def __init__(self, *args, **kwds):
kwds.setdefault("coordprec", 3)
super(AzimuthalAxes, self).__init__(P.AzimuthalProj, *args, **kwds)
self._do_border = False
def projmap(
self,
map,
vec2pix_func,
xsize=200,
ysize=None,
reso=1.5,
lamb=True,
half_sky=False,
**kwds
):
self.proj.set_proj_plane_info(
xsize=xsize, ysize=ysize, reso=reso, lamb=lamb, half_sky=half_sky
)
return super(AzimuthalAxes, self).projmap(map, vec2pix_func, **kwds)
class HpxAzimuthalAxes(AzimuthalAxes):
def projmap(self, map, nest=False, **kwds):
nside = pixelfunc.npix2nside(pixelfunc.get_map_size(map))
f = lambda x, y, z: pixelfunc.vec2pix(nside, x, y, z, nest=nest)
xsize = kwds.pop("xsize", 800)
ysize = kwds.pop("ysize", None)
reso = kwds.pop("reso", 1.5)
lamb = kwds.pop("lamb", True)
return super(HpxAzimuthalAxes, self).projmap(
map, f, xsize=xsize, ysize=ysize, reso=reso, lamb=lamb, **kwds
)
###################################################################
#
# Table color for mollview, gnomview, and orthview.
# Currently defined so that, with the default colormap found in
# matplotlib.rcParams['image.cmap'], data values greater than vmax are
# displayed with the final element of the colormap, masked indices are
# shown in gray, and the background is set to white.
#
# With matplotlib.rcParams['image.cmap'] assigned to a string
# corresponding to a standard matplotlib colormap, one can call
# hp.mollview(m) and have the map projected in the standard way,
# whereas using just, e.g., hp.mollview(m, cmap='jet') will display
# the data with a non-white background.
#
# One can set the default colormap in the matplotlibrc file, or set
# it in situ:
# >>> matplotlib.rcParams['image.cmap'] = 'coolwarm'
# >>> hp.mollview(m)
# Note that custom colormaps can also be used, but they need to be
# registered ahead of time, as shown in
# http://matplotlib.org/examples/pylab_examples/custom_cmap.html
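#
# A minimal sketch of that registration step (the colormap name 'mycmap', the
# color list and the map `m` are illustrative assumptions, not healpy defaults):
# >>> from matplotlib.colors import LinearSegmentedColormap
# >>> import matplotlib.cm
# >>> mycmap = LinearSegmentedColormap.from_list('mycmap', ['blue', 'white', 'red'])
# >>> matplotlib.cm.register_cmap(name='mycmap', cmap=mycmap)
# >>> hp.mollview(m, cmap='mycmap')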
def get_color_table(vmin, vmax, val, cmap=None, norm=None,
badcolor="gray", bgcolor="white"):
# Create color table
newcmap = create_colormap(cmap, badcolor=badcolor, bgcolor=bgcolor)
if type(norm) is str:
if norm.lower().startswith("log"):
norm = LogNorm2(clip=False)
elif norm.lower().startswith("hist"):
norm = HistEqNorm(clip=False)
else:
norm = None
if norm is None:
norm = LinNorm2(clip=False)
norm.vmin = vmin
norm.vmax = vmax
norm.autoscale_None(val)
return newcmap, norm
def create_colormap(cmap, badcolor="gray", bgcolor="white"):
if type(cmap) == str:
cmap0 = matplotlib.cm.get_cmap(cmap)
elif type(cmap) in [
matplotlib.colors.LinearSegmentedColormap,
matplotlib.colors.ListedColormap,
]:
cmap0 = cmap
else:
cmap0 = matplotlib.cm.get_cmap(matplotlib.rcParams["image.cmap"])
if hasattr(cmap0, "_segmentdata"):
newcm = matplotlib.colors.LinearSegmentedColormap(
"newcm", cmap0._segmentdata, cmap0.N
)
else:
newcm = cmap0
newcm.set_over(newcm(1.0))
newcm.set_under(bgcolor)
newcm.set_bad(badcolor)
return newcm
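# Example sketch of combining the two helpers above (the input values are
# hypothetical): norm='hist' selects histogram equalisation, norm='log' a
# logarithmic stretch, and any other string (or None) a linear stretch.
# >>> cm, nn = get_color_table(0.0, 1.0, np.random.rand(256), cmap=None, norm='hist')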
##################################################################
#
# A Locator that gives the bounds of the interval
#
class BoundaryLocator(matplotlib.ticker.Locator):
def __init__(self, N=2, norm=None):
if N < 2:
raise ValueError("Number of locs must be greater than 1")
self.Nlocs = N
self.norm = norm
def __call__(self):
if matplotlib.__version__ < "0.98":
vmin, vmax = self.viewInterval.get_bounds()
else:
vmin, vmax = self.axis.get_view_interval()
if self.norm == "log":
locs = np.log10(vmin) + np.arange(self.Nlocs) * (
np.log10(vmax) - np.log10(vmin)
) / (self.Nlocs - 1.0)
locs = 10 ** (locs)
else:
locs = vmin + np.arange(self.Nlocs) * (vmax - vmin) / (self.Nlocs - 1.0)
return locs
def autoscale(self):
self.verify_intervals()
vmin, vmax = self.dataInterval.get_bounds()
if vmax < vmin:
vmin, vmax = vmax, vmin
if vmin == vmax:
vmin -= 1
vmax += 1
return vmin, vmax
##################################################################
#
# A normalization class to get color table equalised by
# the histogram of data
#
class HistEqNorm(matplotlib.colors.Normalize):
def __init__(self, vmin=None, vmax=None, clip=False):
matplotlib.colors.Normalize.__init__(self, vmin, vmax, clip)
self.xval = None
self.yval = None
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if np.iterable(value):
vtype = "array"
val = np.ma.asarray(value).astype(np.float)
else:
vtype = "scalar"
val = np.ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = float(self.vmin), float(self.vmax)
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin == vmax:
return 0.0 * val
else:
if clip:
mask = np.ma.getmask(val)
val = np.ma.array(np.clip(val.filled(vmax), vmin, vmax), mask=mask)
result = np.ma.array(
np.interp(val, self.xval, self.yval), mask=np.ma.getmask(val)
)
result[np.isinf(val.data)] = -np.inf
if vtype == "scalar":
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
if np.iterable(value):
vtype = "array"
val = np.ma.array(value)
else:
vtype = "scalar"
val = np.ma.array([value])
result = np.ma.array(
self._lininterp(val, self.yval, self.xval), mask=np.ma.getmask(val)
)
result[np.isinf(val.data)] = -np.inf
if vtype == "scalar":
result = result[0]
return result
def autoscale_None(self, val):
changed = False
if self.vmin is None:
self.vmin = val.min()
changed = True
if self.vmax is None:
self.vmax = val.max()
changed = True
if changed or self.xval is None or self.yval is None:
self._set_xyvals(val)
def autoscale(self, val):
self.vmin = val.min()
self.vmax = val.max()
self._set_xyvals(val)
def _set_xyvals(self, val):
data = np.ma.asarray(val).ravel()
w = np.isinf(data.data)
if data.mask is not np.ma.nomask:
w = w | data.mask
data2 = data.data[~w]
if data2.size < 3:
self.yval = np.array([0, 1], dtype=np.float)
self.xval = np.array([self.vmin, self.vmax], dtype=np.float)
return
bins = min(data2.size // 20, 5000)
if bins < 3:
bins = data2.size
try:
# for numpy 1.1, use new bins format (left and right edges)
hist, bins = np.histogram(
data2, bins=bins, range=(self.vmin, self.vmax), new=True
)
except TypeError:
# for numpy <= 1.0 or numpy >= 1.2, no new keyword
hist, bins = np.histogram(data2, bins=bins, range=(self.vmin, self.vmax))
if bins.size == hist.size + 1:
# new bins format, remove last point
bins = bins[:-1]
hist = hist.astype(np.float) / np.float(hist.sum())
self.yval = np.concatenate([[0.0], hist.cumsum(), [1.0]])
self.xval = np.concatenate(
[[self.vmin], bins + 0.5 * (bins[1] - bins[0]), [self.vmax]]
)
def _lininterp(self, x, X, Y):
if hasattr(x, "__len__"):
xtype = "array"
xx = np.asarray(x).astype(np.float)
else:
xtype = "scalar"
xx = np.asarray([x]).astype(np.float)
idx = X.searchsorted(xx)
yy = xx * 0
yy[idx > len(X) - 1] = Y[-1] # over
yy[idx <= 0] = Y[0] # under
wok = np.where((idx > 0) & (idx < len(X))) # the good ones
iok = idx[wok]
yywok = Y[iok - 1] + (
(Y[iok] - Y[iok - 1]) / (X[iok] - X[iok - 1]) * (xx[wok] - X[iok - 1])
)
w = np.where(((X[iok] - X[iok - 1]) == 0)) # where are the nan ?
yywok[w] = Y[iok[w] - 1] # replace by previous value
wl = np.where(xx[wok] == X[0])
yywok[wl] = Y[0]
wh = np.where(xx[wok] == X[-1])
yywok[wh] = Y[-1]
yy[wok] = yywok
if xtype == "scalar":
yy = yy[0]
return yy
##################################################################
#
# A normalization class to get logarithmic color table
#
class LogNorm2(matplotlib.colors.Normalize):
"""
Normalize a given value to the 0-1 range on a log scale
"""
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if np.iterable(value):
vtype = "array"
val = np.ma.asarray(value).astype(np.float)
else:
vtype = "scalar"
val = np.ma.array([value]).astype(np.float)
val = np.ma.masked_where(np.isinf(val.data), val)
self.autoscale_None(val)
vmin, vmax = float(self.vmin), float(self.vmax)
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin <= 0:
raise ValueError("values must all be positive")
elif vmin == vmax:
return type(value)(0.0 * np.asarray(value))
else:
if clip:
mask = np.ma.getmask(val)
val = np.ma.array(np.clip(val.filled(vmax), vmin, vmax), mask=mask)
result = (np.ma.log(val) - np.log(vmin)) / (np.log(vmax) - np.log(vmin))
result.data[result.data < 0] = 0.0
result.data[result.data > 1] = 1.0
result[np.isinf(val.data)] = -np.inf
if result.mask is not np.ma.nomask:
result.mask[np.isinf(val.data)] = False
if vtype == "scalar":
result = result[0]
return result
def autoscale_None(self, A):
" autoscale only None-valued vmin or vmax"
if self.vmin is None or self.vmax is None:
val = np.ma.masked_where(np.isinf(A.data), A)
matplotlib.colors.Normalize.autoscale_None(self, val)
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = float(self.vmin), float(self.vmax)
if np.iterable(value):
val = np.ma.asarray(value)
return vmin * np.ma.power((vmax / vmin), val)
else:
            return vmin * np.power((vmax / vmin), value)
##################################################################
#
# A normalization class to get linear color table
#
class LinNorm2(matplotlib.colors.Normalize):
"""
Normalize a given value to the 0-1 range on a lin scale
"""
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if np.iterable(value):
vtype = "array"
val = np.ma.asarray(value).astype(np.float)
else:
vtype = "scalar"
val = np.ma.array([value]).astype(np.float)
winf = np.isinf(val.data)
val = np.ma.masked_where(winf, val)
self.autoscale_None(val)
vmin, vmax = float(self.vmin), float(self.vmax)
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin == vmax:
return type(value)(0.0 * np.asarray(value))
else:
if clip:
mask = np.ma.getmask(val)
val = np.ma.array(np.clip(val.filled(vmax), vmin, vmax), mask=mask)
result = (val - vmin) * (1.0 / (vmax - vmin))
result.data[result.data < 0] = 0.0
result.data[result.data > 1] = 1.0
result[winf] = -np.inf
if result.mask is not np.ma.nomask:
result.mask[winf] = False
if vtype == "scalar":
result = result[0]
return result
def autoscale_None(self, A):
" autoscale only None-valued vmin or vmax"
if self.vmin is None or self.vmax is None:
val = np.ma.masked_where(np.isinf(A.data), A)
matplotlib.colors.Normalize.autoscale_None(self, val)
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = float(self.vmin), float(self.vmax)
if np.iterable(value):
val = np.ma.asarray(value)
return vmin + (vmax - vmin) * val
else:
return vmin + (vmax - vmin) * value
| gpl-2.0 |
asadziach/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py | 62 | 9268 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests feeding functions using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def vals_to_list(a):
return {
key: val.tolist() if isinstance(val, np.ndarray) else val
for key, val in a.items()
}
class _FeedingFunctionsTestCase(test.TestCase):
"""Tests for feeding functions."""
def testArrayFeedFnBatchOne(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 16
expected = {
"index_placeholder": [i],
"value_placeholder": [[2 * i, 2 * i + 1]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchFive(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [15, 0, 1, 2, 3],
"value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchTwoWithOneEpoch(self):
array = np.arange(5) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"value_placeholder": [10, 11]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"value_placeholder": [12, 13]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"value_placeholder": [14]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundred(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 100)
expected = {
"index_placeholder":
list(range(0, 16)) * 6 + list(range(0, 4)),
"value_placeholder":
np.arange(32).reshape([16, 2]).tolist() * 6 +
[[0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundredWithSmallerArrayAndMultipleEpochs(self):
array = np.arange(2) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"value_placeholder": [10, 11, 10, 11],
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOne(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 32
expected = {
"index_placeholder": [i + 96],
"a_placeholder": [32 + i],
"b_placeholder": [64 + i]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchFive(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [127, 96, 97, 98, 99],
"a_placeholder": [63, 32, 33, 34, 35],
"b_placeholder": [95, 64, 65, 66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchTwoWithOneEpoch(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 37)
array2 = np.arange(64, 69)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 101))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [96, 97],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [98, 99],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [100],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundred(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 100)
expected = {
"index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)),
"a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)),
"b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68))
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundredWithSmallDataArrayAndMultipleEpochs(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 34)
array2 = np.arange(64, 66)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 98))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [96, 97, 96, 97],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnBatchTwoWithOneEpoch(self):
a = np.arange(32, 37)
b = np.arange(64, 69)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnLargeBatchWithSmallArrayAndMultipleEpochs(self):
a = np.arange(32, 34)
b = np.arange(64, 66)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
if __name__ == "__main__":
test.main()
| apache-2.0 |
spallavolu/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0 # sparsify coef (integer division for the slice)
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise (one noise value per sample)
y += 0.01 * np.random.normal(size=n_samples)
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
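# Illustrative sketch (not part of the original benchmark): a plain wall-clock
# timing of the two prediction paths for environments without kernprof /
# line_profiler. The function name is hypothetical; the @profile + kernprof
# workflow in the module docstring remains the intended way to profile this
# script line by line.
def _time_predictions():
    import time
    start = time.time()
    benchmark_dense_predict()
    dense_seconds = time.time() - start
    start = time.time()
    benchmark_sparse_predict()
    sparse_seconds = time.time() - start
    print("dense predict: %.3fs, sparse predict: %.3fs"
          % (dense_seconds, sparse_seconds))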
| bsd-3-clause |
eric-haibin-lin/mxnet | docs/python_docs/python/scripts/conf.py | 6 | 7703 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
import sys, os, re, subprocess
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
# -- mock out modules
MOCK_MODULES = ['scipy', 'scipy.sparse', 'sklearn']
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.5.6'
# General information about the project.
project = u'Apache MXNet'
author = u'%s developers' % project
copyright = u'2015-2019, %s' % author
github_doc_root = 'https://github.com/apache/incubator-mxnet/tree/master/docs/'
doc_root = 'https://mxnet.apache.org/'
# add markdown parser
source_parsers = {
'.md': CommonMarkParser,
}
# Version information.
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
# 'sphinxcontrib.fulltoc',
'nbsphinx',
'IPython.sphinxext.ipython_console_highlighting',
# 'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
# 'sphinx.ext.mathjax',
# 'sphinx.ext.viewcode',
'breathe',
# 'mxdoc'
'autodocsumm',
]
doctest_global_setup = '''
import mxnet as mx
'''
autodoc_member_order = 'alphabetical'
autodoc_default_flags = ['members', 'show-inheritance']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = ['.rst', '.ipynb', '.md', '.Rmd']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Version and release are passed from CMake.
#version = None
# The full version, including alpha/beta/rc tags.
#release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['templates',
# 'api',
'guide/modules/others', 'guide/guide', 'blog']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
suppress_warnings = [
'image.nonlocal_uri',
]
# -- Options for HTML output ---------------------------------------------------
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../../themes/mx-theme']
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'mxtheme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'primary_color': 'blue',
'accent_color': 'deep_orange',
'show_footer': True,
'relative_url': os.environ.get('SPHINX_RELATIVE_URL', '/')
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../../_static/mxnet_logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '../../_static/mxnet-icon.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../../_static']
html_css_files = [
'mxnet.css',
]
html_js_files = [
'autodoc.js'
]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': 'relations.html'
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'formatdoc'
nbsphinx_execute = 'never'
# let the source file format to be xxx.ipynb instead of xxx.ipynb.txt
html_sourcelink_suffix = ''
def setup(app):
app.add_transform(AutoStructify)
app.add_config_value('recommonmark_config', {
}, True)
app.add_javascript('google_analytics.js')
import mxtheme
app.add_directive('card', mxtheme.CardDirective)
| apache-2.0 |
justincassidy/scikit-learn | sklearn/datasets/species_distributions.py | 198 | 7923 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
    nodata = header[b'NODATA_value']
    if nodata != -9999:
        print(nodata)
        # remap the file's own missing-data marker to the -9999 sentinel
        M[M == nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
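# Illustrative sketch (not part of the original module): construct_grids only
# reads the grid metadata attributes of the batch, so a stand-in Bunch with
# the same fields is enough to show its output. The values below mirror the
# extra_params defaults used by fetch_species_distributions further down; the
# demo function itself is hypothetical.
def _demo_construct_grids():
    grid_meta = Bunch(x_left_lower_corner=-94.8, Nx=1212,
                      y_left_lower_corner=-56.05, Ny=1592,
                      grid_size=0.05)
    xgrid, ygrid = construct_grids(grid_meta)
    # xgrid spans the longitudes and ygrid the latitudes of the coverage grid
    return xgrid, ygrid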
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
    The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
      also known as the Forest Small Rice Rat, a rodent that lives in
      Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
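# Illustrative sketch (not part of the original module): typical use of the
# loader together with construct_grids. It triggers a download on first call,
# so it is kept inside a function rather than run at import time; the function
# name and the choice of coverage layer are hypothetical.
def _demo_fetch_species_distributions():
    data = fetch_species_distributions()
    xgrid, ygrid = construct_grids(data)
    valid = data.coverages[6] > -9999  # cells where this coverage layer has data
    print("training points:", data.train.shape[0],
          "valid grid cells in layer 6:", int(valid.sum()))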
| bsd-3-clause |