repo_name | path | copies | size | content | license
---|---|---|---|---|---|
wavemoth/wavemoth | examples/butterfly_accuracy.py | 1 | 1448 | from __future__ import division
import sys
sys.path.insert(0, '..')
import numpy as np
from numpy.linalg import norm
from matplotlib import pyplot as plt
from time import clock
from wavemoth.fastsht import ShtPlan
from wavemoth.psht import PshtMmajorHealpix
from wavemoth.healpix import get_ring_thetas
from wavemoth.legendre import *
from wavemoth.butterfly import *
def plot_map(m):
from cmb.maps import pixel_sphere_map
pixel_sphere_map(m[0, :]).plot()
Nsides = [16]#64, 128, 256]
#m_fractions = [0, 0.25, 0.45, 0.9, 1]
plt.clf()
min_rows = 129
eps = 1e-10
for Nside in Nsides:
lmax = 30#2 * Nside
odd = 1
for m in [20]:#range(lmax + 1):
# m = int(p * lmax)
nodes = get_ring_thetas(Nside, positive_only=True)
P = compute_normalized_associated_legendre(m, nodes, lmax,
epsilon=1e-30)[:, odd::2]
print P.shape
C = butterfly_compress(P, min_rows=min_rows, eps=eps)
print C.get_stats()
residuals = []
a_l = np.zeros(P.shape[1])
for l in range(0, P.shape[1]):
a_l[l] = 1
x = C.apply(a_l[:, None])[:, 0]
d = np.dot(P, a_l)
a_l[l] = 0
residuals.append(norm(x - d) / norm(d))
if np.all(np.asarray(residuals) == 0):
print 'all 0'
else:
plt.semilogy(residuals)
plt.gca().set_ylim((1e-20, 1))
| gpl-2.0 |
SylvainGuieu/smartplotlib | base.py | 1 | 18316 | from __future__ import division, absolute_import, print_function
from .recursive import (RecObject, RecFunc, CatRecObject, KWS, alias,
_value_formater, _list_formater, rproperty)
from .stack import stack
import inspect
param_docs = {
"color" : "this is the color",
"linewidth" : "this is the line width"
}
class _Style:
def get_style_params(self, style):
def gs(p,new):
for k, v in new.get("__styles__", {}).get(style, {}).iteritems():
p.setdefault(k,v)
p = {}
gs(p, self.locals)
self._history.map(gs,p)
args = getattr(self, "args", None)
if args:
return {k:v for k,v in p.iteritems() if k in args}
return p
def new_style(self, style, _kw_={}, **kwargs):
if not "__styles__" in self.locals:
self.locals["__styles__"] = {}
self.locals["__styles__"][style] = kwargs
for k,v in _kw_.iteritems():
self.set_style_param(style, k, v)
def set_style_param(self, style, path, value):
obj, item = self.__getitempath__(path)
if not len(item):
if not isinstance(obj, (CatRecObject, PlotFunc, PlotFactory)):
raise TypeError("End point object in '%s' is not a valid object. Usage: obj['a.b'] = {'p1':v1} or obj['a.b|p1'] = v1"%(
path if isinstance(path,basestring) else ".".join(path)
))
try:
values = dict(value)
except TypeError:
raise TypeError("End point object in '%s' is a dict like, value must be a dict like object"%(
path if isinstance(path,basestring) else ".".join(path)
))
except ValueError:
                raise TypeError("End point object in '%s' is a dict like, value must be a dict like object"%(
path if isinstance(path,basestring) else ".".join(path)
))
else:
return obj.new_style(style, values)
if obj is not self:
return obj.set_style_param(style, item, value)
#if not "__styles__" in obj.locals:
# obj.locals["__styles__"] = {}
#styles = obj.locals["__styles__"]
#if style not in styles:
# styles[style] = {}
#styles[style][item] = value
#return
# assume style exists in __style__
if not "__styles__" in self.locals:
self.locals["__styles__"] = {}
styles = self.locals["__styles__"]
if style not in styles:
styles[style] = {}
self.locals["__styles__"][style][item] = value
def update_style(self, style, _kw_={}, **kwargs):
kw = dict(_kw_, **kwargs)
if not "__styles__" in self.locals:
self.locals["__styles__"] = {}
styles = self.locals["__styles__"]
if not style in styles:
styles[style] = {}
styles[style].update(kw)
@property
def styleinfo(self):
styles = self.get("__styles__", {})
prt = print
idt = " "*2
if not styles: return
prt(idt+"Styles:")
for style, params in styles.iteritems():
prt(idt*2+style+":")
for k,value in params.iteritems():
value = _value_formater(value)
prt(idt*3+"|%s: %s"%(k,value))
def _style2self(self):
style = self.get("style", None)
if not style: return
if isinstance(style, basestring):
            style = [s.strip() for s in style.split(",")]
for s in style[::-1]:
for p,v in self.get_style_params(s).iteritems():
self.setdefault(p,v)
def get_styledict(self, style):
if not style: return {}
out = {}
if isinstance(style, basestring):
style = [s.strip() for s in style.split(",")]
for s in style[::-1]:
for p,v in self.get_style_params(s).iteritems():
out.setdefault(p,v)
return out
@property
def styledict(self):
style = self.get("style", None)
return self.get_styledict(style)
@property
def _info3(self):
prt = print
idt = " "*4
prt()
styles = self.get("__styles__", {})
if styles:
prt(idt+"Styles: ")
prt("%s"%(_list_formater(styles.keys(), idt=idt*2)))
###
# update the CatRecObject with the style methods
def _cat_new_style(self, style, _kw_={}, **kwargs):
for child in self:
child.new_style(style, _kw_, **kwargs)
def _cat_update_style(self, style, _kw_={}, **kwargs):
for child in self:
child.update_style(style, _kw_, **kwargs)
def _cat_set_style_param(self, style, path, value):
for child in self:
child.set_style_param(style, path, value)
CatRecObject.new_style = _cat_new_style
CatRecObject.update_style = _cat_update_style
CatRecObject.set_style_param = _cat_set_style_param
class PlotFactory(RecObject, _Style):
_unlinked = tuple() # ("go","_go_results")
_default_params = {"go": None, "_go_results": None,
"_example_": None, "__inerit__":None}
_example = None
def __call__(self, *args, **kwargs):
if self._initialized and self._stopped:
            raise TypeError("This RecObject instance can be initialized only once")
new = self.derive()
# reset the root ids
        new._initialized = 1 # one -> the instance is being initialized
if 'style' in kwargs:
new['style'] = kwargs.pop("style", None)
#new._style2self()
_none_ = new.finit(new, *args, **kwargs)
if _none_ is not None:
            raise TypeError("The init function should return None, got type '%s'"%type(_none_))
new._initialized = 2 # two -> init func finished
return new
def goifgo(self):
go = self.get("go", False)
if go:
if go is True:
self["_go_results"] = self.go("-")
return
            if hasattr(go,"__iter__"):
self["_go_results"] = self.go(*go)
return
self["_go_results"] = self.go(go)
return
def _get_direction(self):
d = self.get("direction", "y")
if d in ["y", "Y", 0]:
return "x", "y"
if d in ["x", "X", 1]:
return "y", "x"
        raise ValueError("direction parameter must be one of 'y',0,'x',1, got '%s'"%d)
@property
def example(self):
ex = self.get("_example_", None)
if ex:
return get_example(*ex)
def __add__(self, right):
if not isinstance( right, (PlotFactory, PlotFunc, stack)):
raise TypeError("can add a plot only to a plot, plotfunc or stack (not '%s') "%(type(right)))
return stack([self, right])
def iteraxes(self, _ncol_, _nrow_=None, _n_=None, **kwargs):
if _nrow_ is None:
n = _ncol_
axes = [(_ncol_,i) for i in range(1, n+1)]
else:
n = _nrow_*_ncol_ if _n_ is None else _n_
axes = [(_ncol_, _nrow_, i) for i in range(1,n+1)]
kwargs.setdefault("axes", axes)
return self.iter(n, **kwargs)
def iterfigure(self, *args, **kwargs):
figs = list(range(*args))
kwargs.setdefault("figure", figs)
return self.iter(len(figs), **kwargs)
def __make_doc__(self):
doc = self.__doc__ or self.finit.__doc__
if "{paramlist}" in doc:
pass
def __param_doc__(self,param):
return param_docs.get(param,"")
def __colect_param_doc__(self, d, key="", inerit=None):
cl = self.__class__
selfinerit = cl._default_params.get("__inerit__", None)
if selfinerit:
if inerit is None:
inerit = selfinerit
else:
inerit.extend(selfinerit)
for sub in cl.__mro__:
for k,m in sub.__dict__.iteritems():
if not isinstance(m, (PlotFactory,PlotFunc,rproperty,CatRecObject)):
continue
m = getattr(self,k)
if isinstance(m, CatRecObject):
for child in m:
child.__colect_param_doc__(d,k, inerit)
else:
m.__colect_param_doc__(d, k, inerit)
class PlotFunc(RecFunc, _Style):
@property
def example(self):
ex = self.get("_example_", None)
if ex:
return get_example(*ex)
def __add__(self, right):
if not isinstance( right, (PlotFactory,PlotFunc,stack)):
raise TypeError("can add a plotfunc only to a plot, plotfunc or stack (not '%s') "%(type(right)))
return stack([self, right])
def iteraxes(self, _ncol_, _nrow_=None, _n_=None, **kwargs):
if _nrow_ is None:
n = _ncol_
axes = [(_ncol_,i) for i in range(1,n+1)]
else:
n = _nrow_*_ncol_ if _n_ is None else _n_
axes = [(_ncol_, _nrow_, i) for i in range(1,n+1)]
kwargs.setdefault("axes", axes)
return self.iter(n, **kwargs)
def iterfigure(self, *args, **kwargs):
figs = list(range(*args))
kwargs.setdefault("figure", figs)
return self.iter(len(figs), **kwargs)
def __prepare_call_args__(self, args, ikwargs):
args = self.getargs(*args)
kwargs = self.getkwargs(**ikwargs)
for a,k in zip(args[self.nposargs:], self.kwargnames):
if k in ikwargs:
raise TypeError("got multiple values for keyword argument '%s'"%k)
# remove the extra positional argument from the kwargs
kwargs.pop(k,None)
style = ikwargs.pop("style", self.get("style", None))
argnames = self.args[:len(args)]
for p,v in self.get_styledict(style).iteritems():
if p not in argnames:
kwargs.setdefault(p,v)
return args, kwargs
def _s_call__(self, *args, **ikwargs):
if not self.fcall:
raise TypeError("Not callable")
style = ikwargs.pop("style", self.get("style", None))
args, kwargs = self.__prepare_call_args__(args, ikwargs)
argnames = self.args[:len(args)]
for p,v in self.get_styledict(style).iteritems():
if p not in argnames:
kwargs.setdefault(p,v)
output = self.fcall(*args, **kwargs)
return output
def set_style_param(self, style, item, value):
if not "__styles__" in self.locals:
self.locals["__styles__"] = {}
styles = self.locals["__styles__"]
if style not in styles:
styles[style] = {}
self.locals["__styles__"][style][item] = value
def __param_doc__(self,param):
return param_docs.get(param,"")
def __colect_param_doc__(self, d, key="", inerit=None):
for param in self.args:
if inerit is None or param in inerit:
if param in d:
d[param][1].append(key)
else:
d[param] = (param,[key],self.__param_doc__(param))
@property
def _info4(self):
prt = print
idt = " "*4
style = self.get("style", None)
if not style: return
prt()
prt(idt+"From Style: %s"%style)
params = self.styledict
kwargs = self.getkwargs()
for k,value in params.iteritems():
if k in kwargs: continue
value = _value_formater(value)
prt(idt*2+"|%s: %s"%(k,value))
def get_example(name, module=None):
if module is None:
from . import examples as module
else:
try:
import importlib
base = __name__.split(".",1)[0]
module = importlib.import_module(base+"."+module)
except Exception as e:
return """raise TypeError('cannot load example "%s", "%s"')"""%(name,e)
try:
func= getattr(module, name)
except AttributeError:
return """raise TypeError('cannot find example "%s"')"""%name
source = inspect.getsource(func).replace("def %s"%name, "def %s_ex"%name)
source += "\n"+"_ = %s_ex()"%name+"\n"
return source
def get_axes(axes, fig=None, params={}):
"""
    axes can be:
    None : the current axes is used
    string : look for an axes with that name, raise ValueError if not found
    (ncol,nrow,i) : i'th axes in a ncol by nrow grid (see matplotlib.figure.add_subplot)
    (N,i) : ncol, nrow are chosen so that ncol*nrow >= N and the grid is
            as square as possible, e.g. N=10 gives a 3x4 grid
    matplotlib.Axes object : use that axes
    axes instance : use the axes defined there
    int : look for the i'th axes in the figure, raise ValueError if not found
    params is a dictionary used only when a new axes is created
"""
if axes is None:
figure = get_figure(fig)
return figure.gca()
if isinstance(axes, plot_axes_classes):
return get_axes(axes.get("axes",None), axes.get("figure",fig), params)
if isinstance(axes, basestring):
name = axes
figure = get_figure(fig)
for ax in (figure.axes or []):
if getattr(ax, "id", None) == name:
axes = ax
break
else:
# raise an error if not found
            # however the user still has the possibility to send
# a tuple of (name, axes) see below
raise ValueError("Cannot find axes of name '%s' in this figure"%name)
return axes
if isinstance(axes, (tuple,list)):
figure = get_figure(fig)
coord = axes
if len(coord)==2:
if isinstance(coord[1], basestring):
# if string the second is a figure
axes, fig = coord
# "3" must be 3
try:
intfig = int(fig)
except:
pass
else:
fig = intfig
return get_axes(axes, fig, params)
if isinstance(coord[0], basestring):
# this is a (name, axes) tuple
# try to find the name if not found
# find the axes and rename it
name, axes = coord
try:
axes = get_axes(name, fig)
except TypeError:
axes = get_axes(axes, fig, params)
axes.id = name
finally:
return axes
if isinstance(coord[0], int):
#########
                # If coord is a (N,i) tuple, build the (nx,ny,i) tuple automatically
#########
nx = int(plt.np.sqrt(coord[0]))
ny = int(plt.np.ceil(coord[0]/float(nx)))
coord = (nx,ny,coord[1])
else:
raise TypeError("invalid 2 tuple axes '%s'"%coord)
elif len(coord)==1:
            # this is explicitly a number (probably greater than 100)
axes, = coord
if not isinstance(axes,int):
return get_axes(axes, fig, params)
figure = get_figure(fig)
num = axes
##
            # keep the convention that 1 means the 1st axes (i.e. index 0)
            # to avoid confusion
num -= 1
if num>=len(figure.axes):
raise ValueError("cannot find the %dth axes, figure has only %d axes"%(num+1,len(figure.axes)))
return figure.axes[num]
if len(coord)!=3:
raise TypeError("coord tuple must be of len 2 or 3 got %d"%len(coord))
axes = _add_subplot(figure, coord, params)
return axes
if isinstance(axes, int):
figure = get_figure(fig)
num = axes
##
        # it is pretty unlikely that more than 100 axes are intended
        # to be plotted, so interpret the shortcut 121 as (1,2,1);
        # the user can still force an axes number with (456,), which is
        # the axes number #456
if num>100:
if num>9981:
                raise ValueError("int exceeds 9981, use (num,) to force the num'th axes")
coord = tuple(int(n) for n in str(num))
return get_axes(coord, fig, params)
##
        # keep the convention that 1 means the 1st axes (i.e. index 0)
        # to avoid confusion
num -= 1
if num>=len(figure.axes):
raise ValueError("cannot find the %dth axes, figure has only %d axes"%(num+1,len(figure.axes)))
axes = figure.axes[num]
return axes
if isinstance(axes,(plt.Axes, plt.Subplot)):
return axes
    raise TypeError("axes keyword must be a tuple, integer, None, string or Axes instance, got a '%s'"%(type(axes)))
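# Illustrative sketch (hypothetical helper, not part of the API above):
# reproduces the (N, i) -> (nx, ny) grid logic used by get_axes with only
# the standard library, so the "as square as possible" rule can be checked
# in isolation.
def _grid_shape_sketch(n):
    import math
    nx = int(math.sqrt(n))
    ny = int(math.ceil(n / float(nx)))
    return nx, ny
# _grid_shape_sketch(10) -> (3, 4); _grid_shape_sketch(9) -> (3, 3)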
def get_figure(fig, axes=None):
if isinstance(fig,plt.Figure):
return fig
if fig is None:
if axes is None:
return plt.gcf()
return get_axes(axes).figure
if isinstance(fig, (tuple,list)):
tpl = fig
if len(tpl)!=2:
            raise TypeError("Expecting a 2 tuple for figure, got '%s'"%tpl)
if isinstance(tpl[0], basestring):
name, fig = tpl
fig = figure(name)
else:
fig = get_figure(None, axes)
if len(tpl)!=2:
                raise TypeError("Expecting a 2 tuple for figure, got '%s'"%tpl)
nrows, ncols = tpl
gs = GridSpec(nrows, ncols)
for i in range(nrows*ncols):
fig.add_subplot(gs[i // ncols, i % ncols])
return fig
return plt.figure(fig)
| gpl-2.0 |
akionakamura/scikit-learn | sklearn/preprocessing/data.py | 113 | 56747 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` instead."
                " See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
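# Usage sketch (hypothetical helper, for illustration only; assumes the
# numpy import above): scale() should leave each column of a dense array
# with roughly zero mean and unit standard deviation.
def _scale_usage_sketch():
    X = np.array([[1., 2.], [3., 6.], [5., 10.]])
    Xs = scale(X)
    # Column means become ~0 and column standard deviations ~1.
    assert np.allclose(Xs.mean(axis=0), 0.0)
    assert np.allclose(Xs.std(axis=0), 1.0)
    return Xs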
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
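# Usage sketch (hypothetical helper, for illustration only): MinMaxScaler
# applies X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
# and then maps X_std onto feature_range.
def _minmax_usage_sketch():
    X = np.array([[1.], [2.], [4.]])
    Xs = MinMaxScaler(feature_range=(0, 1)).fit(X).transform(X)
    # The column minimum maps to 0, the maximum to 1, and 2 to (2-1)/(4-1).
    assert np.allclose(Xs.ravel(), [0.0, 1.0 / 3.0, 1.0])
    return Xs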
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
                raise ValueError(
                    "Cannot uncenter sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
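# Usage sketch (hypothetical helper, for illustration only): StandardScaler
# stores mean_ and std_ at fit time, so transform() followed by
# inverse_transform() recovers the original dense data.
def _standard_scaler_roundtrip_sketch():
    X = np.array([[1., 10.], [2., 20.], [3., 30.]])
    scaler = StandardScaler().fit(X)
    Xs = scaler.transform(X)
    # Undoing the centering and scaling gives back the original values.
    assert np.allclose(scaler.inverse_transform(Xs), X)
    return scaler.mean_, scaler.std_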
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
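# Usage sketch (hypothetical helper, for illustration only): MaxAbsScaler
# divides each column by its maximum absolute value, preserving signs and
# mapping the data into [-1, 1] without centering it.
def _maxabs_usage_sketch():
    X = np.array([[-2.], [1.], [4.]])
    Xs = MaxAbsScaler().fit_transform(X)
    # max(|X|) is 4, so the column becomes [-0.5, 0.25, 1.0].
    assert np.allclose(Xs.ravel(), [-0.5, 0.25, 1.0])
    return Xs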
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
elif self.axis == 0:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
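# Usage sketch (hypothetical helper, for illustration only): RobustScaler
# centers on the median and scales by the interquartile range, so a single
# extreme outlier has little effect on the fitted statistics.
def _robust_scaler_sketch():
    X = np.array([[1.], [2.], [3.], [4.], [1000.]])
    scaler = RobustScaler().fit(X)
    # The median is 3 and the 75th minus the 25th percentile is 4 - 2 = 2,
    # regardless of how extreme the last sample is.
    assert np.allclose(scaler.center_, 3.0)
    assert np.allclose(scaler.scale_, 2.0)
    return scaler.center_, scaler.scale_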
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
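# Usage sketch (hypothetical helper, for illustration only): with degree=2
# and two input features [a, b], PolynomialFeatures produces the monomials
# [1, a, b, a^2, a*b, b^2], i.e. six output columns.
def _polynomial_features_sketch():
    X = np.arange(6).reshape(3, 2)
    poly = PolynomialFeatures(degree=2).fit(X)
    assert poly.n_output_features_ == 6
    # powers_[i, j] gives the exponent of input j in output column i.
    return poly.powers_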
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
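# Usage sketch (hypothetical helper, for illustration only): with norm='l2'
# every row is divided by its Euclidean length, so each non-zero row of the
# result has unit l2 norm.
def _normalize_usage_sketch():
    X = np.array([[3., 4.], [0., 5.]])
    Xn = normalize(X, norm='l2')
    # [3, 4] has length 5 and becomes [0.6, 0.8]; [0, 5] becomes [0, 1].
    assert np.allclose(Xn, [[0.6, 0.8], [0.0, 1.0]])
    return Xn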
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
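# Usage sketch (hypothetical helper, for illustration only): binarize() maps
# values strictly above the threshold to 1 and everything else to 0.
def _binarize_usage_sketch():
    X = np.array([[-1., 0.5, 2.]])
    assert np.allclose(binarize(X, threshold=0.5), [[0., 0., 1.]])
    # With the default threshold of 0.0 only strictly positive values map to 1.
    return binarize(X, threshold=0.0)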
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
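# Usage sketch (hypothetical helper, for illustration only): for a linear
# kernel K = X X^T, centering K with KernelCenterer is equivalent to
# centering the features first and then taking the linear kernel.
def _kernel_centerer_sketch():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    K = np.dot(X, X.T)
    K_centered = KernelCenterer().fit(K).transform(K)
    Xc = X - X.mean(axis=0)
    assert np.allclose(K_centered, np.dot(Xc, Xc.T))
    return K_centered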
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
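# Usage sketch (hypothetical helper, for illustration only): the transform
# is applied to the selected columns only and the remaining columns are
# stacked, untouched, to the right of the transformed block.
def _transform_selected_sketch():
    X = np.array([[1., 10.], [2., 20.]])
    out = _transform_selected(X, lambda A: A * 100.0, selected=[0])
    assert np.allclose(out, [[100., 10.], [200., 20.]])
    return out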
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
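        # Each sample contributes one non-zero entry per feature: adding the
        # cumulative offsets in `indices` to the raw feature values gives the
        # column of the active one-hot bit, while the row index is simply the
        # sample index repeated once per feature.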
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known using fit,
        # i.e. those less than n_values_, selected via a mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
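# Illustrative sketch (not part of the original module); the column index and
# values below are hypothetical:
#     enc = OneHotEncoder(categorical_features=[0], sparse=False)
#     enc.fit_transform([[0, 7.2], [1, 3.5], [0, 1.0]])
# encodes only the first column and stacks the untouched second column to the
# right, giving rows of the form [1., 0., 7.2], [0., 1., 3.5] and [1., 0., 1.].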
| bsd-3-clause |
ishanic/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
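# grid_to_graph returns a sparse adjacency matrix linking each pixel to its
# immediate neighbours; this is what makes the agglomeration below spatially
# constrained.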
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
pv/scikit-learn | examples/classification/plot_lda.py | 164 | 2224 | """
====================================================================
Normal and Shrinkage Linear Discriminant Analysis for classification
====================================================================
Shows how shrinkage improves classification.
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.lda import LDA
n_train = 20 # samples for training
n_test = 200 # samples for testing
n_averages = 50 # how often to repeat classification
n_features_max = 75 # maximum number of features
step = 4 # step size for the calculation
def generate_data(n_samples, n_features):
"""Generate random blob-ish data with noisy features.
This returns an array of input data with shape `(n_samples, n_features)`
and an array of `n_samples` target labels.
Only one feature contains discriminative information, the other features
contain only noise.
"""
X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]])
# add non-discriminative features
if n_features > 1:
X = np.hstack([X, np.random.randn(n_samples, n_features - 1)])
return X, y
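# Illustrative note (not part of the original example): generate_data(20, 5)
# returns X with shape (20, 5) and y with shape (20,); only X[:, 0] separates
# the two blobs, the remaining four columns are pure noise.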
acc_clf1, acc_clf2 = [], []
n_features_range = range(1, n_features_max + 1, step)
for n_features in n_features_range:
score_clf1, score_clf2 = 0, 0
for _ in range(n_averages):
X, y = generate_data(n_train, n_features)
clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y)
clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y)
X, y = generate_data(n_test, n_features)
score_clf1 += clf1.score(X, y)
score_clf2 += clf2.score(X, y)
acc_clf1.append(score_clf1 / n_averages)
acc_clf2.append(score_clf2 / n_averages)
features_samples_ratio = np.array(n_features_range) / n_train
plt.plot(features_samples_ratio, acc_clf1, linewidth=2,
label="LDA with shrinkage", color='r')
plt.plot(features_samples_ratio, acc_clf2, linewidth=2,
label="LDA", color='g')
plt.xlabel('n_features / n_samples')
plt.ylabel('Classification accuracy')
plt.legend(loc=1, prop={'size': 12})
plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)')
plt.show()
| bsd-3-clause |
nwillemse/nctrader | examples/pandas_examples/pandas_bar_display_prices_backtest.py | 1 | 5320 | import click
import os
import pandas as pd
import requests_cache
import pandas_datareader.data as web
from nctrader import settings
from nctrader.compat import queue
from nctrader.price_parser import PriceParser
from nctrader.price_handler import GenericPriceHandler
from nctrader.price_handler.iterator.pandas import PandasBarEventIterator
from nctrader.strategy import DisplayStrategy, Strategies
from nctrader.strategy.buy_and_hold import BuyAndHoldStrategy
from nctrader.position_sizer.fixed import FixedPositionSizer
from nctrader.risk_manager.example import ExampleRiskManager
from nctrader.portfolio_handler import PortfolioHandler
from nctrader.compliance.example import ExampleCompliance
from nctrader.execution_handler.ib_simulated import IBSimulatedExecutionHandler
from nctrader.statistics.simple import SimpleStatistics
from nctrader.trading_session.backtest import Backtest
def init_session(cache_name, cache_backend, expire_after):
if expire_after == '0':
expire_after = None
print("expire_after==0 no cache")
return None
else:
if expire_after == '-1':
expire_after = 0
print("Installing cache '%s.sqlite' without expiration" % cache_name)
else:
expire_after = pd.to_timedelta(expire_after, unit='s')
print("Installing cache '%s.sqlite' with expire_after=%s (d days hh:mm:ss)" % (cache_name, expire_after))
session = requests_cache.CachedSession(cache_name=cache_name, backend=cache_backend, expire_after=expire_after)
return session
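# Illustrative note (not part of the original script): with the CLI defaults
# below, init_session("requests_cache", "sqlite", "24:00:00.0") installs a
# requests_cache.sqlite cache whose entries expire after one day; passing "-1"
# keeps entries without expiration and "0" disables caching (the function then
# returns None).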
def run(cache_name, cache_backend, expire_after, data_source, start, end, config, testing, tickers, filename, n, n_window):
# Set up variables needed for backtest
events_queue = queue.Queue()
initial_equity = PriceParser.parse(500000.00)
session = init_session(cache_name, cache_backend, expire_after)
period = 86400 # Seconds in a day
if len(tickers) == 1:
data = web.DataReader(tickers[0], data_source, start, end, session=session)
else:
data = web.DataReader(tickers, data_source, start, end, session=session)
# Use Generic Bar Handler with Pandas Bar Iterator
price_event_iterator = PandasBarEventIterator(data, period, tickers[0])
price_handler = GenericPriceHandler(events_queue, price_event_iterator)
# Use the Display Strategy
strategy1 = DisplayStrategy(n=n, n_window=n_window)
strategy2 = BuyAndHoldStrategy(tickers, events_queue)
strategy = Strategies(strategy1, strategy2)
# Use an example Position Sizer
position_sizer = FixedPositionSizer()
# Use an example Risk Manager
risk_manager = ExampleRiskManager()
# Use the default Portfolio Handler
portfolio_handler = PortfolioHandler(
initial_equity, events_queue, price_handler,
position_sizer, risk_manager
)
# Use the ExampleCompliance component
compliance = ExampleCompliance(config)
# Use a simulated IB Execution Handler
execution_handler = IBSimulatedExecutionHandler(
events_queue, price_handler, compliance
)
# Use the default Statistics
statistics = SimpleStatistics(config, portfolio_handler)
# Set up the backtest
backtest = Backtest(
price_handler, strategy,
portfolio_handler, execution_handler,
position_sizer, risk_manager,
statistics, initial_equity
)
results = backtest.simulate_trading(testing=testing)
statistics.save(filename)
return results
@click.command()
@click.option('--max_rows', default=20, help='Maximum number of rows displayed')
@click.option('--cache_name', default='', help="Cache name (file if backend is sqlite)")
@click.option('--cache_backend', default='sqlite', help="Cache backend - default 'sqlite'")
@click.option('--expire_after', default='24:00:00.0', help=u"Cache expiration (0: no cache, -1: no expiration, d: d seconds expiration cache)")
@click.option('--data_source', default='yahoo', help='the data source ("yahoo", "yahoo-actions", "yahoo-dividends", "google", "fred", "ff", or "edgar-index")')
@click.option('--start', default='2010-01-04', help='Start')
@click.option('--end', default='2016-06-22', help='End')
@click.option('--config', default=settings.DEFAULT_CONFIG_FILENAME, help='Config filename')
@click.option('--testing/--no-testing', default=False, help='Enable testing mode')
@click.option('--tickers', default='^GSPC', help='Tickers (use comma) - default is "^GSPC" ie "SP500"')
@click.option('--filename', default='', help='Pickle (.pkl) statistics filename')
@click.option('--n', default=100, help='Display prices every n price events')
@click.option('--n_window', default=5, help='Display n_window prices')
def main(max_rows, cache_name, cache_backend, expire_after, data_source, start, end, config, testing, tickers, filename, n, n_window):
pd.set_option('max_rows', max_rows)
if cache_name == '':
cache_name = "requests_cache"
else:
if cache_backend.lower() == 'sqlite':
cache_name = os.path.expanduser(cache_name)
tickers = tickers.split(",")
config = settings.from_file(config, testing)
run(cache_name, cache_backend, expire_after, data_source, start, end, config, testing, tickers, filename, n, n_window)
if __name__ == "__main__":
main()
| mit |
siutanwong/scikit-learn | sklearn/preprocessing/tests/test_function_transformer.py | 176 | 2169 | from nose.tools import assert_equal
import numpy as np
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
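# _make_func builds a transparent wrapper: it records every positional and
# keyword argument it receives in the supplied stores, so the tests below can
# assert exactly what FunctionTransformer forwarded to the wrapped function.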
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
np.testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
    # The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
np.testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
    # The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
np.testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
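def test_identity_default():
    # Minimal extra check (not part of the original test module), assuming the
    # default func is the identity, as documented upstream.
    X = np.arange(10).reshape((5, 2))
    np.testing.assert_array_equal(FunctionTransformer().transform(X), X)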
| bsd-3-clause |
nttks/jenkins-test | docs/en_us/platform_api/source/conf.py | 12 | 6815 | # -*- coding: utf-8 -*-
# pylint: disable=invalid-name
# pylint: disable=redefined-builtin
# pylint: disable=protected-access
# pylint: disable=unused-argument
import os
from path import path
import sys
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
sys.path.append('../../../../')
from docs.shared.conf import *
# Add any paths that contain templates here, relative to this directory.
#templates_path.append('source/_templates')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path.append('source/_static')
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
root = path('../../../..').abspath()
sys.path.insert(0, root)
sys.path.append(root / "lms/djangoapps/mobile_api")
sys.path.append(root / "lms/djangoapps/mobile_api/course_info")
sys.path.append(root / "lms/djangoapps/mobile_api/users")
sys.path.append(root / "lms/djangoapps/mobile_api/video_outlines")
sys.path.insert(
0,
os.path.abspath(
os.path.normpath(
os.path.dirname(__file__) + '/../../../'
)
)
)
sys.path.append('.')
# django configuration - careful here
if on_rtd:
os.environ['DJANGO_SETTINGS_MODULE'] = 'lms'
else:
os.environ['DJANGO_SETTINGS_MODULE'] = 'lms.envs.test'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath',
'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinxcontrib.napoleon']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# Output file base name for HTML help builder.
htmlhelp_basename = 'edXDocs'
project = u'edX Platform API Version 0.5 Alpha'
copyright = u'2014, edX'
# --- Mock modules ------------------------------------------------------------
# Mock all the modules that the readthedocs build can't import
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
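# The Mock above lets Sphinx import modules whose real dependencies are not
# installed on readthedocs: attribute access returns further mocks, and
# capitalized names return freshly created classes so that subclassing a
# mocked name in the documented code still works.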
# The list of modules and submodules that we know give RTD trouble.
# Make sure you've tried including the relevant package in
# docs/share/requirements.txt before adding to this list.
MOCK_MODULES = [
'bson',
'bson.errors',
'bson.objectid',
'dateutil',
'dateutil.parser',
'fs',
'fs.errors',
'fs.osfs',
'lazy',
'mako',
'mako.template',
'matplotlib',
'matplotlib.pyplot',
'mock',
'numpy',
'oauthlib',
'oauthlib.oauth1',
'oauthlib.oauth1.rfc5849',
'PIL',
'pymongo',
'pyparsing',
'pysrt',
'requests',
'scipy.interpolate',
'scipy.constants',
'scipy.optimize',
'yaml',
'webob',
'webob.multidict',
]
if on_rtd:
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# -----------------------------------------------------------------------------
# from http://djangosnippets.org/snippets/2533/
# autogenerate models definitions
import inspect
import types
from HTMLParser import HTMLParser
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_unicode, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and isinstance(s, (types.NoneType, int)):
return s
    if not isinstance(s, basestring):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
s = unicode(str(s), encoding, errors)
elif not isinstance(s, unicode):
s = unicode(s, encoding, errors)
return s
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
def process_docstring(app, what, name, obj, options, lines):
"""Autodoc django models"""
# This causes import errors if left outside the function
from django.db import models
# If you want extract docs from django forms:
# from django import forms
# from django.forms.models import BaseInlineFormSet
# Only look at objects that inherit from Django's base MODEL class
if inspect.isclass(obj) and issubclass(obj, models.Model):
# Grab the field list from the meta class
fields = obj._meta._fields()
for field in fields:
# Decode and strip any html out of the field's help text
help_text = strip_tags(force_unicode(field.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
verbose_name = force_unicode(field.verbose_name).capitalize()
if help_text:
# Add the model field to the end of the docstring as a param
# using the help text as the description
lines.append(u':param %s: %s' % (field.attname, help_text))
else:
# Add the model field to the end of the docstring as a param
# using the verbose name as the description
lines.append(u':param %s: %s' % (field.attname, verbose_name))
# Add the field's type to the docstring
lines.append(u':type %s: %s' % (field.attname, type(field).__name__))
return lines
def setup(app):
"""Setup docsting processors"""
#Register the docstring processor with sphinx
app.connect('autodoc-process-docstring', process_docstring)
| agpl-3.0 |
Sotera/GEQE | geqe-ml/lib/plotting.py | 1 | 2128 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def updateMonitorPlot(mPX, mPY, mSL, jobNm):
plt.xlabel("Application Step")
plt.ylabel("Time to complete")
plt.suptitle(jobNm+" Progress")
plt.subplots_adjust(bottom=0.45)
ax = plt.subplot()
ax.bar(mPX, mPY, width=1.0)
ax.set_xticks(map(lambda x: x-0.5, range(1,len(mPX)+1)))
ax.set_xticklabels(mSL,rotation=45)
ax.set_yscale('log')
plt.savefig("monitorFiles/"+jobNm+".png")
def generateROCCurve(tAndP, nPos, nNeg, jobNm):
print "Positive Points:", nPos, "\tNegative Points:", nNeg
tpr = []
fpr = []
f_out = open('scoreFiles/'+jobNm, 'w')
f_out.write("nPos: " + str(nPos) + ", nNeg: " + str(nNeg) + "\n")
for thresh in map(lambda x: (10.-1.*x)/10.,range(21)):
# tp -> condition positive, predicted positive
# fp -> condition negative, predicted positive
# tn -> condition negative, predicted negative
# fn -> condition positive, predicted negative
true_positive = 0
false_positive = 0
true_negative = 0
false_negative = 0
for point in tAndP:
if point[0] == 1. and point[1] >= thresh:
true_positive = true_positive + 1
elif point[0] == 1. and point[1] < thresh:
false_negative = false_negative + 1
elif point[0] == 0. and point[1] >= thresh:
false_positive = false_positive + 1
elif point[0] == 0. and point[1] < thresh:
true_negative = true_negative + 1
f_out.write("\tThreshold: " + str(thresh) + "\n")
f_out.write("\t\tTP: " + str(true_positive) + ", FP: " + str(false_positive) + ", FN: " + str(false_negative) + ", TN: " + str(true_negative) + "\n")
tpr.append((1.*true_positive)/(1.*nPos))
fpr.append((1.*false_positive)/(1.*nNeg))
plt.xlabel("False Positive Rate (1-Specificity)")
plt.ylabel("True Positive Rate (Sensitivity)")
plt.plot(fpr, tpr, label="ROC for job:"+jobNm)
plt.plot([0,1],[0,1], 'r--')
plt.savefig("monitorFiles/"+jobNm+".png") | unlicense |
glorizen/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/axis.py | 69 | 54453 | """
Classes for the ticks and x and y axis
"""
from __future__ import division
from matplotlib import rcParams
import matplotlib.artist as artist
import matplotlib.cbook as cbook
import matplotlib.font_manager as font_manager
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.scale as mscale
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
import matplotlib.units as munits
class Tick(artist.Artist):
"""
Abstract base class for the axis ticks, grid lines and labels
1 refers to the bottom of the plot for xticks and the left for yticks
2 refers to the top of the plot for xticks and the right for yticks
Publicly accessible attributes:
:attr:`tick1line`
a Line2D instance
:attr:`tick2line`
a Line2D instance
:attr:`gridline`
a Line2D instance
:attr:`label1`
a Text instance
:attr:`label2`
a Text instance
:attr:`gridOn`
a boolean which determines whether to draw the tickline
:attr:`tick1On`
a boolean which determines whether to draw the 1st tickline
:attr:`tick2On`
a boolean which determines whether to draw the 2nd tickline
:attr:`label1On`
a boolean which determines whether to draw tick label
:attr:`label2On`
a boolean which determines whether to draw tick label
"""
def __init__(self, axes, loc, label,
size = None, # points
gridOn = None, # defaults to axes.grid
tick1On = True,
tick2On = True,
label1On = True,
label2On = False,
major = True,
):
"""
bbox is the Bound2D bounding box in display coords of the Axes
loc is the tick location in data coords
size is the tick size in relative, axes coords
"""
artist.Artist.__init__(self)
if gridOn is None: gridOn = rcParams['axes.grid']
self.set_figure(axes.figure)
self.axes = axes
name = self.__name__.lower()
if size is None:
if major:
size = rcParams['%s.major.size'%name]
pad = rcParams['%s.major.pad'%name]
else:
size = rcParams['%s.minor.size'%name]
pad = rcParams['%s.minor.pad'%name]
self._tickdir = rcParams['%s.direction'%name]
if self._tickdir == 'in':
self._xtickmarkers = (mlines.TICKUP, mlines.TICKDOWN)
self._ytickmarkers = (mlines.TICKRIGHT, mlines.TICKLEFT)
self._pad = pad
else:
self._xtickmarkers = (mlines.TICKDOWN, mlines.TICKUP)
self._ytickmarkers = (mlines.TICKLEFT, mlines.TICKRIGHT)
self._pad = pad + size
self._loc = loc
self._size = size
self.tick1line = self._get_tick1line()
self.tick2line = self._get_tick2line()
self.gridline = self._get_gridline()
self.label1 = self._get_text1()
self.label = self.label1 # legacy name
self.label2 = self._get_text2()
self.gridOn = gridOn
self.tick1On = tick1On
self.tick2On = tick2On
self.label1On = label1On
self.label2On = label2On
self.update_position(loc)
def get_children(self):
children = [self.tick1line, self.tick2line, self.gridline, self.label1, self.label2]
return children
def set_clip_path(self, clippath, transform=None):
artist.Artist.set_clip_path(self, clippath, transform)
#self.tick1line.set_clip_path(clippath, transform)
#self.tick2line.set_clip_path(clippath, transform)
self.gridline.set_clip_path(clippath, transform)
set_clip_path.__doc__ = artist.Artist.set_clip_path.__doc__
def get_pad_pixels(self):
return self.figure.dpi * self._pad / 72.0
def contains(self, mouseevent):
"""
        Test whether the mouse event occurred in the Tick marks.
This function always returns false. It is more useful to test if the
axis as a whole contains the mouse rather than the set of tick marks.
"""
if callable(self._contains): return self._contains(self,mouseevent)
return False,{}
def set_pad(self, val):
"""
Set the tick label pad in points
ACCEPTS: float
"""
self._pad = val
def get_pad(self):
'Get the value of the tick label pad in points'
return self._pad
def _get_text1(self):
'Get the default Text 1 instance'
pass
def _get_text2(self):
'Get the default Text 2 instance'
pass
def _get_tick1line(self):
'Get the default line2D instance for tick1'
pass
def _get_tick2line(self):
'Get the default line2D instance for tick2'
pass
def _get_gridline(self):
'Get the default grid Line2d instance for this tick'
pass
def get_loc(self):
'Return the tick location (data coords) as a scalar'
return self._loc
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__name__)
midPoint = mtransforms.interval_contains(self.get_view_interval(), self.get_loc())
if midPoint:
if self.gridOn:
self.gridline.draw(renderer)
if self.tick1On:
self.tick1line.draw(renderer)
if self.tick2On:
self.tick2line.draw(renderer)
if self.label1On:
self.label1.draw(renderer)
if self.label2On:
self.label2.draw(renderer)
renderer.close_group(self.__name__)
def set_label1(self, s):
"""
Set the text of ticklabel
ACCEPTS: str
"""
self.label1.set_text(s)
set_label = set_label1
def set_label2(self, s):
"""
Set the text of ticklabel2
ACCEPTS: str
"""
self.label2.set_text(s)
def _set_artist_props(self, a):
a.set_figure(self.figure)
#if isinstance(a, mlines.Line2D): a.set_clip_box(self.axes.bbox)
def get_view_interval(self):
'return the view Interval instance for the axis this tick is ticking'
raise NotImplementedError('Derived must override')
def set_view_interval(self, vmin, vmax, ignore=False):
raise NotImplementedError('Derived must override')
class XTick(Tick):
"""
Contains all the Artists needed to make an x tick - the tick line,
the label text and the grid line
"""
__name__ = 'xtick'
def _get_text1(self):
'Get the default Text instance'
# the y loc is 3 points below the min of y axis
# get the affine as an a,b,c,d,tx,ty list
# x in data coords, y in axes coords
#t = mtext.Text(
trans, vert, horiz = self.axes.get_xaxis_text1_transform(self._pad)
size = rcParams['xtick.labelsize']
t = mtext.Text(
x=0, y=0,
fontproperties=font_manager.FontProperties(size=size),
color=rcParams['xtick.color'],
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_text2(self):
'Get the default Text 2 instance'
# x in data coords, y in axes coords
#t = mtext.Text(
trans, vert, horiz = self.axes.get_xaxis_text2_transform(self._pad)
t = mtext.Text(
x=0, y=1,
fontproperties=font_manager.FontProperties(size=rcParams['xtick.labelsize']),
color=rcParams['xtick.color'],
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_tick1line(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D(xdata=(0,), ydata=(0,),
color='k',
linestyle = 'None',
marker = self._xtickmarkers[0],
markersize=self._size,
)
l.set_transform(self.axes.get_xaxis_transform())
self._set_artist_props(l)
return l
def _get_tick2line(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D( xdata=(0,), ydata=(1,),
color='k',
linestyle = 'None',
marker = self._xtickmarkers[1],
markersize=self._size,
)
l.set_transform(self.axes.get_xaxis_transform())
self._set_artist_props(l)
return l
def _get_gridline(self):
'Get the default line2D instance'
# x in data coords, y in axes coords
l = mlines.Line2D(xdata=(0.0, 0.0), ydata=(0, 1.0),
color=rcParams['grid.color'],
linestyle=rcParams['grid.linestyle'],
linewidth=rcParams['grid.linewidth'],
)
l.set_transform(self.axes.get_xaxis_transform())
self._set_artist_props(l)
return l
def update_position(self, loc):
'Set the location of tick in data coords with scalar *loc*'
x = loc
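        # On non-linear scales the cached line transforms go stale, so the
        # tick and grid line artists are flagged as invalid further down.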
nonlinear = (hasattr(self.axes, 'yaxis') and
self.axes.yaxis.get_scale() != 'linear' or
hasattr(self.axes, 'xaxis') and
self.axes.xaxis.get_scale() != 'linear')
if self.tick1On:
self.tick1line.set_xdata((x,))
if self.tick2On:
self.tick2line.set_xdata((x,))
if self.gridOn:
self.gridline.set_xdata((x,))
if self.label1On:
self.label1.set_x(x)
if self.label2On:
self.label2.set_x(x)
if nonlinear:
self.tick1line._invalid = True
self.tick2line._invalid = True
self.gridline._invalid = True
self._loc = loc
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervalx
def set_view_interval(self, vmin, vmax, ignore = False):
if ignore:
self.axes.viewLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.axes.viewLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)
def get_minpos(self):
return self.axes.dataLim.minposx
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervalx
class YTick(Tick):
"""
Contains all the Artists needed to make a Y tick - the tick line,
the label text and the grid line
"""
__name__ = 'ytick'
# how far from the y axis line the right of the ticklabel are
def _get_text1(self):
'Get the default Text instance'
# x in axes coords, y in data coords
#t = mtext.Text(
trans, vert, horiz = self.axes.get_yaxis_text1_transform(self._pad)
t = mtext.Text(
x=0, y=0,
fontproperties=font_manager.FontProperties(size=rcParams['ytick.labelsize']),
color=rcParams['ytick.color'],
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
#t.set_transform( self.axes.transData )
self._set_artist_props(t)
return t
def _get_text2(self):
'Get the default Text instance'
# x in axes coords, y in data coords
#t = mtext.Text(
trans, vert, horiz = self.axes.get_yaxis_text2_transform(self._pad)
t = mtext.Text(
x=1, y=0,
fontproperties=font_manager.FontProperties(size=rcParams['ytick.labelsize']),
color=rcParams['ytick.color'],
verticalalignment=vert,
horizontalalignment=horiz,
)
t.set_transform(trans)
self._set_artist_props(t)
return t
def _get_tick1line(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D( (0,), (0,), color='k',
marker = self._ytickmarkers[0],
linestyle = 'None',
markersize=self._size,
)
l.set_transform(self.axes.get_yaxis_transform())
self._set_artist_props(l)
return l
def _get_tick2line(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D( (1,), (0,), color='k',
marker = self._ytickmarkers[1],
linestyle = 'None',
markersize=self._size,
)
l.set_transform(self.axes.get_yaxis_transform())
self._set_artist_props(l)
return l
def _get_gridline(self):
'Get the default line2D instance'
# x in axes coords, y in data coords
l = mlines.Line2D( xdata=(0,1), ydata=(0, 0),
color=rcParams['grid.color'],
linestyle=rcParams['grid.linestyle'],
linewidth=rcParams['grid.linewidth'],
)
l.set_transform(self.axes.get_yaxis_transform())
self._set_artist_props(l)
return l
def update_position(self, loc):
'Set the location of tick in data coords with scalar loc'
y = loc
nonlinear = (hasattr(self.axes, 'yaxis') and
self.axes.yaxis.get_scale() != 'linear' or
hasattr(self.axes, 'xaxis') and
self.axes.xaxis.get_scale() != 'linear')
if self.tick1On:
self.tick1line.set_ydata((y,))
if self.tick2On:
self.tick2line.set_ydata((y,))
if self.gridOn:
self.gridline.set_ydata((y, ))
if self.label1On:
self.label1.set_y( y )
if self.label2On:
self.label2.set_y( y )
if nonlinear:
self.tick1line._invalid = True
self.tick2line._invalid = True
self.gridline._invalid = True
self._loc = loc
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervaly
def set_view_interval(self, vmin, vmax, ignore = False):
if ignore:
self.axes.viewLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.axes.viewLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)
def get_minpos(self):
return self.axes.dataLim.minposy
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervaly
class Ticker:
locator = None
formatter = None
class Axis(artist.Artist):
"""
Public attributes
* :attr:`transData` - transform data coords to display coords
* :attr:`transAxis` - transform axis coords to display coords
"""
LABELPAD = 5
OFFSETTEXTPAD = 3
def __str__(self):
return self.__class__.__name__ \
+ "(%f,%f)"%tuple(self.axes.transAxes.transform_point((0,0)))
def __init__(self, axes, pickradius=15):
"""
Init the axis with the parent Axes instance
"""
artist.Artist.__init__(self)
self.set_figure(axes.figure)
self.axes = axes
self.major = Ticker()
self.minor = Ticker()
self.callbacks = cbook.CallbackRegistry(('units', 'units finalize'))
#class dummy:
# locator = None
# formatter = None
#self.major = dummy()
#self.minor = dummy()
self._autolabelpos = True
self.label = self._get_label()
self.offsetText = self._get_offset_text()
self.majorTicks = []
self.minorTicks = []
self.pickradius = pickradius
self.cla()
self.set_scale('linear')
def set_label_coords(self, x, y, transform=None):
"""
Set the coordinates of the label. By default, the x
coordinate of the y label is determined by the tick label
bounding boxes, but this can lead to poor alignment of
multiple ylabels if there are multiple axes. Ditto for the y
        coordinate of the x label.
You can also specify the coordinate system of the label with
the transform. If None, the default coordinate system will be
        the axes coordinate system, where (0, 0) is (left, bottom), (0.5, 0.5)
        is the middle, etc.
"""
self._autolabelpos = False
if transform is None:
transform = self.axes.transAxes
self.label.set_transform(transform)
self.label.set_position((x, y))
def get_transform(self):
return self._scale.get_transform()
def get_scale(self):
return self._scale.name
def set_scale(self, value, **kwargs):
self._scale = mscale.scale_factory(value, self, **kwargs)
self._scale.set_default_locators_and_formatters(self)
def limit_range_for_scale(self, vmin, vmax):
return self._scale.limit_range_for_scale(vmin, vmax, self.get_minpos())
def get_children(self):
children = [self.label]
majorticks = self.get_major_ticks()
minorticks = self.get_minor_ticks()
children.extend(majorticks)
children.extend(minorticks)
return children
def cla(self):
'clear the current axis'
self.set_major_locator(mticker.AutoLocator())
self.set_major_formatter(mticker.ScalarFormatter())
self.set_minor_locator(mticker.NullLocator())
self.set_minor_formatter(mticker.NullFormatter())
# Clear the callback registry for this axis, or it may "leak"
self.callbacks = cbook.CallbackRegistry(('units', 'units finalize'))
# whether the grids are on
self._gridOnMajor = rcParams['axes.grid']
self._gridOnMinor = False
self.label.set_text('')
self._set_artist_props(self.label)
# build a few default ticks; grow as necessary later; only
# define 1 so properties set on ticks will be copied as they
# grow
cbook.popall(self.majorTicks)
cbook.popall(self.minorTicks)
self.majorTicks.extend([self._get_tick(major=True)])
self.minorTicks.extend([self._get_tick(major=False)])
self._lastNumMajorTicks = 1
self._lastNumMinorTicks = 1
self.converter = None
self.units = None
self.set_units(None)
def set_clip_path(self, clippath, transform=None):
artist.Artist.set_clip_path(self, clippath, transform)
majorticks = self.get_major_ticks()
minorticks = self.get_minor_ticks()
for child in self.majorTicks + self.minorTicks:
child.set_clip_path(clippath, transform)
def get_view_interval(self):
'return the Interval instance for this axis view limits'
raise NotImplementedError('Derived must override')
def set_view_interval(self, vmin, vmax, ignore=False):
raise NotImplementedError('Derived must override')
def get_data_interval(self):
'return the Interval instance for this axis data limits'
raise NotImplementedError('Derived must override')
def set_data_interval(self):
'Set the axis data limits'
raise NotImplementedError('Derived must override')
def _set_artist_props(self, a):
if a is None: return
a.set_figure(self.figure)
def iter_ticks(self):
"""
Iterate through all of the major and minor ticks.
"""
majorLocs = self.major.locator()
majorTicks = self.get_major_ticks(len(majorLocs))
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i) for i, val in enumerate(majorLocs)]
minorLocs = self.minor.locator()
minorTicks = self.get_minor_ticks(len(minorLocs))
self.minor.formatter.set_locs(minorLocs)
minorLabels = [self.minor.formatter(val, i) for i, val in enumerate(minorLocs)]
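        # Walk major ticks first, then minor ticks, yielding each as a
        # (tick artist, data location, formatted label) triple.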
major_minor = [
(majorTicks, majorLocs, majorLabels),
(minorTicks, minorLocs, minorLabels)]
for group in major_minor:
for tick in zip(*group):
yield tick
def get_ticklabel_extents(self, renderer):
"""
Get the extents of the tick labels on either side
of the axes.
"""
ticklabelBoxes = []
ticklabelBoxes2 = []
interval = self.get_view_interval()
for tick, loc, label in self.iter_ticks():
if tick is None: continue
if not mtransforms.interval_contains(interval, loc): continue
tick.update_position(loc)
tick.set_label1(label)
tick.set_label2(label)
if tick.label1On and tick.label1.get_visible():
extent = tick.label1.get_window_extent(renderer)
ticklabelBoxes.append(extent)
if tick.label2On and tick.label2.get_visible():
extent = tick.label2.get_window_extent(renderer)
ticklabelBoxes2.append(extent)
if len(ticklabelBoxes):
bbox = mtransforms.Bbox.union(ticklabelBoxes)
else:
bbox = mtransforms.Bbox.from_extents(0, 0, 0, 0)
if len(ticklabelBoxes2):
bbox2 = mtransforms.Bbox.union(ticklabelBoxes2)
else:
bbox2 = mtransforms.Bbox.from_extents(0, 0, 0, 0)
return bbox, bbox2
def draw(self, renderer, *args, **kwargs):
'Draw the axis lines, grid lines, tick lines and labels'
ticklabelBoxes = []
ticklabelBoxes2 = []
if not self.get_visible(): return
renderer.open_group(__name__)
interval = self.get_view_interval()
for tick, loc, label in self.iter_ticks():
if tick is None: continue
if not mtransforms.interval_contains(interval, loc): continue
tick.update_position(loc)
tick.set_label1(label)
tick.set_label2(label)
tick.draw(renderer)
if tick.label1On and tick.label1.get_visible():
extent = tick.label1.get_window_extent(renderer)
ticklabelBoxes.append(extent)
if tick.label2On and tick.label2.get_visible():
extent = tick.label2.get_window_extent(renderer)
ticklabelBoxes2.append(extent)
# scale up the axis label box to also find the neighbors, not
        # just the tick labels that actually overlap.  Note we need a
        # *copy* of the axis label box because we don't want to scale
# the actual bbox
self._update_label_position(ticklabelBoxes, ticklabelBoxes2)
self.label.draw(renderer)
self._update_offset_text_position(ticklabelBoxes, ticklabelBoxes2)
self.offsetText.set_text( self.major.formatter.get_offset() )
self.offsetText.draw(renderer)
if 0: # draw the bounding boxes around the text for debug
for tick in majorTicks:
label = tick.label1
mpatches.bbox_artist(label, renderer)
mpatches.bbox_artist(self.label, renderer)
renderer.close_group(__name__)
def _get_label(self):
raise NotImplementedError('Derived must override')
def _get_offset_text(self):
raise NotImplementedError('Derived must override')
def get_gridlines(self):
'Return the grid lines as a list of Line2D instance'
ticks = self.get_major_ticks()
return cbook.silent_list('Line2D gridline', [tick.gridline for tick in ticks])
def get_label(self):
'Return the axis label as a Text instance'
return self.label
def get_offset_text(self):
'Return the axis offsetText as a Text instance'
return self.offsetText
def get_pickradius(self):
'Return the depth of the axis used by the picker'
return self.pickradius
def get_majorticklabels(self):
'Return a list of Text instances for the major ticklabels'
ticks = self.get_major_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1On]
labels2 = [tick.label2 for tick in ticks if tick.label2On]
return cbook.silent_list('Text major ticklabel', labels1+labels2)
def get_minorticklabels(self):
'Return a list of Text instances for the minor ticklabels'
ticks = self.get_minor_ticks()
labels1 = [tick.label1 for tick in ticks if tick.label1On]
labels2 = [tick.label2 for tick in ticks if tick.label2On]
return cbook.silent_list('Text minor ticklabel', labels1+labels2)
def get_ticklabels(self, minor=False):
'Return a list of Text instances for ticklabels'
if minor:
return self.get_minorticklabels()
return self.get_majorticklabels()
def get_majorticklines(self):
'Return the major tick lines as a list of Line2D instances'
lines = []
ticks = self.get_major_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_minorticklines(self):
'Return the minor tick lines as a list of Line2D instances'
lines = []
ticks = self.get_minor_ticks()
for tick in ticks:
lines.append(tick.tick1line)
lines.append(tick.tick2line)
return cbook.silent_list('Line2D ticklines', lines)
def get_ticklines(self, minor=False):
'Return the tick lines as a list of Line2D instances'
if minor:
return self.get_minorticklines()
return self.get_majorticklines()
def get_majorticklocs(self):
"Get the major tick locations in data coordinates as a numpy array"
return self.major.locator()
def get_minorticklocs(self):
"Get the minor tick locations in data coordinates as a numpy array"
return self.minor.locator()
def get_ticklocs(self, minor=False):
"Get the tick locations in data coordinates as a numpy array"
if minor:
return self.minor.locator()
return self.major.locator()
def _get_tick(self, major):
        'return the default tick instance'
raise NotImplementedError('derived must override')
def _copy_tick_props(self, src, dest):
'Copy the props from src tick to dest tick'
if src is None or dest is None: return
dest.label1.update_from(src.label1)
dest.label2.update_from(src.label2)
dest.tick1line.update_from(src.tick1line)
dest.tick2line.update_from(src.tick2line)
dest.gridline.update_from(src.gridline)
dest.tick1On = src.tick1On
dest.tick2On = src.tick2On
dest.label1On = src.label1On
dest.label2On = src.label2On
def get_major_locator(self):
'Get the locator of the major ticker'
return self.major.locator
def get_minor_locator(self):
'Get the locator of the minor ticker'
return self.minor.locator
def get_major_formatter(self):
'Get the formatter of the major ticker'
return self.major.formatter
def get_minor_formatter(self):
'Get the formatter of the minor ticker'
return self.minor.formatter
def get_major_ticks(self, numticks=None):
'get the tick instances; grow as necessary'
if numticks is None:
numticks = len(self.get_major_locator()())
if len(self.majorTicks) < numticks:
# update the new tick label properties from the old
for i in range(numticks - len(self.majorTicks)):
tick = self._get_tick(major=True)
self.majorTicks.append(tick)
if self._lastNumMajorTicks < numticks:
protoTick = self.majorTicks[0]
for i in range(self._lastNumMajorTicks, len(self.majorTicks)):
tick = self.majorTicks[i]
if self._gridOnMajor: tick.gridOn = True
self._copy_tick_props(protoTick, tick)
self._lastNumMajorTicks = numticks
ticks = self.majorTicks[:numticks]
return ticks
def get_minor_ticks(self, numticks=None):
'get the minor tick instances; grow as necessary'
if numticks is None:
numticks = len(self.get_minor_locator()())
if len(self.minorTicks) < numticks:
# update the new tick label properties from the old
for i in range(numticks - len(self.minorTicks)):
tick = self._get_tick(major=False)
self.minorTicks.append(tick)
if self._lastNumMinorTicks < numticks:
protoTick = self.minorTicks[0]
for i in range(self._lastNumMinorTicks, len(self.minorTicks)):
tick = self.minorTicks[i]
if self._gridOnMinor: tick.gridOn = True
self._copy_tick_props(protoTick, tick)
self._lastNumMinorTicks = numticks
ticks = self.minorTicks[:numticks]
return ticks
def grid(self, b=None, which='major', **kwargs):
"""
        Set the axis grid on or off; b is a boolean.  Use *which* =
        'major' | 'minor' to set the grid for major or minor ticks.
if *b* is *None* and len(kwargs)==0, toggle the grid state. If
*kwargs* are supplied, it is assumed you want the grid on and *b*
will be set to True
*kwargs* are used to set the line properties of the grids, eg,
xax.grid(color='r', linestyle='-', linewidth=2)
"""
if len(kwargs): b = True
if which.lower().find('minor')>=0:
if b is None: self._gridOnMinor = not self._gridOnMinor
else: self._gridOnMinor = b
for tick in self.minorTicks: # don't use get_ticks here!
if tick is None: continue
tick.gridOn = self._gridOnMinor
if len(kwargs): artist.setp(tick.gridline,**kwargs)
else:
if b is None: self._gridOnMajor = not self._gridOnMajor
else: self._gridOnMajor = b
for tick in self.majorTicks: # don't use get_ticks here!
if tick is None: continue
tick.gridOn = self._gridOnMajor
if len(kwargs): artist.setp(tick.gridline,**kwargs)
def update_units(self, data):
"""
introspect *data* for units converter and update the
axis.converter instance if necessary. Return *True* is *data* is
registered for unit conversion
"""
converter = munits.registry.get_converter(data)
if converter is None: return False
self.converter = converter
default = self.converter.default_units(data)
#print 'update units: default="%s", units=%s"'%(default, self.units)
if default is not None and self.units is None:
self.set_units(default)
self._update_axisinfo()
return True
def _update_axisinfo(self):
"""
check the axis converter for the stored units to see if the
axis info needs to be updated
"""
if self.converter is None:
return
info = self.converter.axisinfo(self.units)
if info is None:
return
if info.majloc is not None and self.major.locator!=info.majloc:
self.set_major_locator(info.majloc)
if info.minloc is not None and self.minor.locator!=info.minloc:
self.set_minor_locator(info.minloc)
if info.majfmt is not None and self.major.formatter!=info.majfmt:
self.set_major_formatter(info.majfmt)
if info.minfmt is not None and self.minor.formatter!=info.minfmt:
self.set_minor_formatter(info.minfmt)
if info.label is not None:
label = self.get_label()
label.set_text(info.label)
def have_units(self):
return self.converter is not None or self.units is not None
def convert_units(self, x):
if self.converter is None:
self.converter = munits.registry.get_converter(x)
if self.converter is None:
#print 'convert_units returning identity: units=%s, converter=%s'%(self.units, self.converter)
return x
ret = self.converter.convert(x, self.units)
#print 'convert_units converting: axis=%s, units=%s, converter=%s, in=%s, out=%s'%(self, self.units, self.converter, x, ret)
return ret
def set_units(self, u):
"""
set the units for axis
ACCEPTS: a units tag
"""
pchanged = False
if u is None:
self.units = None
pchanged = True
else:
if u!=self.units:
self.units = u
#print 'setting units', self.converter, u, munits.registry.get_converter(u)
pchanged = True
if pchanged:
self._update_axisinfo()
self.callbacks.process('units')
self.callbacks.process('units finalize')
def get_units(self):
'return the units for axis'
return self.units
def set_major_formatter(self, formatter):
"""
Set the formatter of the major ticker
ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
"""
self.major.formatter = formatter
formatter.set_axis(self)
def set_minor_formatter(self, formatter):
"""
Set the formatter of the minor ticker
ACCEPTS: A :class:`~matplotlib.ticker.Formatter` instance
"""
self.minor.formatter = formatter
formatter.set_axis(self)
def set_major_locator(self, locator):
"""
Set the locator of the major ticker
ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
"""
self.major.locator = locator
locator.set_axis(self)
def set_minor_locator(self, locator):
"""
Set the locator of the minor ticker
ACCEPTS: a :class:`~matplotlib.ticker.Locator` instance
"""
self.minor.locator = locator
locator.set_axis(self)
def set_pickradius(self, pickradius):
"""
Set the depth of the axis used by the picker
ACCEPTS: a distance in points
"""
self.pickradius = pickradius
def set_ticklabels(self, ticklabels, *args, **kwargs):
"""
Set the text values of the tick labels. Return a list of Text
instances. Use *kwarg* *minor=True* to select minor ticks.
ACCEPTS: sequence of strings
"""
#ticklabels = [str(l) for l in ticklabels]
minor = kwargs.pop('minor', False)
if minor:
self.set_minor_formatter(mticker.FixedFormatter(ticklabels))
ticks = self.get_minor_ticks()
else:
self.set_major_formatter( mticker.FixedFormatter(ticklabels) )
ticks = self.get_major_ticks()
ret = []
for i, tick in enumerate(ticks):
if i<len(ticklabels):
tick.label1.set_text(ticklabels[i])
ret.append(tick.label1)
tick.label1.update(kwargs)
return ret
def set_ticks(self, ticks, minor=False):
"""
Set the locations of the tick marks from sequence ticks
ACCEPTS: sequence of floats
"""
### XXX if the user changes units, the information will be lost here
ticks = self.convert_units(ticks)
if len(ticks) > 1:
xleft, xright = self.get_view_interval()
if xright > xleft:
self.set_view_interval(min(ticks), max(ticks))
else:
self.set_view_interval(max(ticks), min(ticks))
if minor:
self.set_minor_locator(mticker.FixedLocator(ticks))
return self.get_minor_ticks(len(ticks))
else:
self.set_major_locator( mticker.FixedLocator(ticks) )
return self.get_major_ticks(len(ticks))
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the sequence of bounding
boxes of all the ticklabels
"""
raise NotImplementedError('Derived must override')
    def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the label position based on the sequence of bounding
boxes of all the ticklabels
"""
raise NotImplementedError('Derived must override')
def pan(self, numsteps):
'Pan *numsteps* (can be positive or negative)'
self.major.locator.pan(numsteps)
def zoom(self, direction):
"Zoom in/out on axis; if *direction* is >0 zoom in, else zoom out"
self.major.locator.zoom(direction)
class XAxis(Axis):
__name__ = 'xaxis'
axis_name = 'x'
def contains(self,mouseevent):
"""Test whether the mouse event occured in the x axis.
"""
if callable(self._contains): return self._contains(self,mouseevent)
x,y = mouseevent.x,mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes,yaxes = trans.transform_point((x,y))
except ValueError:
return False, {}
l,b = self.axes.transAxes.transform_point((0,0))
r,t = self.axes.transAxes.transform_point((1,1))
inaxis = xaxes>=0 and xaxes<=1 and (
(y<b and y>b-self.pickradius) or
(y>t and y<t+self.pickradius))
return inaxis, {}
def _get_tick(self, major):
return XTick(self.axes, 0, '', major=major)
def _get_label(self):
# x in axes coords, y in display coords (to be updated at draw
# time by _update_label_positions)
label = mtext.Text(x=0.5, y=0,
fontproperties = font_manager.FontProperties(size=rcParams['axes.labelsize']),
color = rcParams['axes.labelcolor'],
verticalalignment='top',
horizontalalignment='center',
)
label.set_transform( mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform() ))
self._set_artist_props(label)
self.label_position='bottom'
return label
def _get_offset_text(self):
# x in axes coords, y in display coords (to be updated at draw time)
offsetText = mtext.Text(x=1, y=0,
fontproperties = font_manager.FontProperties(size=rcParams['xtick.labelsize']),
color = rcParams['xtick.color'],
verticalalignment='top',
horizontalalignment='right',
)
offsetText.set_transform( mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform() ))
self._set_artist_props(offsetText)
self.offset_text_position='bottom'
return offsetText
def get_label_position(self):
"""
Return the label position (top or bottom)
"""
return self.label_position
def set_label_position(self, position):
"""
Set the label position (top or bottom)
ACCEPTS: [ 'top' | 'bottom' ]
"""
assert position == 'top' or position == 'bottom'
if position == 'top':
self.label.set_verticalalignment('bottom')
else:
self.label.set_verticalalignment('top')
self.label_position=position
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the sequence of bounding
boxes of all the ticklabels
"""
if not self._autolabelpos: return
x,y = self.label.get_position()
if self.label_position == 'bottom':
if not len(bboxes):
bottom = self.axes.bbox.ymin
else:
bbox = mtransforms.Bbox.union(bboxes)
bottom = bbox.y0
self.label.set_position( (x, bottom - self.LABELPAD*self.figure.dpi / 72.0))
else:
if not len(bboxes2):
top = self.axes.bbox.ymax
else:
bbox = mtransforms.Bbox.union(bboxes2)
top = bbox.y1
self.label.set_position( (x, top+self.LABELPAD*self.figure.dpi / 72.0))
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x,y = self.offsetText.get_position()
if not len(bboxes):
bottom = self.axes.bbox.ymin
else:
bbox = mtransforms.Bbox.union(bboxes)
bottom = bbox.y0
self.offsetText.set_position((x, bottom-self.OFFSETTEXTPAD*self.figure.dpi/72.0))
def get_text_heights(self, renderer):
"""
Returns the amount of space one should reserve for text
above and below the axes. Returns a tuple (above, below)
"""
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
above = 0.0
if bbox2.height:
above += bbox2.height + padPixels
below = 0.0
if bbox.height:
below += bbox.height + padPixels
if self.get_label_position() == 'top':
above += self.label.get_window_extent(renderer).height + padPixels
else:
below += self.label.get_window_extent(renderer).height + padPixels
return above, below
def set_ticks_position(self, position):
"""
Set the ticks position (top, bottom, both, default or none)
both sets the ticks to appear on both positions, but does not
change the tick labels. default resets the tick positions to
the default: ticks on both positions, labels at bottom. none
can be used if you don't want any ticks.
ACCEPTS: [ 'top' | 'bottom' | 'both' | 'default' | 'none' ]
"""
assert position in ('top', 'bottom', 'both', 'default', 'none')
ticks = list( self.get_major_ticks() ) # a copy
ticks.extend( self.get_minor_ticks() )
if position == 'top':
for t in ticks:
t.tick1On = False
t.tick2On = True
t.label1On = False
t.label2On = True
elif position == 'bottom':
for t in ticks:
t.tick1On = True
t.tick2On = False
t.label1On = True
t.label2On = False
elif position == 'default':
for t in ticks:
t.tick1On = True
t.tick2On = True
t.label1On = True
t.label2On = False
elif position == 'none':
for t in ticks:
t.tick1On = False
t.tick2On = False
else:
for t in ticks:
t.tick1On = True
t.tick2On = True
for t in ticks:
t.update_position(t._loc)
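    # Illustrative usage of the method above (a sketch assuming a standard
    # pyplot Axes instance named ax, not part of this module):
    #   ax.xaxis.set_ticks_position('top')      # ticks and labels on top only
    #   ax.xaxis.set_ticks_position('default')  # ticks on both sides, labels at bottom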
def tick_top(self):
'use ticks only on top'
self.set_ticks_position('top')
def tick_bottom(self):
'use ticks only on bottom'
self.set_ticks_position('bottom')
def get_ticks_position(self):
"""
Return the ticks position (top, bottom, default or unknown)
"""
majt=self.majorTicks[0]
mT=self.minorTicks[0]
majorTop=(not majt.tick1On) and majt.tick2On and (not majt.label1On) and majt.label2On
minorTop=(not mT.tick1On) and mT.tick2On and (not mT.label1On) and mT.label2On
if majorTop and minorTop: return 'top'
MajorBottom=majt.tick1On and (not majt.tick2On) and majt.label1On and (not majt.label2On)
MinorBottom=mT.tick1On and (not mT.tick2On) and mT.label1On and (not mT.label2On)
if MajorBottom and MinorBottom: return 'bottom'
majorDefault=majt.tick1On and majt.tick2On and majt.label1On and (not majt.label2On)
minorDefault=mT.tick1On and mT.tick2On and mT.label1On and (not mT.label2On)
if majorDefault and minorDefault: return 'default'
return 'unknown'
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervalx
def set_view_interval(self, vmin, vmax, ignore=False):
if ignore:
self.axes.viewLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.axes.viewLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)
def get_minpos(self):
return self.axes.dataLim.minposx
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervalx
def set_data_interval(self, vmin, vmax, ignore=False):
'return the Interval instance for this axis data limits'
if ignore:
self.axes.dataLim.intervalx = vmin, vmax
else:
Vmin, Vmax = self.get_data_interval()
self.axes.dataLim.intervalx = min(vmin, Vmin), max(vmax, Vmax)
class YAxis(Axis):
__name__ = 'yaxis'
axis_name = 'y'
def contains(self,mouseevent):
"""Test whether the mouse event occurred in the y axis.
Returns *True* | *False*
"""
if callable(self._contains): return self._contains(self,mouseevent)
x,y = mouseevent.x,mouseevent.y
try:
trans = self.axes.transAxes.inverted()
xaxes,yaxes = trans.transform_point((x,y))
except ValueError:
return False, {}
l,b = self.axes.transAxes.transform_point((0,0))
r,t = self.axes.transAxes.transform_point((1,1))
inaxis = yaxes>=0 and yaxes<=1 and (
(x<l and x>l-self.pickradius) or
(x>r and x<r+self.pickradius))
return inaxis, {}
def _get_tick(self, major):
return YTick(self.axes, 0, '', major=major)
def _get_label(self):
# x in display coords (updated by _update_label_position)
# y in axes coords
label = mtext.Text(x=0, y=0.5,
# todo: get the label position
fontproperties=font_manager.FontProperties(size=rcParams['axes.labelsize']),
color = rcParams['axes.labelcolor'],
verticalalignment='center',
horizontalalignment='right',
rotation='vertical',
)
label.set_transform( mtransforms.blended_transform_factory(
mtransforms.IdentityTransform(), self.axes.transAxes) )
self._set_artist_props(label)
self.label_position='left'
return label
def _get_offset_text(self):
# x in display coords, y in axes coords (to be updated at draw time)
offsetText = mtext.Text(x=0, y=0.5,
fontproperties = font_manager.FontProperties(size=rcParams['ytick.labelsize']),
color = rcParams['ytick.color'],
verticalalignment = 'bottom',
horizontalalignment = 'left',
)
offsetText.set_transform(mtransforms.blended_transform_factory(
self.axes.transAxes, mtransforms.IdentityTransform()) )
self._set_artist_props(offsetText)
self.offset_text_position='left'
return offsetText
def get_label_position(self):
"""
Return the label position (left or right)
"""
return self.label_position
def set_label_position(self, position):
"""
Set the label position (left or right)
ACCEPTS: [ 'left' | 'right' ]
"""
assert position == 'left' or position == 'right'
if position == 'right':
self.label.set_horizontalalignment('left')
else:
self.label.set_horizontalalignment('right')
self.label_position=position
def _update_label_position(self, bboxes, bboxes2):
"""
Update the label position based on the sequence of bounding
boxes of all the ticklabels
"""
if not self._autolabelpos: return
x,y = self.label.get_position()
if self.label_position == 'left':
if not len(bboxes):
left = self.axes.bbox.xmin
else:
bbox = mtransforms.Bbox.union(bboxes)
left = bbox.x0
self.label.set_position( (left-self.LABELPAD*self.figure.dpi/72.0, y))
else:
if not len(bboxes2):
right = self.axes.bbox.xmax
else:
bbox = mtransforms.Bbox.union(bboxes2)
right = bbox.x1
self.label.set_position( (right+self.LABELPAD*self.figure.dpi/72.0, y))
def _update_offset_text_position(self, bboxes, bboxes2):
"""
Update the offset_text position based on the sequence of bounding
boxes of all the ticklabels
"""
x,y = self.offsetText.get_position()
top = self.axes.bbox.ymax
self.offsetText.set_position((x, top+self.OFFSETTEXTPAD*self.figure.dpi/72.0))
def set_offset_position(self, position):
assert position == 'left' or position == 'right'
x,y = self.offsetText.get_position()
if position == 'left': x = 0
else: x = 1
self.offsetText.set_ha(position)
self.offsetText.set_position((x,y))
def get_text_widths(self, renderer):
bbox, bbox2 = self.get_ticklabel_extents(renderer)
# MGDTODO: Need a better way to get the pad
padPixels = self.majorTicks[0].get_pad_pixels()
left = 0.0
if bbox.width:
left += bbox.width + padPixels
right = 0.0
if bbox2.width:
right += bbox2.width + padPixels
if self.get_label_position() == 'left':
left += self.label.get_window_extent(renderer).width + padPixels
else:
right += self.label.get_window_extent(renderer).width + padPixels
return left, right
def set_ticks_position(self, position):
"""
Set the ticks position (left, right, both or default)
both sets the ticks to appear on both positions, but
does not change the tick labels.
default resets the tick positions to the default:
ticks on both positions, labels on the left.
ACCEPTS: [ 'left' | 'right' | 'both' | 'default' | 'none' ]
"""
assert position in ('left', 'right', 'both', 'default', 'none')
ticks = list( self.get_major_ticks() ) # a copy
ticks.extend( self.get_minor_ticks() )
if position == 'right':
self.set_offset_position('right')
for t in ticks:
t.tick1On = False
t.tick2On = True
t.label1On = False
t.label2On = True
elif position == 'left':
self.set_offset_position('left')
for t in ticks:
t.tick1On = True
t.tick2On = False
t.label1On = True
t.label2On = False
elif position == 'default':
self.set_offset_position('left')
for t in ticks:
t.tick1On = True
t.tick2On = True
t.label1On = True
t.label2On = False
elif position == 'none':
for t in ticks:
t.tick1On = False
t.tick2On = False
else:
self.set_offset_position('left')
for t in ticks:
t.tick1On = True
t.tick2On = True
def tick_right(self):
'use ticks only on right'
self.set_ticks_position('right')
def tick_left(self):
'use ticks only on left'
self.set_ticks_position('left')
def get_ticks_position(self):
"""
Return the ticks position (left, right, both or unknown)
"""
majt=self.majorTicks[0]
mT=self.minorTicks[0]
majorRight=(not majt.tick1On) and majt.tick2On and (not majt.label1On) and majt.label2On
minorRight=(not mT.tick1On) and mT.tick2On and (not mT.label1On) and mT.label2On
if majorRight and minorRight: return 'right'
majorLeft=majt.tick1On and (not majt.tick2On) and majt.label1On and (not majt.label2On)
minorLeft=mT.tick1On and (not mT.tick2On) and mT.label1On and (not mT.label2On)
if majorLeft and minorLeft: return 'left'
majorDefault=majt.tick1On and majt.tick2On and majt.label1On and (not majt.label2On)
minorDefault=mT.tick1On and mT.tick2On and mT.label1On and (not mT.label2On)
if majorDefault and minorDefault: return 'default'
return 'unknown'
def get_view_interval(self):
'return the Interval instance for this axis view limits'
return self.axes.viewLim.intervaly
def set_view_interval(self, vmin, vmax, ignore=False):
if ignore:
self.axes.viewLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.axes.viewLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)
def get_minpos(self):
return self.axes.dataLim.minposy
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.dataLim.intervaly
def set_data_interval(self, vmin, vmax, ignore=False):
'return the Interval instance for this axis data limits'
if ignore:
self.axes.dataLim.intervaly = vmin, vmax
else:
Vmin, Vmax = self.get_data_interval()
self.axes.dataLim.intervaly = min(vmin, Vmin), max(vmax, Vmax)
| agpl-3.0 |
Wittlich/DAT210x-Python | Module5/assignment10.py | 1 | 7755 | import numpy as np
import pandas as pd
import scipy.io.wavfile as wavfile
import os
from sklearn.linear_model import LinearRegression
from sklearn.utils.validation import check_random_state
# Good Luck!
#
# INFO:
# Samples = Observations. Each audio file is a single sample
# in our dataset.
#
# Audio Samples = https://en.wikipedia.org/wiki/Sampling_(signal_processing)
# Each .wav file is actually just a bunch of numeric samples, "sampled"
# from the analog signal. Sampling is a type of discretization. When we
# mention 'samples', we mean observations. When we mention 'audio samples',
# we mean the actual "features" of the audio file.
#
#
# The goal of this lab is to use multi-target, linear regression to generate
# by extrapolation, the missing portion of the test audio file.
#
# Each of the missing audio_sample features will be the output of an equation,
# which is a function of the provided portion of the audio_samples:
#
# missing_samples = f(provided_samples)
#
# You can experiment with how much of the audio you want to chop off
# and have the computer generate using the Provided_Portion parameter.
#
# TODO: Play with this. This is how much of the audio file will
# be provided, in percent. The remaining percent of the file will
# be generated via linear extrapolation.
provided_portion = 0.25
# INFO: You have to download the dataset (audio files) from the website:
# https://github.com/Jakobovski/free-spoken-digit-dataset
#
# TODO: Create a regular ol' Python List called 'zero'
# Loop through the dataset and load up all 50 of the 0_jackson*.wav files
# For each audio file, simply append the audio data (not the sample_rate,
# just the data!) to your Python list 'zero':
zero = [wavfile.read('Datasets/recordings/' + file)[1] for file in os.listdir('Datasets/recordings') if
'0_jackson' in file]
sample_rate = wavfile.read('Datasets/recordings/0_jackson_0.wav')[0]
#
# TODO: Just for a second, convert zero into a DataFrame. When you do
# so, set the dtype to np.int16, since the input audio files are 16
# bits per sample. If you don't know how to do this, read up on the docs
# here:
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
#
# Since these audio clips are unfortunately not length-normalized,
# we're going to have to just hard chop them to all be the same length.
# Since Pandas would have inserted NANs at any spot to make zero a
# perfectly rectangular [n_observed_samples, n_audio_samples] array,
# do a dropna on the Y axis here. Then, convert one back into an
# NDArray using .values
df = pd.DataFrame(zero)
df = df.apply(pd.to_numeric, errors='coerce')
df.dropna(axis=1, inplace=True)
zero = df.values
#
# TODO: It's important to know how (many audio_samples samples) long the
# data is now. 'zero' is currently shaped [n_samples, n_audio_samples],
# so get the n_audio_samples count and store it in a variable called
# n_audio_samples
n_audio_samples = zero.shape[1]
#
# TODO: Create your linear regression model here and store it in a
# variable called 'model'. Don't actually train or do anything else
# with it yet:
model = LinearRegression()
#
# INFO: There are 50 takes of each clip. You want to pull out just one
# of them, randomly, and that one will NOT be used in the training of
# your model. In other words, the one file we'll be testing / scoring
# on will be an unseen sample, independent to the rest of your
# training set:
rng = check_random_state(7) # Leave this alone until you've submitted your lab
random_idx = rng.randint(zero.shape[0])
test = zero[random_idx]
train = np.delete(zero, [random_idx], axis=0)
#
# TODO: Print out the shape of train, and the shape of test
# train will be shaped: [n_samples, n_audio_samples], where
# n_audio_samples are the 'features' of the audio file
# test will be shaped [n_audio_samples], since it is a single
# sample (audio file, e.g. observation).
print('Train shape: {}\nTest shape: {}'.format(train.shape, test.shape))
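# With the 50 jackson recordings loaded above and a single random clip held
# out, the expected shapes are train: (49, n_audio_samples) and
# test: (n_audio_samples,).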
#
# INFO: The test data will have two parts, X_test and y_test. X_test is
# going to be the first portion of the test audio file, which we will
# be providing the computer as input. y_test, the "label" if you will,
# is going to be the remaining portion of the audio file. Like such,
# the computer will use linear regression to derive the missing
# portion of the sound file based off of the training data its received!
#
# Save the original 'test' clip, the one you're about to delete
# half of, so that you can compare it to the 'patched' clip once
# you've generated it. HINT: you should have got the sample_rate
# when you were loading up the .wav files:
wavfile.write('Original Test Clip.wav', sample_rate, test)
#
# TODO: Prepare the TEST date by creating a slice called X_test. It
# should have Provided_Portion * n_audio_samples audio sample features,
# taken from your test audio file, currently stored in the variable
# 'test'. In other words, grab the FIRST Provided_Portion *
# n_audio_samples audio features from test and store it in X_test.
X_test = test[:int(provided_portion * test.shape[0])]
#
# TODO: If the first Provided_Portion * n_audio_samples features were
# stored in X_test, then we need to also grab the *remaining* audio
# features and store it in y_test. With the remaining features stored
# in there, we will be able to R^2 "score" how well our algorithm did
# in completing the sound file.
y_test = test[int(provided_portion * test.shape[0]):]
#
# TODO: Duplicate the same process for X_train, y_train. The only
# differences being: 1) Your will be getting your audio data from
# 'train' instead of from 'test', 2) Remember the shape of train that
# you printed out earlier? You want to do this slicing but for ALL
# samples (observations). For each observation, you want to slice
# the first Provided_Portion * n_audio_samples audio features into
# X_train, and the remaining go into y_train. All of this should be
# accomplishable using regular indexing in two lines of code.
X_train = train[:, :int(provided_portion * train.shape[1])]
y_train = train[:, int(provided_portion * train.shape[1]):]
#
# TODO: SciKit-Learn gets mad if you don't supply your training
# data in the form of a 2D arrays: [n_samples, n_features].
#
# So if you only have one SAMPLE, such as is our case with X_test,
# and y_test, then by calling .reshape(1, -1), you can turn
# [n_features] into [1, n_features].
#
# On the other hand, if you only have one FEATURE, which currently
# doesn't apply, you can call .reshape(-1, 1) on your data to turn
# [n_samples] into [n_samples, 1]:
X_test = X_test.reshape(1, -1)
y_test = y_test.reshape(1, -1)
#
# TODO: Fit your model using your training data and label:
model.fit(X_train, y_train)
#
# TODO: Use your model to predict the 'label' of X_test. Store the
# resulting prediction in a variable called y_test_prediction
y_test_prediction = model.predict(X_test)
# INFO: SciKit-Learn will use float64 to generate your predictions
# so let's take those values back to int16:
y_test_prediction = y_test_prediction.astype(dtype=np.int16)
#
# TODO: Score how well your prediction would do for some good laughs,
# by passing in your test data and test label (y_test).
score = model.score(X_test, y_test)
print('Extrapolation R^2 Score: {}'.format(score))
#
# First, take the first Provided_Portion portion of the test clip, the
# part you fed into your linear regression model. Then, stitch that
# together with the abomination the predictor model generated for you,
# and then save the completed audio clip:
completed_clip = np.hstack((X_test, y_test_prediction))
wavfile.write('Extrapolated Clip.wav', sample_rate, completed_clip[0])
#
# INFO: Congrats on making it to the end of this crazy lab =) !
#
| mit |
htygithub/bokeh | bokeh/crossfilter/plotting.py | 42 | 8763 | from __future__ import absolute_import
import numpy as np
import pandas as pd
from bokeh.models import ColumnDataSource, BoxSelectTool
from ..plotting import figure
def cross(start, facets):
"""Creates a unique combination of provided facets.
A cross product of an initial set of starting facets with a new set of
facets.
Args:
start (list): List of lists of facets
facets (list): List of facets
Returns:
list: a list of lists of unique combinations of facets
"""
new = [[facet] for facet in facets]
result = []
for x in start:
for n in new:
result.append(x + n)
return result
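# Illustrative example of the cross product above (a sketch, not called
# anywhere in this module):
#   cross([['a'], ['b']], ['x', 'y'])
#   -> [['a', 'x'], ['a', 'y'], ['b', 'x'], ['b', 'y']]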
def hide_axes(plot, axes=('x', 'y')):
"""Hides the axes of the plot by setting component alphas.
Args:
plot (Figure): a valid figure with x and y axes
axes (tuple or list or str, optional): the axes to hide the axis on.
"""
if isinstance(axes, str):
axes = tuple(axes)
for label in axes:
axis = getattr(plot, label + 'axis')
axis = axis[0]
axis.major_label_text_alpha = 0.0
axis.major_label_text_font_size = '0pt'
axis.axis_line_alpha = 0.0
axis.major_tick_line_alpha = 0.0
axis.minor_tick_line_alpha = 0.0
plot.min_border = 0
def make_histogram_source(series):
"""Creates a ColumnDataSource containing the bins of the input series.
Args:
series (:py:class:`~pandas.Series`): description
Returns:
ColumnDataSource: includes bin centers with count of items in the bins
"""
counts, bins = np.histogram(series, bins=50)
centers = pd.rolling_mean(bins, 2)[1:]
return ColumnDataSource(data={'counts': counts, 'centers': centers})
def make_continuous_bar_source(df, x_field, y_field='None', df_orig=None, agg='count'):
"""Makes discrete, then creates representation of the bars to be plotted.
Args:
df (DataFrame): contains the data to be converted to a discrete form
x_field (str): the column in df that maps to the x dim of the plot
y_field (str, optional): the column in df that maps to the y dim of the plot
df_orig (DataFrame, optional): original dataframe that the subset ``df`` was
generated from
agg (str, optional): the type of aggregation to be used
Returns:
ColumnDataSource: aggregated, discrete form of x,y values
"""
# Generate dataframe required to use the categorical bar source function
idx, edges = pd.cut(x=df[x_field], bins=8, retbins=True, labels=False)
labels, edges = pd.cut(x=df[x_field], bins=8, retbins=True)
centers = pd.rolling_mean(edges, 2)[1:]
# store new value of x as the bin it fell into
df['centers'] = centers[idx]
df['labels'] = labels
# After making it discrete, create the categorical bar source
return make_categorical_bar_source(df, 'labels', y_field, df_orig, agg)
def make_categorical_bar_source(df, x_field, y_field='None', df_orig=None, agg='count'):
"""Creates representation of the bars to be plotted.
Args:
df (DataFrame): contains the data to be converted to a discrete form
x_field (str): the column in df that maps to the x dim of the plot
y_field (str, optional): the column in df that maps to the y dim of the plot
df_orig (DataFrame, optional): original dataframe that the subset ``df`` was
generated from
agg (str, optional): the type of aggregation to be used
Returns:
ColumnDataSource: aggregated, discrete form of x,y values
"""
if df_orig is None:
df_orig = df
# handle x-only aggregations separately
if agg == 'percent' or agg == 'count':
# percent aggregations are a special case, since pandas doesn't directly support
if agg == 'percent':
# percent on discrete col using proportion, on continuous using percent
if df[y_field].dtype == 'object':
agg_func = 'count'
else:
agg_func = 'sum'
total = float(getattr(df_orig[y_field], agg_func)())
series = df.groupby(x_field)[y_field].apply(lambda x, total_agg=total, f=agg_func:
100*(getattr(x, f)()/total_agg))
elif agg == 'count':
series = df.groupby(x_field).size()
else:
raise ValueError('Unrecognized Aggregation Type for Y of "None"')
# here we have a series where the values are the aggregation for the index (bars)
result = pd.DataFrame(data={'labels': series.index, 'heights': series.values})
# x and y aggregations
else:
# Get the y values after grouping by the x values
group = df.groupby(x_field)[y_field]
aggregate = getattr(group, agg)
result = aggregate().reset_index()
result.rename(columns={x_field: 'labels', y_field: 'heights'}, inplace=True)
return ColumnDataSource(data=result)
def make_factor_source(series):
"""Generate data source that is based on the unique values in the series.
Args:
series (:py:class:`~pandas.Series`): contains categorical-like data
Returns:
ColumnDataSource: contains the unique values from the series
"""
return ColumnDataSource(data={'factors': series.unique()})
def make_bar_plot(datasource, counts_name="counts",
centers_name="centers",
bar_width=0.7,
x_range=None,
y_range=None,
plot_width=500, plot_height=500,
tools="pan,wheel_zoom,box_zoom,save,resize,box_select,reset",
title_text_font_size="12pt"):
"""Utility function to set/calculate default parameters of a bar plot.
Args:
datasource (ColumnDataSource): represents bars to plot
counts_name (str): column corresponding to height of the bars
centers_name (str): column corresponding to the location of the bars
bar_width (float): the width of the bars in the bar plot
x_range (list): list of two values, the min and max of the x axis range
plot_width (float): width of the plot in pixels
plot_height (float): height of the plot in pixels
tools (str): comma separated tool names to add to the plot
title_text_font_size (str): size of the plot title, e.g., '12pt'
Returns:
figure: plot generated from the provided parameters
"""
top = np.max(datasource.data[counts_name])
# Create the figure container
plot = figure(
title="", title_text_font_size=title_text_font_size,
plot_width=plot_width, plot_height=plot_height,
x_range=x_range, y_range=[0, top], tools=tools)
# Get the bar values
y = [val/2.0 for val in datasource.data[counts_name]]
# Generate the bars in the figure
plot.rect(centers_name, y, bar_width, counts_name, source=datasource)
plot.min_border = 0
plot.h_symmetry = False
plot.v_symmetry = False
for tool in plot.select(type=BoxSelectTool):
tool.dimensions = ['width']
return plot
def make_histogram(datasource,
counts_name="counts",
centers_name="centers",
x_range=None,
bar_width=0.7,
plot_width=500,
plot_height=500,
min_border=40,
tools=None,
title_text_font_size="12pt"):
"""Utility function to create a histogram figure.
This is used to create the filter widgets for continuous data in
CrossFilter.
Args:
datasource (ColumnDataSource): represents bars to plot
counts_name (str): column corresponding to height of the bars
centers_name (str): column corresponding to the location of the bars
x_range (list): list of two values, the min and max of the x axis range
bar_width (float): the width of the bars in the bar plot
plot_width (float): width of the plot in pixels
plot_height (float): height of the plot in pixels
min_border (float): minimum border width of figure in pixels
tools (str): comma separated tool names to add to the plot
title_text_font_size (str): size of the plot title, e.g., '12pt'
Returns:
figure: histogram plot generated from the provided parameters
"""
start = np.min(datasource.data[centers_name]) - bar_width
    end = np.max(datasource.data[centers_name]) + bar_width
plot = make_bar_plot(
datasource, counts_name=counts_name, centers_name=centers_name,
x_range=[start, end], plot_width=plot_width, plot_height=plot_height,
tools=tools, title_text_font_size=title_text_font_size)
return plot
| bsd-3-clause |
abimannans/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 6 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
| bsd-3-clause |
SanPen/GridCal | src/research/PGD/V1_refractored.py | 1 | 16952 | # PGD code, merging Santiago's improvements
import time
import pandas as pd
import numpy as np
import sys
from math import *
# from mpmath import mp
# mp.dps = 100
pd.options.display.precision = 2
pd.set_option('display.precision', 2)
def progress_bar(i, n, size): # show the percentage
percent = float(i) / float(n)
sys.stdout.write("\r"
+ str(int(i)).rjust(3, '0')
+ "/"
+ str(int(n)).rjust(3, '0')
+ ' ['
+ '=' * ceil(percent * size)
+ ' ' * floor((1 - percent) * size)
+ ']')
def read_grid_data(fname):
"""
Read the grid data
:param fname: name of the excel file
:return: n_buses, Qmax, Qmin, Y, Yinv, V_mod, P_pq, Q_pq, P_pv, I0_pq, n_pv, n_pq
"""
df_b = pd.read_excel(fname, sheet_name="buses")
df_l = pd.read_excel(fname, sheet_name="lines")
# BEGIN INITIALIZATION OF DATA
n_b = 0
n_pq = 0
n_pv = 0
pq = []
pv = []
pq0 = [] # store pq buses indices relative to its own
pv0 = [] # store pv buses indices relative to its own
d_pq = {} # dict of pq
d_pv = {} # dict of pv
for i in range(len(df_b)):
if df_b.iloc[i, 4] == "slack": # index 0 is reserved for the slack bus
pass
elif df_b.iloc[i, 4] == "PQ":
pq0.append(n_pq)
d_pq[df_b.iloc[i, 0]] = n_pq
n_b += 1
n_pq += 1
pq.append(df_b.iloc[i, 0] - 1)
elif df_b.iloc[i, 4] == "PV":
pv0.append(n_pv)
d_pv[df_b.iloc[i, 0]] = n_pv
n_b += 1
n_pv += 1
pv.append(df_b.iloc[i, 0] - 1)
n_l = len(df_l) # number of lines
V0 = df_b.iloc[0, 3] # the slack is always positioned in the first row
I0_pq = np.zeros(n_pq, dtype=complex)
I0_pv = np.zeros(n_pv, dtype=complex)
Y = np.zeros((n_b, n_b), dtype=complex) # I will build it with block matrices
Y11 = np.zeros((n_pq, n_pq), dtype=complex) # pq pq
Y12 = np.zeros((n_pq, n_pv), dtype=complex) # pq pv
Y21 = np.zeros((n_pv, n_pq), dtype=complex) # pv pq
Y22 = np.zeros((n_pv, n_pv), dtype=complex) # pv pv
for i in range(n_l):
Ys = 1 / (df_l.iloc[i, 2] + 1j * df_l.iloc[i, 3]) # series element
Ysh = df_l.iloc[i, 4] + 1j * df_l.iloc[i, 5] # shunt element
t = df_l.iloc[i, 6] * np.cos(df_l.iloc[i, 7]) + 1j * df_l.iloc[i, 6] * np.sin(
df_l.iloc[i, 7]) # tap as a complex number
a = df_l.iloc[i, 0]
b = df_l.iloc[i, 1]
if a == 0:
if b - 1 in pq:
I0_pq[d_pq[b]] += V0 * Ys / t
Y11[d_pq[b], d_pq[b]] += Ys + Ysh
if b - 1 in pv:
I0_pv[d_pv[b]] += V0 * Ys / t
Y22[d_pv[b], d_pv[b]] += Ys + Ysh
elif b == 0:
if a - 1 in pq:
I0_pq[d_pq[a]] += V0 * Ys / np.conj(t)
Y11[d_pq[a], d_pq[a]] += (Ys + Ysh) / (t * np.conj(t))
if a - 1 in pv:
I0_pv[d_pv[a]] += V0 * Ys / np.conj(t)
Y22[d_pv[a], d_pv[a]] += (Ys + Ysh) / (t * np.conj(t))
else:
if a - 1 in pq and b - 1 in pq:
Y11[d_pq[a], d_pq[a]] += (Ys + Ysh) / (t * np.conj(t))
Y11[d_pq[b], d_pq[b]] += Ys + Ysh
Y11[d_pq[a], d_pq[b]] += - Ys / np.conj(t)
Y11[d_pq[b], d_pq[a]] += - Ys / t
if a - 1 in pq and b - 1 in pv:
Y11[d_pq[a], d_pq[a]] += (Ys + Ysh) / (t * np.conj(t))
Y22[d_pv[b], d_pv[b]] += Ys + Ysh
Y12[d_pq[a], d_pv[b]] += - Ys / np.conj(t)
Y21[d_pv[b], d_pq[a]] += - Ys / t
if a - 1 in pv and b - 1 in pq:
Y22[d_pv[a], d_pv[a]] += (Ys + Ysh) / (t * np.conj(t))
Y11[d_pq[b], d_pq[b]] += Ys + Ysh
Y21[d_pv[a], d_pq[b]] += - Ys / np.conj(t)
Y12[d_pq[b], d_pv[a]] += - Ys / t
if a - 1 in pv and b - 1 in pv:
Y22[d_pv[a], d_pv[a]] += (Ys + Ysh) / (t * np.conj(t))
Y22[d_pv[b], d_pv[b]] += Ys + Ysh
Y22[d_pv[a], d_pv[b]] += - Ys / np.conj(t)
Y22[d_pv[b], d_pv[a]] += - Ys / t
# add shunts connected directly to the bus
for i in range(len(df_b)):
a = df_b.iloc[i, 0]
if a - 1 in pq:
Y11[d_pq[a], d_pq[a]] += df_b.iloc[i, 5] + 1j * df_b.iloc[i, 6]
elif a - 1 in pv:
Y22[d_pv[a], d_pv[a]] += df_b.iloc[i, 5] + 1j * df_b.iloc[i, 6]
Y = np.block([[Y11, Y12], [Y21, Y22]])
Yinv = np.linalg.inv(Y)
V_mod = np.zeros(n_pv, dtype=float)
P_pq = np.zeros(n_pq, dtype=float)
P_pv = np.zeros(n_pv, dtype=float)
Q_pq = np.zeros(n_pq, dtype=float)
for i in range(len(df_b)):
if df_b.iloc[i, 4] == "PV":
V_mod[d_pv[df_b.iloc[i, 0]]] = df_b.iloc[i, 3]
P_pv[d_pv[df_b.iloc[i, 0]]] = df_b.iloc[i, 1]
elif df_b.iloc[i, 4] == "PQ":
Q_pq[d_pq[df_b.iloc[i, 0]]] = df_b.iloc[i, 2]
P_pq[d_pq[df_b.iloc[i, 0]]] = df_b.iloc[i, 1]
n_buses = np.shape(Y)[0] # number of buses
return n_buses, Y, Yinv, V_mod, P_pq, Q_pq, P_pv, I0_pq, n_pv, n_pq
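# Quick sanity-check sketch for the admittance matrices built above (purely
# illustrative, using the same input file as the call at the bottom of this
# script):
#   nb, Y, Yinv = read_grid_data('data_10PQ_mesh_r.xlsx')[:3]
#   assert np.allclose(np.dot(Y, Yinv), np.eye(nb))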
def init_apparent_powers_decomposition(n_buses, n_scale, n_time, P_pq, Q_pq):
"""
:param n_buses:
:param n_scale:
:param n_time:
:param P_pq:
:param Q_pq:
:return:
"""
SSk = np.empty((n_buses, 2), dtype=complex)
SSp = np.empty((n_time, 2), dtype=complex)
SSq = np.empty((n_scale, 2), dtype=complex)
SKk0 = P_pq + Q_pq * 1j # original load
SPp0 = np.ones(n_time) # the original loads do not change with time
SQq0 = np.ones(n_scale) # the original loads are not scaled
SSk[:, 0] = np.conj(SKk0)
SSp[:, 0] = np.conj(SPp0)
SSq[:, 0] = np.conj(SQq0)
SKk1 = - 0.2 * np.random.rand(n_buses) # load/generator of random active power, change this to try different cases
SPp1 = np.random.rand(n_time)
SQq1 = np.random.rand(n_scale)
# SKk1 = - np.array([0.1, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.2]) # change this if the dimensions change
# SPp1 = np.array([0.0, 0.2, 0.3, 0.7, 0.7, 0.3, 0.2, 0.1])
# SQq1 = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
    # second mode: the random load variation goes into the second column so
    # that the base load stored in column 0 above is not overwritten
    SSk[:, 1] = np.conj(SKk1)
    SSp[:, 1] = np.conj(SPp1)
    SSq[:, 1] = np.conj(SQq1)
return SSk, SSp, SSq
def init_voltages_decomposition(n_mm, n_buses, n_scale, n_time):
"""
:param n_buses:
:param n_scale:
:param n_time:
:return:
"""
# DECOMPOSITION OF VOLTAGES
VVk = np.zeros((n_buses, n_mm + 1), dtype=complex)
VVp = np.zeros((n_time, n_mm + 1), dtype=complex)
VVq = np.zeros((n_scale, n_mm + 1), dtype=complex)
Kkv = np.ones(n_buses, dtype=complex) # amplitude vector
Ppv = np.ones(n_time) # position vector
Qqv = np.ones(n_scale) # scaling vector
VVk[:, 0] = np.conj(Kkv)
VVp[:, 0] = np.conj(Ppv)
VVq[:, 0] = np.conj(Qqv)
return VVk, VVp, VVq
def init_currents_decomposition(n_gg, n_mm, n_buses, n_scale, n_time):
"""
:return:
"""
# DECOMPOSITION OF CURRENTS
# IIk = np.zeros((n_buses, n_gg * n_mm), dtype=complex)
# IIp = np.zeros((n_time, n_gg * n_mm), dtype=complex)
# IIq = np.zeros((n_scale, n_gg * n_mm), dtype=complex)
IIk = np.zeros((n_buses, n_mm + 1), dtype=complex)
IIp = np.zeros((n_time, n_mm + 1), dtype=complex)
IIq = np.zeros((n_scale, n_mm + 1), dtype=complex)
return IIk, IIp, IIq
def fun_C(SSk, SSp, SSq, VVk, VVp, VVq, IIk, IIp, IIq, n_i_coeff, n_v_coeff, n_bus, n_scale, n_time):
"""
:param SSk:
:param SSp:
:param SSq:
:param VVk:
:param VVp:
:param VVq:
:param IIk:
:param IIp:
:param IIq:
:param n: number of coefficients fo far
:return:
"""
# Ns = SSk.shape[0]
# Nc = Ns + n_v_coeff * n_i_coeff
# CCk = np.empty((Nc, n_bus), dtype=complex)
# CCp = np.empty((Nc, n_time), dtype=complex)
# CCq = np.empty((Nc, n_scale), dtype=complex)
# initialize with the S* decomposed variables
# CCk[:2, :] = SSk
# CCp[:2, :] = SSp
# CCq[:2, :] = SSq
# idx = 2
# for ii in range(n_v_coeff):
# for jj in range(n_i_coeff):
# CCk[idx, :] = - VVk[ii, :] * IIk[jj, :]
# CCp[idx, :] = - VVp[ii, :] * IIp[jj, :]
# CCq[idx, :] = - VVq[ii, :] * IIq[jj, :]
# idx += 1
Nc = n_v_coeff * n_i_coeff + 2
CCk = np.empty((n_bus, Nc), dtype=complex)
CCp = np.empty((n_time, Nc), dtype=complex)
CCq = np.empty((n_scale, Nc), dtype=complex)
CCk[:, :2] = SSk
CCp[:, :2] = SSp
CCq[:, :2] = SSq
idx = 2
for ii in range(n_v_coeff):
for jj in range(n_i_coeff):
CCk[:, idx] = - VVk[:, ii] * IIk[:, jj]
CCp[:, idx] = - VVp[:, ii] * IIp[:, jj]
CCq[:, idx] = - VVq[:, ii] * IIq[:, jj]
idx += 1
return CCk, CCp, CCq, Nc, n_v_coeff, n_i_coeff
def build_map(MMk, MMp, MMq, n_mm):
"""
Build 3-D mapping from decomposed variables
:param MMk:
:param MMp:
:param MMq:
:return:
"""
MM_map = np.multiply.outer(np.multiply.outer(MMp[:, 0], MMk[:, 0]), MMq[:, 0])
    n_terms = n_mm + 1
    for i in range(1, n_terms):
        # accumulate the remaining terms of the tri-dimensional representation
        MM_map += np.multiply.outer(np.multiply.outer(MMp[:, i], MMk[:, i]), MMq[:, i])
        progress_bar(i + 1, n_terms, 50)
return MM_map
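# Note: the accumulation in build_map is equivalent to a single tensor
# contraction (shown here only as an illustrative alternative):
#   MM_map = np.einsum('pi,ki,qi->pkq',
#                      MMp[:, :n_mm + 1], MMk[:, :n_mm + 1], MMq[:, :n_mm + 1])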
def save_mapV(MM_map, fname, n_time):
"""
:param MM_map: 3D tensor for voltages
:param fname:
:return:
"""
writer = pd.ExcelWriter(fname)
for i in range(n_time):
V_map_df = pd.DataFrame(np.conj(MM_map[:][i][:]))
V_map_df.to_excel(writer, sheet_name=str(i))
writer.save()
def save_mapI(MM_map, fname, n_time):
"""
:param MM_map: 3D tensor for currents
:param fname:
:return:
"""
writer = pd.ExcelWriter(fname)
for i in range(n_time):
V_map_df = pd.DataFrame(MM_map[:][i][:])
V_map_df.to_excel(writer, sheet_name=str(i))
writer.save()
def save_mapS(MM_map, fname, n_time):
"""
:param MM_map: 3D tensor for powers
:param fname:
:return:
"""
writer = pd.ExcelWriter(fname)
for i in range(n_time):
V_map_df = pd.DataFrame(np.conj(MM_map[:][i][:]))
V_map_df.to_excel(writer, sheet_name=str(i))
writer.save()
def pgd(fname, n_gg=20, n_mm=20, n_kk=20, n_scale=12, n_time=8):
"""
:param fname: data file name
:param n_gg: outer iterations
:param n_mm: intermediate iterations
:param n_kk: inner iterations
:param n_scale: number of discretized points, arbitrary
:param n_time: number of discretized time periods, arbitrary
:return:
"""
n_buses, Y, Yinv, V_mod, P_pq, Q_pq, P_pv, I0_pq, n_pv, n_pq = read_grid_data(fname)
SSk, SSp, SSq = init_apparent_powers_decomposition(n_buses, n_scale, n_time, P_pq, Q_pq)
# VVk, VVp, VVq = init_voltages_decomposition(n_mm, n_buses, n_scale, n_time)
# IIk, IIp, IIq = init_currents_decomposition(n_gg, n_mm, n_buses, n_scale, n_time)
n_max = n_gg * n_mm * n_kk
iter_count = 0
idx_i = 0
idx_v = 1
for gg in range(n_gg): # outer loop: iterate on γ to solve the power flow as such
VVk, VVp, VVq = init_voltages_decomposition(n_mm, n_buses, n_scale, n_time)
IIk, IIp, IIq = init_currents_decomposition(n_gg, n_mm, n_buses, n_scale, n_time)
idx_i = 0
idx_v = 1
for mm in range(n_mm): # intermediate loop: iterate on i to find the superposition of terms of the I tensor.
# define the new C
CCk, CCp, CCq, Nc, Nv, n = fun_C(SSk, SSp, SSq,
VVk, VVp, VVq,
IIk, IIp, IIq,
idx_i, idx_v, n_buses,
n_scale, n_time)
# CCk, CCp, CCq, Nc, Nv, n = fun_C2(SSk, SSp, SSq, VVk, VVp, VVq, IIk, IIp, IIq)
# initialize the residues we have to find
# IIk1 = (np.random.rand(n_buses) - np.random.rand(n_buses)) * 1 # could also try to set IIk1 = VVk1
IIk1 = (np.random.rand(n_buses) - np.random.rand(n_buses)) * (n_mm - mm) ** 2 / n_mm ** 2
IIp1 = (np.random.rand(n_time) - np.random.rand(n_time)) * 1
IIq1 = (np.random.rand(n_scale) - np.random.rand(n_scale)) * 1
for kk in range(n_kk): # inner loop: iterate on Γ to find the residues.
# compute IIk1 (residues on Ik)
RHSk = np.zeros(n_buses, dtype=complex)
for ii in range(Nc):
prodRK = np.dot(IIp1, CCp[:, ii]) * np.dot(IIq1, CCq[:, ii])
RHSk += prodRK * CCk[:, ii]
LHSk = np.zeros(n_buses, dtype=complex)
for ii in range(Nv):
prodLK = np.dot(IIp1, VVp[:, ii] * IIp1) * np.dot(IIq1, VVq[:, ii] * IIq1)
LHSk += prodLK * VVk[:, ii]
IIk1 = RHSk / LHSk
# IIk1 = RHSk / (LHSk + 1e-7)
# compute IIp1 (residues on Ip)
RHSp = np.zeros(n_time, dtype=complex)
for ii in range(Nc):
prodRP = np.dot(IIk1, CCk[:, ii]) * np.dot(IIq1, CCq[:, ii])
RHSp += prodRP * CCp[:, ii]
LHSp = np.zeros(n_time, dtype=complex)
for ii in range(Nv):
prodLP = np.dot(IIk1, VVk[:, ii] * IIk1) * np.dot(IIq1, VVq[:, ii] * IIq1)
LHSp += prodLP * VVp[:, ii]
IIp1 = RHSp / LHSp
# compute IIq1 (residues on Iq)
RHSq = np.zeros(n_scale, dtype=complex)
for ii in range(Nc):
prodRQ = np.dot(IIk1, CCk[:, ii]) * np.dot(IIp1, CCp[:, ii])
RHSq += prodRQ * CCq[:, ii]
LHSq = np.zeros(n_scale, dtype=complex)
for ii in range(Nv):
prodLQ = np.dot(IIk1, VVk[:, ii] * IIk1) * np.dot(IIp1, VVp[:, ii] * IIp1)
LHSq += prodLQ * VVq[:, ii]
IIq1 = RHSq / LHSq
progress_bar(iter_count, n_max, 50) # display the inner operations
iter_count += 1
IIk[:, idx_i] = IIk1
IIp[:, idx_i] = IIp1
IIq[:, idx_i] = IIq1
idx_i += 1
for ii in range(n_mm):
VVk[:, ii] = np.conj(np.dot(Yinv, IIk[:, ii]))
VVp[:, ii] = np.conj(IIp[:, ii])
VVq[:, ii] = np.conj(IIq[:, ii])
# try to add I0 this way:
VVk[:, n_mm] = np.conj(np.dot(Yinv, I0_pq))
VVp[:, n_mm] = np.ones(n_time)
VVq[:, n_mm] = np.ones(n_scale)
idx_v = n_mm + 1
    # VVk: size (n_buses, n_mm + 1)
    # VVp: size (n_time, n_mm + 1)
    # VVq: size (n_scale, n_mm + 1)
v_map = build_map(VVk, VVp, VVq, n_mm)
    # SSk: size (n_buses, 2)
    # SSp: size (n_time, 2)
    # SSq: size (n_scale, 2)
s_map = build_map(SSk, SSp, SSq, 1)
# s_map = build_map(SSk, SSp, SSq, n_mm)
    # IIk: size (n_buses, n_mm + 1)
    # IIp: size (n_time, n_mm + 1)
    # IIq: size (n_scale, n_mm + 1)
i_map = build_map(IIk, IIp, IIq, n_mm)
    # the size of the maps is (n_time, n_buses, n_scale)
vec_error = checking(Y, v_map, s_map, I0_pq, n_buses, n_time, n_scale)
return v_map, s_map, i_map, vec_error
def checking(Y, V_map, S_map, I0_pq, n_buses, n_time, n_scale):
"""
:param Y: data file name
:param V_map: outer iterations
:param S_map: intermediate iterations
:param I0_pq: inner iterations
:param n_buses: number of buses
:param n_scale: number of discretized points, arbitrary
:param n_time: number of discretized time periods, arbitrary
:return: maximum current error
"""
I_mis = []
for n_sheet in range(n_time):
for n_col in range(n_scale):
YV_prod = np.dot(Y, np.conj(V_map[n_sheet, :, n_col]))
# YV_prod = np.dot(Y, V_map[n_sheet,:,n_col])
I22 = YV_prod - I0_pq
I11 = []
for kk in range(n_buses):
I11.append(S_map[n_sheet, kk, n_col] / V_map[n_sheet, kk, n_col])
# I11.append(np.conj(S_map[n_sheet,kk,n_col]) / np.conj(V_map[n_sheet,kk,n_col]))
I_mis.append(abs(max(np.abs(I11 - I22))))
return I_mis
v_map_, s_map_, i_map_, vec_error_ = pgd('data_10PQ_mesh_r.xlsx', n_gg=25, n_mm=25, n_kk=8, n_scale=10, n_time=8)
for i in range(len(vec_error_)):
print(vec_error_[i])
n_time = 8
save_mapV(v_map_, 'Map_V2.xlsx', n_time)
save_mapI(i_map_, 'Map_I2.xlsx', n_time)
save_mapS(s_map_, 'Map_S2.xlsx', n_time)
| gpl-3.0 |
xzh86/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
google-research/episodic-curiosity | third_party/baselines/bench/monitor.py | 1 | 5799 | # coding=utf-8
__all__ = ['Monitor', 'get_monitor_files', 'load_results']
import gym
from gym.core import Wrapper
import time
from glob import glob
import csv
import os.path as osp
import json
import numpy as np
class Monitor(Wrapper):
EXT = "monitor.csv"
f = None
def __init__(self, env, filename, allow_early_resets=False, reset_keywords=(), info_keywords=()):
Wrapper.__init__(self, env=env)
self.tstart = time.time()
if filename is None:
self.f = None
self.logger = None
else:
if not filename.endswith(Monitor.EXT):
if osp.isdir(filename):
filename = osp.join(filename, Monitor.EXT)
else:
filename = filename + "." + Monitor.EXT
self.f = open(filename, "wt")
self.f.write('#%s\n'%json.dumps({"t_start": self.tstart, 'env_id' : env.spec and env.spec.id}))
self.logger = csv.DictWriter(self.f, fieldnames=('r', 'l', 't')+reset_keywords+info_keywords)
self.logger.writeheader()
self.f.flush()
self.reset_keywords = reset_keywords
self.info_keywords = info_keywords
self.allow_early_resets = allow_early_resets
self.rewards = None
self.needs_reset = True
self.episode_rewards = []
self.episode_lengths = []
self.episode_times = []
self.total_steps = 0
self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()
def reset(self, **kwargs):
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
self.rewards = []
self.needs_reset = False
for k in self.reset_keywords:
v = kwargs.get(k)
if v is None:
raise ValueError('Expected you to pass kwarg %s into reset'%k)
self.current_reset_info[k] = v
return self.env.reset(**kwargs)
def step(self, action):
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
ob, rew, done, info = self.env.step(action)
self.rewards.append(rew)
if done:
self.needs_reset = True
eprew = sum(self.rewards)
eplen = len(self.rewards)
epinfo = {"r": round(eprew, 6), "l": eplen, "t": round(time.time() - self.tstart, 6)}
for k in self.info_keywords:
epinfo[k] = info[k]
self.episode_rewards.append(eprew)
self.episode_lengths.append(eplen)
self.episode_times.append(time.time() - self.tstart)
epinfo.update(self.current_reset_info)
if self.logger:
self.logger.writerow(epinfo)
self.f.flush()
info['episode'] = epinfo
self.total_steps += 1
return (ob, rew, done, info)
def close(self):
if self.f is not None:
self.f.close()
def get_total_steps(self):
return self.total_steps
def get_episode_rewards(self):
return self.episode_rewards
def get_episode_lengths(self):
return self.episode_lengths
def get_episode_times(self):
return self.episode_times
class LoadMonitorResultsError(Exception):
pass
def get_monitor_files(dir):
return glob(osp.join(dir, "*" + Monitor.EXT))
def load_results(dir):
import pandas
monitor_files = (
glob(osp.join(dir, "*monitor.json")) +
glob(osp.join(dir, "*monitor.csv"))) # get both csv and (old) json files
if not monitor_files:
raise LoadMonitorResultsError("no monitor files of the form *%s found in %s" % (Monitor.EXT, dir))
dfs = []
headers = []
for fname in monitor_files:
with open(fname, 'rt') as fh:
if fname.endswith('csv'):
firstline = fh.readline()
assert firstline[0] == '#'
header = json.loads(firstline[1:])
df = pandas.read_csv(fh, index_col=None)
headers.append(header)
elif fname.endswith('json'): # Deprecated json format
episodes = []
lines = fh.readlines()
header = json.loads(lines[0])
headers.append(header)
for line in lines[1:]:
episode = json.loads(line)
episodes.append(episode)
df = pandas.DataFrame(episodes)
else:
assert 0, 'unreachable'
df['t'] += header['t_start']
dfs.append(df)
df = pandas.concat(dfs)
df.sort_values('t', inplace=True)
df.reset_index(inplace=True)
df['t'] -= min(header['t_start'] for header in headers)
df.headers = headers # HACK to preserve backwards compatibility
return df
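# Minimal usage sketch (the directory path below is hypothetical):
#   env = Monitor(gym.make("CartPole-v1"), "/tmp/exp/run0", allow_early_resets=True)
#   ...run episodes with env.reset()/env.step()...
#   df = load_results("/tmp/exp")   # DataFrame with per-episode 'r', 'l', 't'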
def test_monitor():
    import uuid
    import os
    import pandas
env = gym.make("CartPole-v1")
env.seed(0)
mon_file = "/tmp/baselines-test-%s.monitor.csv" % uuid.uuid4()
menv = Monitor(env, mon_file)
menv.reset()
for _ in range(1000):
_, _, done, _ = menv.step(0)
if done:
menv.reset()
f = open(mon_file, 'rt')
firstline = f.readline()
assert firstline.startswith('#')
metadata = json.loads(firstline[1:])
assert metadata['env_id'] == "CartPole-v1"
    assert set(metadata.keys()) == {'env_id', 't_start'}, "Incorrect keys in monitor metadata"
last_logline = pandas.read_csv(f, index_col=None)
assert set(last_logline.keys()) == {'l', 't', 'r'}, "Incorrect keys in monitor logline"
f.close()
os.remove(mon_file)
| apache-2.0 |
bennlich/scikit-image | doc/examples/plot_regionprops.py | 14 | 1296 | """
=========================
Measure region properties
=========================
This example shows how to measure properties of labelled image regions.
"""
import math
import matplotlib.pyplot as plt
import numpy as np
from skimage.draw import ellipse
from skimage.measure import label, regionprops
from skimage.transform import rotate
image = np.zeros((600, 600))
rr, cc = ellipse(300, 350, 100, 220)
image[rr,cc] = 1
image = rotate(image, angle=15, order=0)
label_img = label(image)
regions = regionprops(label_img)
fig, ax = plt.subplots()
ax.imshow(image, cmap=plt.cm.gray)
for props in regions:
y0, x0 = props.centroid
orientation = props.orientation
x1 = x0 + math.cos(orientation) * 0.5 * props.major_axis_length
y1 = y0 - math.sin(orientation) * 0.5 * props.major_axis_length
x2 = x0 - math.sin(orientation) * 0.5 * props.minor_axis_length
y2 = y0 - math.cos(orientation) * 0.5 * props.minor_axis_length
ax.plot((x0, x1), (y0, y1), '-r', linewidth=2.5)
ax.plot((x0, x2), (y0, y2), '-r', linewidth=2.5)
ax.plot(x0, y0, '.g', markersize=15)
minr, minc, maxr, maxc = props.bbox
bx = (minc, maxc, maxc, minc, minc)
by = (minr, minr, maxr, maxr, minr)
ax.plot(bx, by, '-b', linewidth=2.5)
ax.axis((0, 600, 600, 0))
plt.show()
| bsd-3-clause |
srio/shadow3-scripts | COMSOL/comsol2shadow_2.py | 1 | 4213 |
import numpy
from scipy import interpolate
from scipy import spatial
import matplotlib.pylab as plt
import matplotlib as mpl
# from scipy.spatial import Delaunay
# from scipy.interpolate import griddata
def load_comsol_file(filename,nx=300,ny=100,xmax=None,ymax=None,four_quadrants=2,do_plot=False):
a = numpy.loadtxt(filename,skiprows=9)
    node = numpy.round(a,10) # 1/4 model: 55mm*35mm, units in m
# Coordinates
X0 = node[:,2] # X=x in m
Y0 = node[:,0] # Y=z in m
Z0 = node[:,3] # Z=uy vertical displacement in m
if four_quadrants == 2:
X1 = numpy.concatenate( (X0,-X0) )
Y1 = numpy.concatenate( (Y0, Y0) )
Z1 = numpy.concatenate( (Z0, Z0) )
X = numpy.concatenate( (X1, X1) )
Y = numpy.concatenate( (Y1,-Y1) )
Z = numpy.concatenate( (Z1, Z1) )
else:
X = X0
Y = Y0
Z = Z0
#
# Interpolation
#
if xmax is None:
xmax = numpy.abs(X).max()
if ymax is None:
ymax = numpy.abs(Y).max()
# triangulation
Pi = numpy.array([X, Y]).transpose()
tri = spatial.Delaunay(Pi)
if do_plot:
plt.triplot(X0, Y0 , tri.simplices.copy())
plt.plot(X0, Y0, "or", label = "Data")
plt.grid()
plt.legend()
plt.xlabel("x")
plt.ylabel("y")
plt.show()
if four_quadrants == 2:
xlin = numpy.linspace(-xmax,xmax,nx)
ylin = numpy.linspace(-ymax,ymax,ny)
else:
xlin = numpy.linspace(0,xmax,nx)
ylin = numpy.linspace(0,ymax,ny)
XLIN = numpy.outer(xlin,numpy.ones_like(ylin))
YLIN = numpy.outer(numpy.ones_like(xlin),ylin)
# interpolation
P = numpy.array([XLIN.flatten(), YLIN.flatten() ]).transpose()
z = interpolate.griddata(Pi, Z, P, method = "cubic").reshape([nx,ny])
if do_plot:
plt.contourf(XLIN, YLIN, z, 50, cmap = mpl.cm.jet)
plt.colorbar()
plt.contour(XLIN, YLIN, z, 20, colors = "k")
#plt.triplot(Xi, Yi , tri.simplices.copy(), color = "k")
plt.plot(X0, Y0, "or", label = "Data")
plt.legend()
plt.grid()
plt.show()
if four_quadrants == 1:
z1 = numpy.vstack((numpy.flip(z,axis=0),z[1:,:]))
z2 = numpy.hstack((numpy.flip(z1,axis=1),z1[:,1:]))
xlin2 = numpy.concatenate((-xlin[::-1],xlin[1:]))
ylin2 = numpy.concatenate((-ylin[::-1],ylin[1:]))
return z2,xlin2,ylin2
else:
return z,xlin,ylin
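# Typical call (see the __main__ block below); with four_quadrants=2 the raw
# quarter-model nodes are mirrored before triangulation, while with
# four_quadrants=1 only the interpolated quarter grid is mirrored afterwards:
#   z, x, y = load_comsol_file("brut.txt", nx=400, ny=150, four_quadrants=2)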
def write_shadow_surface(s,xx,yy,filename='presurface.dat'):
"""
write_shadowSurface: writes a mesh in the SHADOW/presurface format
SYNTAX:
out = write_shadowSurface(z,x,y,filename=filename)
INPUTS:
z - 2D array of heights z(x,y)
x - 1D array of spatial coordinates along mirror width.
y - 1D array of spatial coordinates along mirror length.
OUTPUTS:
filename - output file in SHADOW format. If undefined, the
file is names "presurface.dat"
"""
try:
fs = open(filename, 'w')
except IOError:
out = 0
print ("Error: can\'t open file: "+filename)
return
else:
# dimensions
fs.write( "%d %d \n"%(xx.size,yy.size))
# y array
for i in range(yy.size):
fs.write("%g "%(yy[i]))
fs.write("\n")
# for each x element, the x value followed by the corresponding z(y) profile
for i in range(xx.size):
tmps = ""
for j in range(yy.size):
tmps += "%g "%(s[i,j])
fs.write("%g %s \n"%(xx[i],tmps))
fs.close()
print ("write_shadow_surface: File for SHADOW "+filename+" written to disk.")
if __name__ == "__main__":
from srxraylib.plot.gol import plot_image, plot
z,x,y = load_comsol_file("brut.txt",nx=400,ny=150,four_quadrants=2,do_plot=False)
plot_image(z,x,y,xtitle="X (%d pixels, max:%f)"%(x.size,x.max()),ytitle="Y (%d pixels, max:%f)"%(y.size,y.max()),show=0)
plot(x,z[:,z.shape[1]//2],show=0)
plot(y,z[z.shape[0]//2,:])
print(z.shape,x.shape,y.shape)
write_shadow_surface(z,x,y, filename="brut.dat")
| mit |
sonnyhu/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example with an Elastic-Net regression model and the performance is
measured using the explained variance a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
mikekestemont/tag | tag/tagger.py | 1 | 35876 | from __future__ import print_function
import os
import shutil
import ConfigParser
from operator import itemgetter
import cPickle as pickle
from sklearn.preprocessing import LabelEncoder
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import numpy as np
import keras
from keras.utils import np_utils
from embeddings import EmbeddingsPretrainer
import utils
from model import build_model
font = {'size' : 10}
plt.rc('font', **font)
MODELS_DIR = '../models'
if not os.path.isdir(MODELS_DIR):
os.mkdir(MODELS_DIR)
class Tagger:
def __init__(self, config_path=None):
if config_path:
config = ConfigParser.ConfigParser()
config.read(config_path)
# parse the param
param_dict = dict()
for section in config.sections():
for name, value in config.items(section):
if value.isdigit():
value = int(value)
elif value == "True":
value = True
elif value == "False":
value = False
param_dict[name] = value
# set params:
self.train_dir = param_dict['train_dir']
self.dev_dir = param_dict['dev_dir']
self.test_dir = param_dict['test_dir']
self.model_name = param_dict['model_name']
self.context_representation = param_dict['context_representation']
self.pretrain_embeddings = param_dict['pretrain_embeddings']
self.target_representation = param_dict['target_representation']
self.include_target_one_hot = param_dict['include_target_one_hot']
self.max_pooling = param_dict['max_pooling']
self.std_token_len = param_dict['std_token_len']
self.nb_left_tokens = param_dict['nb_left_tokens']
self.left_char_len = param_dict['left_char_len']
self.nb_right_tokens = param_dict['nb_right_tokens']
self.right_char_len = param_dict['right_char_len']
self.nb_epochs = param_dict['nb_epochs']
self.batch_size = param_dict['batch_size']
self.nb_filters = param_dict['nb_filters']
self.filter_length = param_dict['filter_length']
self.nb_dense_dims = param_dict['nb_dense_dims']
self.embedding_dims = param_dict['embedding_dims']
self.min_train_token_count = param_dict['min_train_token_count']
self.model_path = os.sep.join((MODELS_DIR, self.model_name))
if not os.path.isdir(self.model_path):
os.mkdir(self.model_path)
print(param_dict)
# make sure that we can reproduce parametrization in case new model is trained:
if not os.path.exists(os.sep.join((self.model_path, 'config.txt'))):
shutil.copy(config_path, os.sep.join((self.model_path, 'config.txt')))
def load_data(self, nb_instances, label_idxs):
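        # Load tokens and labels from the training directory (and, if configured, dev/test).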
self.train_tokens, self.train_labels = \
utils.load_data_dir(data_dir = self.train_dir,
nb_instances = nb_instances,
label_idxs=label_idxs)
self.train_token_set = set(self.train_tokens) # for evaluation purposes (known vs unknown)
if self.dev_dir:
self.dev_tokens, self.dev_labels = \
utils.load_data_dir(data_dir = self.dev_dir,
nb_instances = nb_instances,
label_idxs=label_idxs)
if self.test_dir:
self.test_tokens, self.test_labels = \
utils.load_data_dir(data_dir = self.test_dir,
nb_instances = nb_instances,
label_idxs=label_idxs)
def encode_labels(self, load_pickles=False):
# ignore boundary markers
self.train_labels = [l for l in self.train_labels if l not in ("@", "$")]
if self.dev_dir:
self.dev_labels = [l for l in self.dev_labels if l not in ("@", "$")]
if self.test_dir:
self.test_labels = [l for l in self.test_labels if l not in ("@", "$")]
if not load_pickles:
# fit a labelencoder on all labels:
self.label_encoder = LabelEncoder()
lbls = self.train_labels
if self.dev_dir:
lbls.extend(self.dev_labels)
if self.test_dir:
lbls.extend(self.test_labels)
self.label_encoder.fit(lbls)
# pickle the label encoder:
with open(os.sep.join((self.model_path, 'label_encoder.p')), 'wb') as f:
pickle.dump(self.label_encoder, f)
else:
self.label_encoder = pickle.load(open(os.sep.join((self.model_path, 'label_encoder.p')), 'rb'))
# transform labels to int representation:
self.train_int_labels = self.label_encoder.transform(self.train_labels)
if self.dev_dir:
self.dev_int_labels = self.label_encoder.transform(self.dev_labels)
if self.test_dir:
self.test_int_labels = self.label_encoder.transform(self.test_labels)
print("> nb distinct train labels:", max(self.train_int_labels)+1)
        # convert labels to one-hot representation for cross-entropy:
self.train_y = np_utils.to_categorical(self.train_int_labels,
len(self.label_encoder.classes_))
if self.dev_dir:
self.dev_y = np_utils.to_categorical(self.dev_int_labels,
len(self.label_encoder.classes_))
if self.test_dir:
self.test_y = np_utils.to_categorical(self.test_int_labels,
len(self.label_encoder.classes_))
def vectorize_instances(self, tokens):
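        # Turn a token sequence into model inputs: vectorized target tokens and their
        # left/right contexts (characters or embedding indices, depending on the chosen
        # representation), plus each target token's index in the training vocabulary.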
left_X, tokens_X, right_X, target_one_hots = [], [], [], []
filler = np.zeros(len(self.train_token_index))
for token_idx, token in enumerate(tokens):
if token in ("@", "$"):
continue # ignore boundary markers:
if self.include_target_one_hot:
try:
target_one_hots.append([self.train_token_index[token]])
except KeyError:
target_one_hots.append([0])
# vectorize target token:
if self.target_representation in \
('flat_characters', 'lstm_characters',\
'flat_convolution', 'lstm_convolution'):
tokens_X.append(utils.vectorize_charseq(seq=token,
char_vector_dict=self.train_char_vector_dict,
std_seq_len=self.std_token_len))
            elif self.target_representation == 'embedding':
try:
tokens_X.append([self.train_token_index[token]])
except KeyError:
tokens_X.append([0])
# vectorize context:
if self.context_representation in \
('flat_characters', 'lstm_characters',\
'flat_convolution', 'lstm_convolution'):
# vectorize left context:
left_context = [tokens[token_idx-(t+1)] for t in range(self.nb_left_tokens)
if token_idx-(t+1) >= 0][::-1]
left_str = " ".join(left_context)
                left_X.append(utils.vectorize_charseq(seq=left_str,
char_vector_dict=self.train_char_vector_dict,
std_seq_len=self.left_char_len))
# vectorize right context:
right_str = " ".join([tokens[token_idx+t+1] for t in range(self.nb_right_tokens)
if token_idx+t+1 < len(tokens)])
right_X.append(utils.vectorize_charseq(seq=right_str,
char_vector_dict=self.train_char_vector_dict,
std_seq_len=self.right_char_len))
elif self.context_representation in ('flat_embeddings', 'lstm_embeddings'):
# vectorize left context:
left_context_tokens = [tokens[token_idx-(t+1)]\
for t in range(self.nb_left_tokens)\
if token_idx-(t+1) >= 0][::-1]
idxs = []
if left_context_tokens:
idxs = [self.train_token_index[t]\
if t in self.train_token_index\
else 0 for t in left_context_tokens]
while len(idxs) < self.nb_left_tokens:
idxs = [0] + idxs
left_X.append(idxs)
# vectorize right context:
right_context_tokens = [tokens[token_idx+t+1]\
for t in range(self.nb_right_tokens)\
if token_idx+t+1 < len(tokens)]
idxs = []
if right_context_tokens:
idxs = [self.train_token_index[t]\
if t in self.train_token_index\
else 0 for t in right_context_tokens]
while len(idxs) < self.nb_right_tokens:
idxs.append(0)
right_X.append(idxs)
tokens_X = np.asarray(tokens_X)
left_X = np.asarray(left_X)
right_X = np.asarray(right_X)
target_one_hots = np.asarray(target_one_hots)
return left_X, tokens_X, right_X, target_one_hots
def vectorize_datasets(self, load_pickles=False):
if not load_pickles:
# fit dicts etc. on train data
self.train_char_vector_dict = utils.get_char_vector_dict(self.train_tokens)
self.train_token_index = utils.get_train_token_index(self.train_tokens,
min_count=self.min_train_token_count)
            # dump the fitted char vector dict and token index for reuse:
with open(os.sep.join((self.model_path, 'train_char_vector_dict.p')), 'wb') as f:
pickle.dump(self.train_char_vector_dict, f)
with open(os.sep.join((self.model_path, 'train_token_index.p')), 'wb') as f:
pickle.dump(self.train_token_index, f)
else:
self.train_char_vector_dict = pickle.load(open(os.sep.join((self.model_path, 'train_char_vector_dict.p')), 'rb'))
self.train_token_index = pickle.load(open(os.sep.join((self.model_path, 'train_token_index.p')), 'rb'))
# transform training, dev and test data:
self.train_left_X, self.train_tokens_X,\
self.train_right_X, self.train_target_one_hots = \
self.vectorize_instances(tokens=self.train_tokens)
if self.dev_dir:
self.dev_left_X, self.dev_tokens_X,\
self.dev_right_X, self.dev_target_one_hots = \
self.vectorize_instances(tokens=self.dev_tokens)
if self.test_dir:
self.test_left_X, self.test_tokens_X,\
self.test_right_X, self.test_target_one_hots = \
self.vectorize_instances(tokens=self.test_tokens)
def set_model(self, load_pickles=False):
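        # Build the Keras model; optionally load or pretrain token embeddings first.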
# get embeddings if necessary:
self.embeddings = None
if self.pretrain_embeddings and self.context_representation != 'None':
if not load_pickles:
pretrainer = EmbeddingsPretrainer(tokens=self.train_tokens,
size=self.embedding_dims,
min_count=self.min_train_token_count)
vocab = [k for k,v in sorted(self.train_token_index.items(),\
key=itemgetter(1))]
self.embeddings = pretrainer.get_weights(vocab)
# dump embeddings:
with open(os.sep.join((self.model_path, 'embeddings.p')), 'wb') as f:
pickle.dump(self.embeddings, f)
# visualize etc:
pretrainer.plot_mfi(outputfile=os.sep.join((self.model_path, 'embeddings.pdf')))
pretrainer.most_similar(outputfile=os.sep.join((self.model_path, 'neighbors.txt')))
else:
self.embeddings = pickle.load(open(os.sep.join((self.model_path, 'embeddings.p')), 'rb'))
self.model = build_model(std_token_len=self.std_token_len,
left_char_len=self.left_char_len,
right_char_len=self.right_char_len,
filter_length=self.filter_length,
nb_filters=self.nb_filters,
nb_dense_dims=self.nb_dense_dims,
nb_left_tokens=self.nb_left_tokens,
nb_right_tokens=self.nb_right_tokens,
nb_labels=len(self.label_encoder.classes_),
char_vector_dict=self.train_char_vector_dict,
train_token_index=self.train_token_index,
target_representation=self.target_representation,
context_representation=self.context_representation,
embedding_dims=self.embedding_dims,
include_target_one_hot=self.include_target_one_hot,
max_pooling=self.max_pooling,
pretrain_embeddings=self.pretrain_embeddings,
embeddings=self.embeddings)
def get_baseline(self):
# Levenshtein baseline:
if self.dev_dir:
print("+++ BASELINE SCORE (dev)")
all_acc, known_acc, unknown_acc, proportion_unknown = utils.baseline(train_tokens = self.train_tokens,
train_labels = self.train_labels,
test_tokens = self.dev_tokens,
test_labels = self.dev_labels)
print("\t - all acc:\t{:.2%}".format(all_acc))
print("\t - known acc:\t{:.2%}".format(known_acc))
print("\t - unknown acc:\t{:.2%}".format(unknown_acc))
print("\t - proportion unknown:\t{:.2%}".format(proportion_unknown))
print("+++ BASELINE SCORE (test)")
all_acc, known_acc, unknown_acc, proportion_unknown = utils.baseline(train_tokens = self.train_tokens,
train_labels = self.train_labels,
test_tokens = self.test_tokens,
test_labels = self.test_labels)
print("\t - all acc:\t{:.2%}".format(all_acc))
print("\t - known acc:\t{:.2%}".format(known_acc))
print("\t - unknown acc:\t{:.2%}".format(unknown_acc))
print("\t - proportion unknown:\t{:.2%}".format(proportion_unknown))
def inspect_filters(self):
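        # Dump a human-readable summary of the first convolution layer: for each
        # filter position, the characters carrying the largest weights.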
with open(os.sep.join((self.model_path, 'filters_repr.txt')),\
'wt') as f:
for weights in self.model.get_weights():
if len(weights.shape) == 4: # first conv layer
weight_tensor = weights
# get shape info:
nb_filters = weight_tensor.shape[0]
nb_chars = weight_tensor.shape[1]
filter_size = weight_tensor.shape[2]
chars = list('X'*len(self.train_char_vector_dict))
for char, vector in self.train_char_vector_dict.items():
chars[np.argmax(vector)] = char
for filter_idx, filter_ in enumerate(weight_tensor[:50]):
f.write('\t- filter '+str(filter_idx+1)+'\n')
filter_ = np.squeeze(filter_).transpose()
for position, position_weights in enumerate(filter_):
info = []
for score, char in sorted(zip(position_weights, chars), reverse=True)[:4]:
sc = "{:.3}".format(score)
info.append(char+' : '+sc)
f.write('\t\t+ pos '+str(position+1)+' > '+('\t|\t'.join(info))+'\n')
def load_weights(self):
self.model.load_weights(os.sep.join((self.model_path, 'weights.hdf5')))
def fit(self):
train_losses, dev_losses = [], []
train_accs, dev_accs = [], []
dev_known_accs, dev_unknown_accs = [], []
for e in range(self.nb_epochs):
if self.context_representation != 'None':
print("-> epoch ", e+1, "...")
d = {'left_input': self.train_left_X,
'target_input': self.train_tokens_X,
'right_input': self.train_right_X,
'target_one_hot_input': self.train_target_one_hots,
'label_output': self.train_y,
}
self.model.fit(data=d,
nb_epoch = 1,
batch_size = self.batch_size)
print("+++ TRAIN SCORE")
train_loss = self.model.evaluate(data=d, batch_size = self.batch_size)
print("\t - loss:\t{:.3}".format(train_loss))
train_losses.append(train_loss)
train_predictions = self.model.predict({'left_input': self.train_left_X,
'target_input': self.train_tokens_X,
'target_one_hot_input': self.train_target_one_hots,
'right_input': self.train_right_X,
},
batch_size = self.batch_size)
train_all_acc, _, _ = utils.accuracies(predictions = train_predictions,
gold_labels = self.train_int_labels,
test_tokens = self.train_tokens,
train_token_set = self.train_token_set)
print("\t - all acc:\t{:.2%}".format(train_all_acc))
train_accs.append(train_all_acc)
if self.dev_dir:
print("+++ DEV SCORE")
d = {'left_input': self.dev_left_X,
'target_input': self.dev_tokens_X,
'right_input': self.dev_right_X,
'target_one_hot_input': self.dev_target_one_hots,
}
dev_predictions = self.model.predict(data=d, batch_size = self.batch_size)
d['label_output'] = self.dev_y
dev_loss = self.model.evaluate(data=d, batch_size = self.batch_size)
print("\t - loss:\t{:.3}".format(dev_loss))
dev_losses.append(dev_loss)
dev_all_acc, dev_known_acc, dev_unknown_acc = utils.accuracies(predictions = dev_predictions,
gold_labels = self.dev_int_labels,
test_tokens = self.dev_tokens,
train_token_set = self.train_token_set)
dev_accs.append(dev_all_acc)
dev_known_accs.append(dev_known_acc)
dev_unknown_accs.append(dev_unknown_acc)
print("\t - all acc:\t{:.2%}".format(dev_all_acc))
print("\t - known acc:\t{:.2%}".format(dev_known_acc))
print("\t - unknown acc:\t{:.2%}".format(dev_unknown_acc))
if self.test_dir:
print("+++ TEST SCORE")
test_predictions = self.model.predict({'left_input': self.test_left_X,
'target_input': self.test_tokens_X,
'right_input': self.test_right_X,
'target_one_hot_input': self.test_target_one_hots,
},
batch_size = self.batch_size)
test_all_acc, test_known_acc, test_unknown_acc = utils.accuracies(predictions = test_predictions,
gold_labels = self.test_int_labels,
test_tokens = self.test_tokens,
train_token_set = self.train_token_set)
print("\t - all acc:\t{:.2%}".format(test_all_acc))
print("\t - known acc:\t{:.2%}".format(test_known_acc))
print("\t - unknown acc:\t{:.2%}".format(test_unknown_acc))
elif self.context_representation == 'None':
print("-> epoch ", e+1, "...")
d = {'target_input': self.train_tokens_X,
'target_one_hot_input': self.train_target_one_hots,
'label_output': self.train_y,
}
self.model.fit(data=d,
nb_epoch = 1,
batch_size = self.batch_size)
print("+++ TRAIN SCORE")
train_loss = self.model.evaluate(data=d, batch_size = self.batch_size)
print("\t - loss:\t{:.3}".format(train_loss))
train_losses.append(train_loss)
train_predictions = self.model.predict({'target_input': self.train_tokens_X,
'target_one_hot_input': self.train_target_one_hots},
batch_size = self.batch_size)
train_all_acc, _, _ = utils.accuracies(predictions = train_predictions,
gold_labels = self.train_int_labels,
test_tokens = self.train_tokens,
train_token_set = self.train_token_set)
train_accs.append(train_all_acc)
print("\t - all acc:\t{:.2%}".format(train_all_acc))
if self.dev_dir:
print("+++ DEV SCORE")
d = {'target_input': self.dev_tokens_X,
'target_one_hot_input': self.dev_target_one_hots,
}
dev_predictions = self.model.predict(data = d,
batch_size = self.batch_size)
d['label_output'] = self.dev_y
dev_loss = self.model.evaluate(data=d, batch_size = self.batch_size)
dev_losses.append(dev_loss)
print("\t - loss:\t{:.3}".format(dev_loss))
dev_all_acc, dev_known_acc, dev_unknown_acc = utils.accuracies(predictions = dev_predictions,
gold_labels = self.dev_int_labels,
test_tokens = self.dev_tokens,
train_token_set = self.train_token_set)
dev_accs.append(dev_all_acc)
dev_known_accs.append(dev_known_acc)
dev_unknown_accs.append(dev_unknown_acc)
print("\t - all acc:\t{:.2%}".format(dev_all_acc))
print("\t - known acc:\t{:.2%}".format(dev_known_acc))
print("\t - unknown acc:\t{:.2%}".format(dev_unknown_acc))
if self.test_dir:
print("+++ TEST SCORE")
test_predictions = self.model.predict({'target_input': self.test_tokens_X,
'target_one_hot_input': self.test_target_one_hots},
batch_size = self.batch_size)
test_all_acc, test_known_acc, test_unknown_acc = utils.accuracies(predictions = test_predictions,
gold_labels = self.test_int_labels,
test_tokens = self.test_tokens,
train_token_set = self.train_token_set)
print("\t - all acc:\t{:.2%}".format(test_all_acc))
print("\t - known acc:\t{:.2%}".format(test_known_acc))
print("\t - unknown acc:\t{:.2%}".format(test_unknown_acc))
# keep track of results:
plt.clf()
f, ax = plt.subplots(1,1)
plt.tick_params(axis='both', which='major', labelsize=6)
plt.tick_params(axis='both', which='minor', labelsize=6)
plt.xlabel('Epoch')
plt.ylabel('Loss')
ax.plot(train_losses, c='b')
if self.dev_dir:
ax.plot(dev_losses, c='g')
ax2 = plt.gca().twinx()
plt.tick_params(axis='both', which='major', labelsize=6)
plt.tick_params(axis='both', which='minor', labelsize=6)
ax2.plot(train_accs, label='Train Acc (all)', c='r')
if self.dev_dir:
ax2.plot(dev_accs, label='Devel Acc (all)', c='c')
ax2.plot(dev_known_accs, label='Devel Acc (kno)', c='m')
ax2.plot(dev_unknown_accs, label='Devel Acc (unk)', c='y')
# hack: add dummy legend items:
ax2.plot(0, 0, label='Train Loss', c='b')
if self.dev_dir:
ax2.plot(0, 0, label='Devel Loss', c='g')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0., fontsize = 'x-small')
ax2.set_ylabel('Accuracy')
plt.savefig(os.sep.join((self.model_path, 'losses.pdf')))
with open(os.sep.join((self.model_path, 'losses.tsv')), 'w') as f:
if self.dev_dir:
f.write('idx\ttrain\tdev\n')
for i in range(len(train_losses)):
f.write('\t'.join((str(i+1), str(train_losses[i]), str(dev_losses[i])))+'\n')
else:
f.write('idx\ttrain\n')
for i in range(len(train_losses)):
f.write('\t'.join((str(i+1), str(train_losses[i])))+'\n')
with open(os.sep.join((self.model_path, 'accs.tsv')), 'w') as f:
if self.dev_dir:
f.write('idx\ttrain_all\tdev_all\tdev_known\tdev_unknown\n')
for i in range(len(train_losses)):
f.write('\t'.join((str(i+1), str(train_accs[i]),\
str(dev_accs[i]), str(dev_known_accs[i]),\
str(dev_unknown_accs[i])))+'\n')
else:
f.write('idx\ttrain_all\n')
for i in range(len(train_losses)):
f.write('\t'.join((str(i+1), str(train_accs[i])))+'\n')
# save
self.model.save_weights(os.sep.join((self.model_path, 'weights.hdf5')),\
overwrite=True)
# visualize:
self.inspect_filters()
def test(self):
if self.context_representation != 'None':
d = {'left_input': self.train_left_X,
'target_input': self.train_tokens_X,
'right_input': self.train_right_X,
'target_one_hot_input': self.train_target_one_hots,
'label_output': self.train_y,
}
print("+++ TRAIN SCORE")
train_loss = self.model.evaluate(data=d, batch_size = self.batch_size)
print("\t - loss:\t{:.3}".format(train_loss))
train_predictions = self.model.predict({'left_input': self.train_left_X,
'target_input': self.train_tokens_X,
'target_one_hot_input': self.train_target_one_hots,
'right_input': self.train_right_X,
},
batch_size = self.batch_size)
train_all_acc, _, _ = utils.accuracies(predictions = train_predictions,
gold_labels = self.train_int_labels,
test_tokens = self.train_tokens,
train_token_set = self.train_token_set)
print("\t - all acc:\t{:.2%}".format(train_all_acc))
if self.dev_dir:
print("+++ DEV SCORE")
d = {'left_input': self.dev_left_X,
'target_input': self.dev_tokens_X,
'right_input': self.dev_right_X,
'target_one_hot_input': self.dev_target_one_hots,
}
dev_predictions = self.model.predict(data=d, batch_size = self.batch_size)
d['label_output'] = self.dev_y
dev_loss = self.model.evaluate(data=d, batch_size = self.batch_size)
print("\t - loss:\t{:.3}".format(dev_loss))
dev_all_acc, dev_known_acc, dev_unknown_acc = utils.accuracies(predictions = dev_predictions,
gold_labels = self.dev_int_labels,
test_tokens = self.dev_tokens,
train_token_set = self.train_token_set)
print("\t - all acc:\t{:.2%}".format(dev_all_acc))
print("\t - known acc:\t{:.2%}".format(dev_known_acc))
print("\t - unknown acc:\t{:.2%}".format(dev_unknown_acc))
if self.test_dir:
print("+++ TEST SCORE")
test_predictions = self.model.predict({'left_input': self.test_left_X,
'target_input': self.test_tokens_X,
'right_input': self.test_right_X,
'target_one_hot_input': self.test_target_one_hots,
},
batch_size = self.batch_size)
test_all_acc, test_known_acc, test_unknown_acc = utils.accuracies(predictions = test_predictions,
gold_labels = self.test_int_labels,
test_tokens = self.test_tokens,
train_token_set = self.train_token_set)
print("\t - all acc:\t{:.2%}".format(test_all_acc))
print("\t - known acc:\t{:.2%}".format(test_known_acc))
print("\t - unknown acc:\t{:.2%}".format(test_unknown_acc))
elif self.context_representation == 'None':
d = {'target_input': self.train_tokens_X,
'target_one_hot_input': self.train_target_one_hots,
'label_output': self.train_y,
}
print("+++ TRAIN SCORE")
train_loss = self.model.evaluate(data=d, batch_size = self.batch_size)
print("\t - loss:\t{:.3}".format(train_loss))
train_predictions = self.model.predict({'target_input': self.train_tokens_X,
'target_one_hot_input': self.train_target_one_hots},
batch_size = self.batch_size)
train_all_acc, _, _ = utils.accuracies(predictions = train_predictions,
gold_labels = self.train_int_labels,
test_tokens = self.train_tokens,
train_token_set = self.train_token_set)
print("\t - all acc:\t{:.2%}".format(train_all_acc))
if self.dev_dir:
print("+++ DEV SCORE")
d = {'target_input': self.dev_tokens_X,
'target_one_hot_input': self.dev_target_one_hots,
}
dev_predictions = self.model.predict(data = d,
batch_size = self.batch_size)
d['label_output'] = self.dev_y
dev_loss = self.model.evaluate(data=d, batch_size = self.batch_size)
print("\t - loss:\t{:.3}".format(dev_loss))
dev_all_acc, dev_known_acc, dev_unknown_acc = utils.accuracies(predictions = dev_predictions,
gold_labels = self.dev_int_labels,
test_tokens = self.dev_tokens,
train_token_set = self.train_token_set)
print("\t - all acc:\t{:.2%}".format(dev_all_acc))
print("\t - known acc:\t{:.2%}".format(dev_known_acc))
print("\t - unknown acc:\t{:.2%}".format(dev_unknown_acc))
if self.test_dir:
print("+++ TEST SCORE")
test_predictions = self.model.predict({'target_input': self.test_tokens_X,
'target_one_hot_input': self.test_target_one_hots},
batch_size = self.batch_size)
test_all_acc, test_known_acc, test_unknown_acc = utils.accuracies(predictions = test_predictions,
gold_labels = self.test_int_labels,
test_tokens = self.test_tokens,
train_token_set = self.train_token_set)
print("\t - all acc:\t{:.2%}".format(test_all_acc))
print("\t - known acc:\t{:.2%}".format(test_known_acc))
print("\t - unknown acc:\t{:.2%}".format(test_unknown_acc))
| mit |
wazeerzulfikar/scikit-learn | sklearn/tests/test_metaestimators.py | 30 | 5040 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.utils.validation import check_is_fitted
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
from sklearn.exceptions import NotFittedError
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba',
'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
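        # Wrap `method` in a property that raises AttributeError whenever its name
        # matches obj.hidden_method, so hasattr() behaves as if the method were absent.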
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
check_is_fitted(self, 'coef_')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises a NotFittedError
assert_raises(NotFittedError, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
sklearn-theano/sklearn-theano | sklearn_theano/sandbox/logistic_regression.py | 9 | 1526 | import numpy as np
from theano import tensor as T
import theano
from sklearn.datasets import make_classification
from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report
rng = np.random.RandomState(1999)
X, y = make_classification(n_samples=400, n_features=25, n_informative=10,
n_classes=2, n_clusters_per_class=2,
random_state=1999)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=.8)
n_samples, n_features = X_train.shape
x = T.matrix('x')
y = T.vector('y')
w = theano.shared(rng.randn(n_features), name='w')
b = theano.shared(0., name='b')
print("Initial model")
print(w.get_value(), b.get_value())
learning_rate = 0.01
reg = .1
n_iter = 10000
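# Model: sigmoid probability, a 0.5 decision threshold, and the binary cross-entropy
# loss; an L1 penalty is added below (L2 and L0 variants are left commented out).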
prob = 1 / (1 + T.exp(-T.dot(x, w) - b))
pred = prob > 0.5
loss = -y * T.log(prob) - (1 - y) * T.log(1 - prob)
# l2
# penalty = reg * (w ** 2).sum()
# l1
penalty = reg * abs(w).sum()
# l0
# penalty = reg * T.neq(w, 0).sum()
cost = loss.mean() + penalty
gw, gb = T.grad(cost, [w, b])
train = theano.function(inputs=[x, y], outputs=[pred, loss],
updates=((w, w - learning_rate * gw),
(b, b - learning_rate * gb)))
predict = theano.function(inputs=[x], outputs=pred)
for i in range(n_iter):
pred, err = train(X_train, y_train)
print("Final model:")
print(w.get_value(), b.get_value())
print("Report:")
y_pred = predict(X_test)
report = classification_report(y_test, y_pred)
print(report)
| bsd-3-clause |
jwiggins/scikit-image | skimage/viewer/canvastools/recttool.py | 43 | 8886 | from matplotlib.widgets import RectangleSelector
from ...viewer.canvastools.base import CanvasToolBase
from ...viewer.canvastools.base import ToolHandles
__all__ = ['RectangleTool']
class RectangleTool(CanvasToolBase, RectangleSelector):
"""Widget for selecting a rectangular region in a plot.
After making the desired selection, press "Enter" to accept the selection
and call the `on_enter` callback function.
Parameters
----------
manager : Viewer or PlotPlugin.
Skimage viewer or plot plugin object.
on_move : function
Function called whenever a control handle is moved.
This function must accept the rectangle extents as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
maxdist : float
Maximum pixel distance allowed when selecting control handle.
rect_props : dict
Properties for :class:`matplotlib.patches.Rectangle`. This class
redefines defaults in :class:`matplotlib.widgets.RectangleSelector`.
Attributes
----------
extents : tuple
Rectangle extents: (xmin, xmax, ymin, ymax).
Examples
----------
>>> from skimage import data
>>> from skimage.viewer import ImageViewer
>>> from skimage.viewer.canvastools import RectangleTool
>>> from skimage.draw import line
>>> from skimage.draw import set_color
>>> viewer = ImageViewer(data.coffee()) # doctest: +SKIP
>>> def print_the_rect(extents):
... global viewer
... im = viewer.image
... coord = np.int64(extents)
... [rr1, cc1] = line(coord[2],coord[0],coord[2],coord[1])
... [rr2, cc2] = line(coord[2],coord[1],coord[3],coord[1])
... [rr3, cc3] = line(coord[3],coord[1],coord[3],coord[0])
... [rr4, cc4] = line(coord[3],coord[0],coord[2],coord[0])
... set_color(im, (rr1, cc1), [255, 255, 0])
... set_color(im, (rr2, cc2), [0, 255, 255])
... set_color(im, (rr3, cc3), [255, 0, 255])
... set_color(im, (rr4, cc4), [0, 0, 0])
... viewer.image=im
>>> rect_tool = RectangleTool(viewer, on_enter=print_the_rect) # doctest: +SKIP
>>> viewer.show() # doctest: +SKIP
"""
def __init__(self, manager, on_move=None, on_release=None, on_enter=None,
maxdist=10, rect_props=None):
self._rect = None
props = dict(edgecolor=None, facecolor='r', alpha=0.15)
props.update(rect_props if rect_props is not None else {})
if props['edgecolor'] is None:
props['edgecolor'] = props['facecolor']
RectangleSelector.__init__(self, manager.ax, lambda *args: None,
rectprops=props)
CanvasToolBase.__init__(self, manager, on_move=on_move,
on_enter=on_enter, on_release=on_release)
# Events are handled by the viewer
try:
self.disconnect_events()
except AttributeError:
# disconnect the events manually (hack for older mpl versions)
[self.canvas.mpl_disconnect(i) for i in range(10)]
# Alias rectangle attribute, which is initialized in RectangleSelector.
self._rect = self.to_draw
self._rect.set_animated(True)
self.maxdist = maxdist
self.active_handle = None
self._extents_on_press = None
if on_enter is None:
def on_enter(extents):
print("(xmin=%.3g, xmax=%.3g, ymin=%.3g, ymax=%.3g)" % extents)
self.callback_on_enter = on_enter
props = dict(mec=props['edgecolor'])
self._corner_order = ['NW', 'NE', 'SE', 'SW']
xc, yc = self.corners
self._corner_handles = ToolHandles(self.ax, xc, yc, marker_props=props)
self._edge_order = ['W', 'N', 'E', 'S']
xe, ye = self.edge_centers
self._edge_handles = ToolHandles(self.ax, xe, ye, marker='s',
marker_props=props)
self.artists = [self._rect,
self._corner_handles.artist,
self._edge_handles.artist]
self.manager.add_tool(self)
@property
def _rect_bbox(self):
if not self._rect:
return 0, 0, 0, 0
x0 = self._rect.get_x()
y0 = self._rect.get_y()
width = self._rect.get_width()
height = self._rect.get_height()
return x0, y0, width, height
@property
def corners(self):
"""Corners of rectangle from lower left, moving clockwise."""
x0, y0, width, height = self._rect_bbox
xc = x0, x0 + width, x0 + width, x0
yc = y0, y0, y0 + height, y0 + height
return xc, yc
@property
def edge_centers(self):
"""Midpoint of rectangle edges from left, moving clockwise."""
x0, y0, width, height = self._rect_bbox
w = width / 2.
h = height / 2.
xe = x0, x0 + w, x0 + width, x0 + w
ye = y0 + h, y0, y0 + h, y0 + height
return xe, ye
@property
def extents(self):
"""Return (xmin, xmax, ymin, ymax)."""
x0, y0, width, height = self._rect_bbox
xmin, xmax = sorted([x0, x0 + width])
ymin, ymax = sorted([y0, y0 + height])
return xmin, xmax, ymin, ymax
@extents.setter
def extents(self, extents):
x1, x2, y1, y2 = extents
xmin, xmax = sorted([x1, x2])
ymin, ymax = sorted([y1, y2])
# Update displayed rectangle
self._rect.set_x(xmin)
self._rect.set_y(ymin)
self._rect.set_width(xmax - xmin)
self._rect.set_height(ymax - ymin)
# Update displayed handles
self._corner_handles.set_data(*self.corners)
self._edge_handles.set_data(*self.edge_centers)
self.set_visible(True)
self.redraw()
def on_mouse_release(self, event):
if event.button != 1:
return
if not self.ax.in_axes(event):
self.eventpress = None
return
RectangleSelector.release(self, event)
self._extents_on_press = None
# Undo hiding of rectangle and redraw.
self.set_visible(True)
self.redraw()
self.callback_on_release(self.geometry)
def on_mouse_press(self, event):
if event.button != 1 or not self.ax.in_axes(event):
return
self._set_active_handle(event)
if self.active_handle is None:
# Clear previous rectangle before drawing new rectangle.
self.set_visible(False)
self.redraw()
self.set_visible(True)
RectangleSelector.press(self, event)
def _set_active_handle(self, event):
"""Set active handle based on the location of the mouse event"""
# Note: event.xdata/ydata in data coordinates, event.x/y in pixels
c_idx, c_dist = self._corner_handles.closest(event.x, event.y)
e_idx, e_dist = self._edge_handles.closest(event.x, event.y)
# Set active handle as closest handle, if mouse click is close enough.
if c_dist > self.maxdist and e_dist > self.maxdist:
self.active_handle = None
return
elif c_dist < e_dist:
self.active_handle = self._corner_order[c_idx]
else:
self.active_handle = self._edge_order[e_idx]
# Save coordinates of rectangle at the start of handle movement.
x1, x2, y1, y2 = self.extents
# Switch variables so that only x2 and/or y2 are updated on move.
if self.active_handle in ['W', 'SW', 'NW']:
x1, x2 = x2, event.xdata
if self.active_handle in ['N', 'NW', 'NE']:
y1, y2 = y2, event.ydata
self._extents_on_press = x1, x2, y1, y2
def on_move(self, event):
if self.eventpress is None or not self.ax.in_axes(event):
return
if self.active_handle is None:
# New rectangle
x1 = self.eventpress.xdata
y1 = self.eventpress.ydata
x2, y2 = event.xdata, event.ydata
else:
x1, x2, y1, y2 = self._extents_on_press
if self.active_handle in ['E', 'W'] + self._corner_order:
x2 = event.xdata
if self.active_handle in ['N', 'S'] + self._corner_order:
y2 = event.ydata
self.extents = (x1, x2, y1, y2)
self.callback_on_move(self.geometry)
@property
def geometry(self):
return self.extents
if __name__ == '__main__': # pragma: no cover
from ...viewer import ImageViewer
from ... import data
viewer = ImageViewer(data.camera())
rect_tool = RectangleTool(viewer)
viewer.show()
print("Final selection:")
rect_tool.callback_on_enter(rect_tool.extents)
| bsd-3-clause |
robin-lai/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 244 | 6011 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed up of LSHForest queries compared to the brute force
method used for exact nearest neighbors is measured for the
aforementioned settings. In general, the speed up increases as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
| bsd-3-clause |
jergosh/slr_pipeline | bin/make_html.py | 1 | 2255 | import os
from os import path
import pandas
import sys
import argparse
from subprocess import Popen
import glob
preamble = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
<title>title</title>
<link rel="stylesheet" type="text/css" href="style.css"/>
<script type="text/javascript" src="script.js"></script>
</head>
<body>
"""
postamble = """
</body>
</html>
"""
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument('--resdir', metavar='result_dir', type=str, required=True)
argparser.add_argument('--treedir', metavar='tree_dir', type=str, required=True)
argparser.add_argument('--alndir', metavar='aln_dir', type=str, required=True)
argparser.add_argument('--signature', metavar='filename', type=str, default="ENS*.res")
argparser.add_argument('--outalns', metavar='output_aln', type=str, required=True)
argparser.add_argument('--outtrees', metavar='output_tree', type=str, required=True)
args = argparser.parse_args()
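    # Build two HTML index pages: one linking each gene's tree, one linking its alignment.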
tree_content = [ preamble ]
aln_content = [ preamble ]
for resfile in glob.glob(path.join(args.resdir, '*', args.signature)):
basename = path.basename(resfile).rpartition('.')[0]
fields = basename.split('_')
ens = fields[0]
dataset = '_'.join(fields[1:3])
print ens, dataset
tree = path.join("trees", fields[1][:2], dataset+'.nh')
aln = path.join("aln", fields[1][:2], dataset+'_prank.best.fas')
tree_link = "<a href=\"" + tree + "\">" + ens + "</a>"
aln_link = "<a href=\"" + aln + "\">" + ens + "</a>"
tree_content.append("<p>"+tree_link+"</p>")
aln_content.append("<p>"+aln_link+"</p>")
tree_content.append(postamble)
aln_content.append(postamble)
tree_content = '\n'.join(tree_content)
aln_content = '\n'.join(aln_content)
tree_file = open(args.outtrees, 'w')
aln_file = open(args.outalns, 'w')
print >>tree_file, tree_content
print >>aln_file, aln_content
if __name__ == "__main__":
main()
| gpl-2.0 |
bsipocz/scikit-image | skimage/external/tifffile/tifffile.py | 19 | 173866 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
# Copyright (c) 2008-2014, Christoph Gohlke
# Copyright (c) 2008-2014, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read and write image data from and to TIFF files.
Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
SGI, ImageJ, MicroManager, FluoView, SEQ and GEL files.
Only a subset of the TIFF specification is supported, mainly uncompressed
and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
Specifically, reading JPEG and CCITT compressed image data or EXIF, IPTC, GPS,
and XMP metadata is not implemented.
Only primary info records are read for STK, FluoView, MicroManager, and
NIH image formats.
TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SGI, SEQ, GEL,
and OME-TIFF, are custom extensions defined by Molecular Devices (Universal
Imaging Corporation), Carl Zeiss MicroImaging, Olympus, Silicon Graphics
International, Media Cybernetics, Molecular Dynamics, and the Open Microscopy
Environment consortium respectively.
For command line usage run ``python tifffile.py --help``
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2014.08.24
Requirements
------------
* `CPython 2.7 or 3.4 <http://www.python.org>`_
* `Numpy 1.8.2 <http://www.numpy.org>`_
* `Matplotlib 1.4 <http://www.matplotlib.org>`_ (optional for plotting)
* `Tifffile.c 2013.11.05 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for faster decoding of PackBits and LZW encoded strings)
Notes
-----
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
Other Python packages and modules for reading bio-scientific TIFF files:
* `Imread <http://luispedro.org/software/imread>`_
* `PyLibTiff <http://code.google.com/p/pylibtiff>`_
* `SimpleITK <http://www.simpleitk.org>`_
* `PyLSM <https://launchpad.net/pylsm>`_
* `PyMca.TiffIO.py <http://pymca.sourceforge.net/>`_ (same as fabio.TiffIO)
* `BioImageXD.Readers <http://www.bioimagexd.net/>`_
* `Cellcognition.io <http://cellcognition.org/>`_
* `CellProfiler.bioformats
<https://github.com/CellProfiler/python-bioformats>`_
Acknowledgements
----------------
* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
* Wim Lewis for a bug fix and some read_cz_lsm functions.
* Hadrien Mary for help on reading MicroManager files.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010).
Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011
(5) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
(7) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
(8) Micro-Manager File Formats.
http://www.micro-manager.org/wiki/Micro-Manager_File_Formats
(9) Tags for TIFF and Related Specifications. Digital Preservation.
http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml
Examples
--------
>>> data = numpy.random.rand(5, 301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> numpy.testing.assert_array_equal(image, data)
>>> with TiffFile('temp.tif') as tif:
... images = tif.asarray()
... for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
"""
from __future__ import division, print_function
import sys
import os
import re
import glob
import math
import zlib
import time
import json
import struct
import warnings
import tempfile
import datetime
import collections
from fractions import Fraction
from xml.etree import cElementTree as etree
import numpy
try:
from . import _tifffile
except ImportError:
warnings.warn(
"failed to import the optional _tifffile C extension module.\n"
"Loading of some compressed images will be slow.\n"
"Tifffile.c can be obtained at http://www.lfd.uci.edu/~gohlke/")
__version__ = '2014.08.24'
__docformat__ = 'restructuredtext en'
__all__ = ('imsave', 'imread', 'imshow', 'TiffFile', 'TiffWriter',
'TiffSequence')
def imsave(filename, data, **kwargs):
"""Write image data to TIFF file.
Refer to the TiffWriter class and member functions for documentation.
Parameters
----------
filename : str
Name of file to write.
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
kwargs : dict
Parameters 'byteorder', 'bigtiff', and 'software' are passed to
the TiffWriter class.
Parameters 'photometric', 'planarconfig', 'resolution',
'description', 'compress', 'volume', and 'extratags' are passed to
the TiffWriter.save function.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> description = u'{"shape": %s}' % str(list(data.shape)) # doctest: +SKIP
>>> imsave('temp.tif', data, compress=6, # doctest: +SKIP
... extratags=[(270, 's', 0, description, True)])
"""
tifargs = {}
for key in ('byteorder', 'bigtiff', 'software', 'writeshape'):
if key in kwargs:
tifargs[key] = kwargs[key]
del kwargs[key]
if 'writeshape' not in kwargs:
kwargs['writeshape'] = True
if 'bigtiff' not in tifargs and data.size*data.dtype.itemsize > 2000*2**20:
tifargs['bigtiff'] = True
with TiffWriter(filename, **tifargs) as tif:
tif.save(data, **kwargs)
class TiffWriter(object):
"""Write image data to TIFF file.
TiffWriter instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> with TiffWriter('temp.tif', bigtiff=True) as tif:
... for i in range(data.shape[0]):
... tif.save(data[i], compress=6)
"""
TYPES = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
TAGS = {
'new_subfile_type': 254, 'subfile_type': 255,
'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
'compression': 259, 'photometric': 262, 'fill_order': 266,
'document_name': 269, 'image_description': 270, 'strip_offsets': 273,
'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278,
'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
'tile_width': 322, 'tile_length': 323, 'tile_offsets': 324,
'tile_byte_counts': 325, 'extra_samples': 338, 'sample_format': 339,
'image_depth': 32997, 'tile_depth': 32998}
def __init__(self, filename, bigtiff=False, byteorder=None,
software='tifffile.py'):
"""Create a new TIFF file for writing.
Use bigtiff=True when creating files greater than 2 GB.
Parameters
----------
filename : str
Name of file to write.
bigtiff : bool
If True, the BigTIFF format is used.
byteorder : {'<', '>'}
The endianness of the data in the file.
By default this is the system's native byte order.
software : str
Name of the software used to create the image.
Saved with the first page only.
"""
if byteorder not in (None, '<', '>'):
raise ValueError("invalid byteorder %s" % byteorder)
if byteorder is None:
byteorder = '<' if sys.byteorder == 'little' else '>'
self._byteorder = byteorder
self._software = software
self._fh = open(filename, 'wb')
self._fh.write({'<': b'II', '>': b'MM'}[byteorder])
if bigtiff:
self._bigtiff = True
self._offset_size = 8
self._tag_size = 20
self._numtag_format = 'Q'
self._offset_format = 'Q'
self._val_format = '8s'
self._fh.write(struct.pack(byteorder+'HHH', 43, 8, 0))
else:
self._bigtiff = False
self._offset_size = 4
self._tag_size = 12
self._numtag_format = 'H'
self._offset_format = 'I'
self._val_format = '4s'
self._fh.write(struct.pack(byteorder+'H', 42))
# first IFD
self._ifd_offset = self._fh.tell()
self._fh.write(struct.pack(byteorder+self._offset_format, 0))
def save(self, data, photometric=None, planarconfig=None, resolution=None,
description=None, volume=False, writeshape=False, compress=0,
extratags=()):
"""Write image data to TIFF file.
Image data are written in one stripe per plane.
Dimensions larger than 2 to 4 (depending on photometric mode, planar
configuration, and SGI mode) are flattened and saved as separate pages.
The 'sample_format' and 'bits_per_sample' TIFF tags are derived from
the data type.
Parameters
----------
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
photometric : {'minisblack', 'miniswhite', 'rgb'}
The color space of the image data.
By default this setting is inferred from the data shape.
planarconfig : {'contig', 'planar'}
Specifies if samples are stored contiguous or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
description : str
The subject of the image. Saved with the first page only.
compress : int
Values from 0 to 9 controlling the level of zlib compression.
If 0, data are written uncompressed (default).
volume : bool
If True, volume data are stored in one tile (if applicable) using
the SGI image_depth and tile_depth tags.
Image width and depth must be multiple of 16.
Few software can read this format, e.g. MeVisLab.
writeshape : bool
If True, write the data shape to the image_description tag
if necessary and no other description is given.
extratags: sequence of tuples
Additional tags as [(code, dtype, count, value, writeonce)].
code : int
The TIFF tag Id.
dtype : str
Data type of items in 'value' in Python struct format.
One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
count : int
Number of data values. Not used for string values.
value : sequence
'Count' values compatible with 'dtype'.
writeonce : bool
If True, the tag is written to the first page only.
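        Examples
        --------
        A minimal sketch of a typical call; the private tag code 65000 and its
        string value are illustrative assumptions, not part of the TIFF
        specification.

        >>> data = numpy.random.rand(2, 31, 33).astype('float32')  # doctest: +SKIP
        >>> with TiffWriter('temp.tif') as tif:  # doctest: +SKIP
        ...     tif.save(data, compress=6, resolution=(300, 300),
        ...              extratags=[(65000, 's', 0, 'custom metadata', True)])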
"""
if photometric not in (None, 'minisblack', 'miniswhite', 'rgb'):
raise ValueError("invalid photometric %s" % photometric)
if planarconfig not in (None, 'contig', 'planar'):
raise ValueError("invalid planarconfig %s" % planarconfig)
if not 0 <= compress <= 9:
raise ValueError("invalid compression level %s" % compress)
fh = self._fh
byteorder = self._byteorder
numtag_format = self._numtag_format
val_format = self._val_format
offset_format = self._offset_format
offset_size = self._offset_size
tag_size = self._tag_size
data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C')
data_shape = shape = data.shape
data = numpy.atleast_2d(data)
# normalize shape of data
samplesperpixel = 1
extrasamples = 0
if volume and data.ndim < 3:
volume = False
if photometric is None:
if planarconfig:
photometric = 'rgb'
elif data.ndim > 2 and shape[-1] in (3, 4):
photometric = 'rgb'
elif volume and data.ndim > 3 and shape[-4] in (3, 4):
photometric = 'rgb'
elif data.ndim > 2 and shape[-3] in (3, 4):
photometric = 'rgb'
else:
photometric = 'minisblack'
if planarconfig and len(shape) <= (3 if volume else 2):
planarconfig = None
photometric = 'minisblack'
if photometric == 'rgb':
if len(shape) < 3:
raise ValueError("not a RGB(A) image")
if len(shape) < 4:
volume = False
if planarconfig is None:
if shape[-1] in (3, 4):
planarconfig = 'contig'
elif shape[-4 if volume else -3] in (3, 4):
planarconfig = 'planar'
elif shape[-1] > shape[-4 if volume else -3]:
planarconfig = 'planar'
else:
planarconfig = 'contig'
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
if samplesperpixel > 3:
extrasamples = samplesperpixel - 3
elif planarconfig and len(shape) > (3 if volume else 2):
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
extrasamples = samplesperpixel - 1
else:
planarconfig = None
# remove trailing 1s
while len(shape) > 2 and shape[-1] == 1:
shape = shape[:-1]
if len(shape) < 3:
volume = False
if False and (
len(shape) > (3 if volume else 2) and shape[-1] < 5 and
all(shape[-1] < i
for i in shape[(-4 if volume else -3):-1])):
# DISABLED: non-standard TIFF, e.g. (220, 320, 2)
planarconfig = 'contig'
samplesperpixel = shape[-1]
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
else:
data = data.reshape(
(-1, 1) + shape[(-3 if volume else -2):] + (1,))
if samplesperpixel == 2:
warnings.warn("writing non-standard TIFF (samplesperpixel 2)")
if volume and (data.shape[-2] % 16 or data.shape[-3] % 16):
warnings.warn("volume width or length are not multiple of 16")
volume = False
data = numpy.swapaxes(data, 1, 2)
data = data.reshape(
(data.shape[0] * data.shape[1],) + data.shape[2:])
# data.shape is now normalized 5D or 6D, depending on volume
# (pages, planar_samples, (depth,) height, width, contig_samples)
assert len(data.shape) in (5, 6)
shape = data.shape
bytestr = bytes if sys.version[0] == '2' else (
lambda x: bytes(x, 'utf-8') if isinstance(x, str) else x)
tags = [] # list of (code, ifdentry, ifdvalue, writeonce)
if volume:
# use tiles to save volume data
tag_byte_counts = TiffWriter.TAGS['tile_byte_counts']
tag_offsets = TiffWriter.TAGS['tile_offsets']
else:
# else use strips
tag_byte_counts = TiffWriter.TAGS['strip_byte_counts']
tag_offsets = TiffWriter.TAGS['strip_offsets']
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
def addtag(code, dtype, count, value, writeonce=False):
# Compute ifdentry & ifdvalue bytes from code, dtype, count, value.
# Append (code, ifdentry, ifdvalue, writeonce) to tags list.
code = int(TiffWriter.TAGS.get(code, code))
try:
tifftype = TiffWriter.TYPES[dtype]
except KeyError:
raise ValueError("unknown dtype %s" % dtype)
rawcount = count
if dtype == 's':
value = bytestr(value) + b'\0'
count = rawcount = len(value)
value = (value, )
if len(dtype) > 1:
count *= int(dtype[:-1])
dtype = dtype[-1]
ifdentry = [pack('HH', code, tifftype),
pack(offset_format, rawcount)]
ifdvalue = None
if count == 1:
if isinstance(value, (tuple, list)):
value = value[0]
ifdentry.append(pack(val_format, pack(dtype, value)))
elif struct.calcsize(dtype) * count <= offset_size:
ifdentry.append(pack(val_format,
pack(str(count)+dtype, *value)))
else:
ifdentry.append(pack(offset_format, 0))
ifdvalue = pack(str(count)+dtype, *value)
tags.append((code, b''.join(ifdentry), ifdvalue, writeonce))
def rational(arg, max_denominator=1000000):
# return nominator and denominator from float or two integers
try:
f = Fraction.from_float(arg)
except TypeError:
f = Fraction(arg[0], arg[1])
f = f.limit_denominator(max_denominator)
return f.numerator, f.denominator
if self._software:
addtag('software', 's', 0, self._software, writeonce=True)
self._software = None # only save to first page
if description:
addtag('image_description', 's', 0, description, writeonce=True)
elif writeshape and shape[0] > 1 and shape != data_shape:
addtag('image_description', 's', 0,
"shape=(%s)" % (",".join('%i' % i for i in data_shape)),
writeonce=True)
addtag('datetime', 's', 0,
datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"),
writeonce=True)
addtag('compression', 'H', 1, 32946 if compress else 1)
addtag('orientation', 'H', 1, 1)
addtag('image_width', 'I', 1, shape[-2])
addtag('image_length', 'I', 1, shape[-3])
if volume:
addtag('image_depth', 'I', 1, shape[-4])
addtag('tile_depth', 'I', 1, shape[-4])
addtag('tile_width', 'I', 1, shape[-2])
addtag('tile_length', 'I', 1, shape[-3])
addtag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2)
addtag('sample_format', 'H', 1,
{'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
addtag('photometric', 'H', 1,
{'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric])
addtag('samples_per_pixel', 'H', 1, samplesperpixel)
if planarconfig and samplesperpixel > 1:
addtag('planar_configuration', 'H', 1, 1
if planarconfig == 'contig' else 2)
addtag('bits_per_sample', 'H', samplesperpixel,
(data.dtype.itemsize * 8, ) * samplesperpixel)
else:
addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
if extrasamples:
if photometric == 'rgb' and extrasamples == 1:
addtag('extra_samples', 'H', 1, 1) # associated alpha channel
else:
addtag('extra_samples', 'H', extrasamples, (0,) * extrasamples)
if resolution:
addtag('x_resolution', '2I', 1, rational(resolution[0]))
addtag('y_resolution', '2I', 1, rational(resolution[1]))
addtag('resolution_unit', 'H', 1, 2)
addtag('rows_per_strip', 'I', 1,
shape[-3] * (shape[-4] if volume else 1))
# use one strip or tile per plane
strip_byte_counts = (data[0, 0].size * data.dtype.itemsize,) * shape[1]
addtag(tag_byte_counts, offset_format, shape[1], strip_byte_counts)
addtag(tag_offsets, offset_format, shape[1], (0, ) * shape[1])
# add extra tags from users
for t in extratags:
addtag(*t)
# the entries in an IFD must be sorted in ascending order by tag code
tags = sorted(tags, key=lambda x: x[0])
if not self._bigtiff and (fh.tell() + data.size*data.dtype.itemsize
> 2**31-1):
raise ValueError("data too large for non-bigtiff file")
for pageindex in range(shape[0]):
# update pointer at ifd_offset
pos = fh.tell()
fh.seek(self._ifd_offset)
fh.write(pack(offset_format, pos))
fh.seek(pos)
# write ifdentries
fh.write(pack(numtag_format, len(tags)))
tag_offset = fh.tell()
fh.write(b''.join(t[1] for t in tags))
self._ifd_offset = fh.tell()
fh.write(pack(offset_format, 0)) # offset to next IFD
# write tag values and patch offsets in ifdentries, if necessary
for tagindex, tag in enumerate(tags):
if tag[2]:
pos = fh.tell()
fh.seek(tag_offset + tagindex*tag_size + offset_size + 4)
fh.write(pack(offset_format, pos))
fh.seek(pos)
if tag[0] == tag_offsets:
strip_offsets_offset = pos
elif tag[0] == tag_byte_counts:
strip_byte_counts_offset = pos
fh.write(tag[2])
# write image data
data_offset = fh.tell()
if compress:
strip_byte_counts = []
for plane in data[pageindex]:
plane = zlib.compress(plane, compress)
strip_byte_counts.append(len(plane))
fh.write(plane)
else:
                # if this fails, try updating Python/numpy
data[pageindex].tofile(fh)
fh.flush()
# update strip and tile offsets and byte_counts if necessary
pos = fh.tell()
for tagindex, tag in enumerate(tags):
if tag[0] == tag_offsets: # strip or tile offsets
if tag[2]:
fh.seek(strip_offsets_offset)
strip_offset = data_offset
for size in strip_byte_counts:
fh.write(pack(offset_format, strip_offset))
strip_offset += size
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, data_offset))
elif tag[0] == tag_byte_counts: # strip or tile byte_counts
if compress:
if tag[2]:
fh.seek(strip_byte_counts_offset)
for size in strip_byte_counts:
fh.write(pack(offset_format, size))
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, strip_byte_counts[0]))
break
fh.seek(pos)
fh.flush()
# remove tags that should be written only once
if pageindex == 0:
tags = [t for t in tags if not t[-1]]
def close(self):
self._fh.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def imread(files, **kwargs):
"""Return image data from TIFF file(s) as numpy array.
The first image series is returned if no arguments are provided.
Parameters
----------
files : str or list
File name, glob pattern, or list of file names.
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages in file to return as array.
multifile : bool
If True (default), OME-TIFF data may include pages from multiple files.
pattern : str
Regular expression pattern that matches axes names and indices in
file names.
kwargs : dict
Additional parameters passed to the TiffFile or TiffSequence asarray
function.
Examples
--------
>>> im = imread('test.tif', key=0) # doctest: +SKIP
>>> im.shape # doctest: +SKIP
(256, 256, 4)
>>> ims = imread(['test.tif', 'test.tif']) # doctest: +SKIP
>>> ims.shape # doctest: +SKIP
(2, 256, 256, 4)
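    The following sketch mirrors the TiffSequence example further below;
    the glob pattern is a placeholder.
    >>> vol = imread('test.oif.files/*.tif')  # doctest: +SKIP
    >>> vol.shape  # doctest: +SKIP
    (2, 100, 256, 256)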
"""
kwargs_file = {}
if 'multifile' in kwargs:
kwargs_file['multifile'] = kwargs['multifile']
del kwargs['multifile']
else:
kwargs_file['multifile'] = True
kwargs_seq = {}
if 'pattern' in kwargs:
kwargs_seq['pattern'] = kwargs['pattern']
del kwargs['pattern']
if isinstance(files, basestring) and any(i in files for i in '?*'):
files = glob.glob(files)
if not files:
raise ValueError('no files found')
if len(files) == 1:
files = files[0]
if isinstance(files, basestring):
with TiffFile(files, **kwargs_file) as tif:
return tif.asarray(**kwargs)
else:
with TiffSequence(files, **kwargs_seq) as imseq:
return imseq.asarray(**kwargs)
class lazyattr(object):
"""Lazy object attribute whose value is computed on first access."""
__slots__ = ('func', )
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self
value = self.func(instance)
if value is NotImplemented:
return getattr(super(owner, instance), self.func.__name__)
setattr(instance, self.func.__name__, value)
return value
class TiffFile(object):
"""Read image and metadata from TIFF, STK, LSM, and FluoView files.
TiffFile instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Attributes
----------
pages : list
All TIFF pages in file.
series : list of Records(shape, dtype, axes, TiffPages)
TIFF pages with compatible shapes and types.
micromanager_metadata: dict
Extra MicroManager non-TIFF metadata in the file, if exists.
All attributes are read-only.
Examples
--------
>>> with TiffFile('test.tif') as tif: # doctest: +SKIP
... data = tif.asarray()
... data.shape
(256, 256, 4)
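    A per-page sketch (illustrative, not executed):
    >>> with TiffFile('test.tif') as tif:  # doctest: +SKIP
    ...     for page in tif.pages:
    ...         image = page.asarray()
    ...         width = page.tags['image_width'].value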
"""
def __init__(self, arg, name=None, offset=None, size=None,
multifile=True, multifile_close=True):
"""Initialize instance from file.
Parameters
----------
arg : str or open file
Name of file or open file object.
The file objects are closed in TiffFile.close().
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
multifile : bool
If True (default), series may include pages from multiple files.
Currently applies to OME-TIFF only.
multifile_close : bool
If True (default), keep the handles of other files in multifile
series closed. This is inefficient when few files refer to
many pages. If False, the C runtime may run out of resources.
"""
self._fh = FileHandle(arg, name=name, offset=offset, size=size)
self.offset_size = None
self.pages = []
self._multifile = bool(multifile)
self._multifile_close = bool(multifile_close)
self._files = {self._fh.name: self} # cache of TiffFiles
try:
self._fromfile()
except Exception:
self._fh.close()
raise
@property
def filehandle(self):
"""Return file handle."""
return self._fh
@property
def filename(self):
"""Return name of file handle."""
return self._fh.name
def close(self):
"""Close open file handle(s)."""
for tif in self._files.values():
tif._fh.close()
self._files = {}
def _fromfile(self):
"""Read TIFF header and all page records from file."""
self._fh.seek(0)
try:
self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)]
except KeyError:
raise ValueError("not a valid TIFF file")
version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0]
if version == 43: # BigTiff
self.offset_size, zero = struct.unpack(self.byteorder+'HH',
self._fh.read(4))
if zero or self.offset_size != 8:
raise ValueError("not a valid BigTIFF file")
elif version == 42:
self.offset_size = 4
else:
raise ValueError("not a TIFF file")
self.pages = []
while True:
try:
page = TiffPage(self)
self.pages.append(page)
except StopIteration:
break
if not self.pages:
raise ValueError("empty TIFF file")
if self.is_micromanager:
# MicroManager files contain metadata not stored in TIFF tags.
self.micromanager_metadata = read_micromanager_metadata(self._fh)
if self.is_lsm:
self._fix_lsm_strip_offsets()
self._fix_lsm_strip_byte_counts()
def _fix_lsm_strip_offsets(self):
"""Unwrap strip offsets for LSM files greater than 4 GB."""
for series in self.series:
wrap = 0
previous_offset = 0
for page in series.pages:
strip_offsets = []
for current_offset in page.strip_offsets:
if current_offset < previous_offset:
wrap += 2**32
strip_offsets.append(current_offset + wrap)
previous_offset = current_offset
page.strip_offsets = tuple(strip_offsets)
def _fix_lsm_strip_byte_counts(self):
"""Set strip_byte_counts to size of compressed data.
The strip_byte_counts tag in LSM files contains the number of bytes
for the uncompressed data.
"""
if not self.pages:
return
strips = {}
for page in self.pages:
assert len(page.strip_offsets) == len(page.strip_byte_counts)
for offset, bytecount in zip(page.strip_offsets,
page.strip_byte_counts):
strips[offset] = bytecount
offsets = sorted(strips.keys())
offsets.append(min(offsets[-1] + strips[offsets[-1]], self._fh.size))
for i, offset in enumerate(offsets[:-1]):
strips[offset] = min(strips[offset], offsets[i+1] - offset)
for page in self.pages:
if page.compression:
page.strip_byte_counts = tuple(
strips[offset] for offset in page.strip_offsets)
@lazyattr
def series(self):
"""Return series of TiffPage with compatible shape and properties."""
if not self.pages:
return []
series = []
page0 = self.pages[0]
if self.is_ome:
series = self._omeseries()
elif self.is_fluoview:
dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
b'EVENT': 'V', b'EXPOSURE': 'L'}
mmhd = list(reversed(page0.mm_header.dimensions))
series = [Record(
axes=''.join(dims.get(i[0].strip().upper(), 'Q')
for i in mmhd if i[1] > 1),
shape=tuple(int(i[1]) for i in mmhd if i[1] > 1),
pages=self.pages, dtype=numpy.dtype(page0.dtype))]
elif self.is_lsm:
lsmi = page0.cz_lsm_info
axes = CZ_SCAN_TYPES[lsmi.scan_type]
if page0.is_rgb:
axes = axes.replace('C', '').replace('XY', 'XYC')
axes = axes[::-1]
shape = tuple(getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes)
pages = [p for p in self.pages if not p.is_reduced]
series = [Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype))]
if len(pages) != len(self.pages): # reduced RGB pages
pages = [p for p in self.pages if p.is_reduced]
cp = 1
i = 0
while cp < len(pages) and i < len(shape)-2:
cp *= shape[i]
i += 1
shape = shape[:i] + pages[0].shape
axes = axes[:i] + 'CYX'
series.append(Record(axes=axes, shape=shape, pages=pages,
dtype=numpy.dtype(pages[0].dtype)))
elif self.is_imagej:
shape = []
axes = []
ij = page0.imagej_tags
if 'frames' in ij:
shape.append(ij['frames'])
axes.append('T')
if 'slices' in ij:
shape.append(ij['slices'])
axes.append('Z')
if 'channels' in ij and not self.is_rgb:
shape.append(ij['channels'])
axes.append('C')
remain = len(self.pages) // (product(shape) if shape else 1)
if remain > 1:
shape.append(remain)
axes.append('I')
shape.extend(page0.shape)
axes.extend(page0.axes)
axes = ''.join(axes)
series = [Record(pages=self.pages, shape=tuple(shape), axes=axes,
dtype=numpy.dtype(page0.dtype))]
elif self.is_nih:
if len(self.pages) == 1:
shape = page0.shape
axes = page0.axes
else:
shape = (len(self.pages),) + page0.shape
axes = 'I' + page0.axes
series = [Record(pages=self.pages, shape=shape, axes=axes,
dtype=numpy.dtype(page0.dtype))]
elif page0.is_shaped:
# TODO: shaped files can contain multiple series
shape = page0.tags['image_description'].value[7:-1]
shape = tuple(int(i) for i in shape.split(b','))
series = [Record(pages=self.pages, shape=shape,
axes='Q' * len(shape),
dtype=numpy.dtype(page0.dtype))]
# generic detection of series
if not series:
shapes = []
pages = {}
for page in self.pages:
if not page.shape:
continue
shape = page.shape + (page.axes,
page.compression in TIFF_DECOMPESSORS)
if shape not in pages:
shapes.append(shape)
pages[shape] = [page]
else:
pages[shape].append(page)
series = [Record(pages=pages[s],
axes=(('I' + s[-2])
if len(pages[s]) > 1 else s[-2]),
dtype=numpy.dtype(pages[s][0].dtype),
shape=((len(pages[s]), ) + s[:-2]
if len(pages[s]) > 1 else s[:-2]))
for s in shapes]
# remove empty series, e.g. in MD Gel files
series = [s for s in series if sum(s.shape) > 0]
return series
def asarray(self, key=None, series=None, memmap=False):
"""Return image data from multiple TIFF pages as numpy array.
By default the first image series is returned.
Parameters
----------
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages to return as array.
memmap : bool
If True, return an array stored in a binary file on disk
if possible.
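        Examples
        --------
        Illustrative sketch; 'test.tif' is a placeholder file name.
        >>> with TiffFile('test.tif') as tif:  # doctest: +SKIP
        ...     series0 = tif.asarray(series=0)
        ...     first_page = tif.asarray(key=0)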
"""
if key is None and series is None:
series = 0
if series is not None:
pages = self.series[series].pages
else:
pages = self.pages
if key is None:
pass
elif isinstance(key, int):
pages = [pages[key]]
elif isinstance(key, slice):
pages = pages[key]
elif isinstance(key, collections.Iterable):
pages = [pages[k] for k in key]
else:
raise TypeError("key must be an int, slice, or sequence")
if not len(pages):
raise ValueError("no pages selected")
if self.is_nih:
if pages[0].is_palette:
result = stack_pages(pages, colormapped=False, squeeze=False)
result = numpy.take(pages[0].color_map, result, axis=1)
result = numpy.swapaxes(result, 0, 1)
else:
result = stack_pages(pages, memmap=memmap,
colormapped=False, squeeze=False)
elif len(pages) == 1:
return pages[0].asarray(memmap=memmap)
elif self.is_ome:
assert not self.is_palette, "color mapping disabled for ome-tiff"
if any(p is None for p in pages):
# zero out missing pages
firstpage = next(p for p in pages if p)
nopage = numpy.zeros_like(
firstpage.asarray(memmap=False))
s = self.series[series]
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=s.dtype, shape=s.shape)
result = result.reshape(-1)
else:
result = numpy.empty(s.shape, s.dtype).reshape(-1)
index = 0
class KeepOpen:
# keep Tiff files open between consecutive pages
def __init__(self, parent, close):
self.master = parent
self.parent = parent
self._close = close
def open(self, page):
if self._close and page and page.parent != self.parent:
if self.parent != self.master:
self.parent.filehandle.close()
self.parent = page.parent
self.parent.filehandle.open()
def close(self):
if self._close and self.parent != self.master:
self.parent.filehandle.close()
keep = KeepOpen(self, self._multifile_close)
for page in pages:
keep.open(page)
if page:
a = page.asarray(memmap=False, colormapped=False,
reopen=False)
else:
a = nopage
try:
result[index:index + a.size] = a.reshape(-1)
except ValueError as e:
warnings.warn("ome-tiff: %s" % e)
break
index += a.size
keep.close()
else:
result = stack_pages(pages, memmap=memmap)
if key is None:
try:
result.shape = self.series[series].shape
except ValueError:
try:
warnings.warn("failed to reshape %s to %s" % (
result.shape, self.series[series].shape))
# try series of expected shapes
result.shape = (-1,) + self.series[series].shape
except ValueError:
# revert to generic shape
result.shape = (-1,) + pages[0].shape
else:
result.shape = (-1,) + pages[0].shape
return result
def _omeseries(self):
"""Return image series in OME-TIFF file(s)."""
root = etree.fromstring(self.pages[0].tags['image_description'].value)
uuid = root.attrib.get('UUID', None)
self._files = {uuid: self}
dirname = self._fh.dirname
modulo = {}
result = []
for element in root:
if element.tag.endswith('BinaryOnly'):
warnings.warn("ome-xml: not an ome-tiff master file")
break
if element.tag.endswith('StructuredAnnotations'):
for annot in element:
if not annot.attrib.get('Namespace',
'').endswith('modulo'):
continue
for value in annot:
for modul in value:
for along in modul:
if not along.tag[:-1].endswith('Along'):
continue
axis = along.tag[-1]
newaxis = along.attrib.get('Type', 'other')
newaxis = AXES_LABELS[newaxis]
if 'Start' in along.attrib:
labels = range(
int(along.attrib['Start']),
int(along.attrib['End']) + 1,
int(along.attrib.get('Step', 1)))
else:
labels = [label.text for label in along
if label.tag.endswith('Label')]
modulo[axis] = (newaxis, labels)
if not element.tag.endswith('Image'):
continue
for pixels in element:
if not pixels.tag.endswith('Pixels'):
continue
atr = pixels.attrib
dtype = atr.get('Type', None)
axes = ''.join(reversed(atr['DimensionOrder']))
shape = list(int(atr['Size'+ax]) for ax in axes)
size = product(shape[:-2])
ifds = [None] * size
for data in pixels:
if not data.tag.endswith('TiffData'):
continue
atr = data.attrib
ifd = int(atr.get('IFD', 0))
num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0))
num = int(atr.get('PlaneCount', num))
idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]]
try:
idx = numpy.ravel_multi_index(idx, shape[:-2])
except ValueError:
# ImageJ produces invalid ome-xml when cropping
warnings.warn("ome-xml: invalid TiffData index")
continue
for uuid in data:
if not uuid.tag.endswith('UUID'):
continue
if uuid.text not in self._files:
if not self._multifile:
# abort reading multifile OME series
# and fall back to generic series
return []
fname = uuid.attrib['FileName']
try:
tif = TiffFile(os.path.join(dirname, fname))
except (IOError, ValueError):
                                # 'tif' may be unbound if the constructor
                                # raised; TiffFile closes its own file handle
warnings.warn(
"ome-xml: failed to read '%s'" % fname)
break
self._files[uuid.text] = tif
if self._multifile_close:
tif.close()
pages = self._files[uuid.text].pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
# only process first uuid
break
else:
pages = self.pages
try:
for i in range(num if num else len(pages)):
ifds[idx + i] = pages[ifd + i]
except IndexError:
warnings.warn("ome-xml: index out of range")
if all(i is None for i in ifds):
# skip images without data
continue
dtype = next(i for i in ifds if i).dtype
result.append(Record(axes=axes, shape=shape, pages=ifds,
dtype=numpy.dtype(dtype)))
for record in result:
for axis, (newaxis, labels) in modulo.items():
i = record.axes.index(axis)
size = len(labels)
if record.shape[i] == size:
record.axes = record.axes.replace(axis, newaxis, 1)
else:
record.shape[i] //= size
record.shape.insert(i+1, size)
record.axes = record.axes.replace(axis, axis+newaxis, 1)
record.shape = tuple(record.shape)
# squeeze dimensions
for record in result:
record.shape, record.axes = squeeze_axes(record.shape, record.axes)
return result
def __len__(self):
"""Return number of image pages in file."""
return len(self.pages)
def __getitem__(self, key):
"""Return specified page."""
return self.pages[key]
def __iter__(self):
"""Return iterator over pages."""
return iter(self.pages)
def __str__(self):
"""Return string containing information about file."""
result = [
self._fh.name.capitalize(),
format_size(self._fh.size),
{'<': 'little endian', '>': 'big endian'}[self.byteorder]]
if self.is_bigtiff:
result.append("bigtiff")
if len(self.pages) > 1:
result.append("%i pages" % len(self.pages))
if len(self.series) > 1:
result.append("%i series" % len(self.series))
if len(self._files) > 1:
result.append("%i files" % (len(self._files)))
return ", ".join(result)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@lazyattr
def fstat(self):
try:
return os.fstat(self._fh.fileno())
except Exception: # io.UnsupportedOperation
return None
@lazyattr
def is_bigtiff(self):
return self.offset_size != 4
@lazyattr
def is_rgb(self):
return all(p.is_rgb for p in self.pages)
@lazyattr
def is_palette(self):
return all(p.is_palette for p in self.pages)
@lazyattr
def is_mdgel(self):
return any(p.is_mdgel for p in self.pages)
@lazyattr
def is_mediacy(self):
return any(p.is_mediacy for p in self.pages)
@lazyattr
def is_stk(self):
return all(p.is_stk for p in self.pages)
@lazyattr
def is_lsm(self):
return self.pages[0].is_lsm
@lazyattr
def is_imagej(self):
return self.pages[0].is_imagej
@lazyattr
def is_micromanager(self):
return self.pages[0].is_micromanager
@lazyattr
def is_nih(self):
return self.pages[0].is_nih
@lazyattr
def is_fluoview(self):
return self.pages[0].is_fluoview
@lazyattr
def is_ome(self):
return self.pages[0].is_ome
class TiffPage(object):
"""A TIFF image file directory (IFD).
Attributes
----------
index : int
Index of page in file.
dtype : str {TIFF_SAMPLE_DTYPES}
Data type of image, colormapped if applicable.
shape : tuple
Dimensions of the image array in TIFF page,
colormapped and with one alpha channel if applicable.
axes : str
Axes label codes:
'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane,
'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda,
'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime,
'L' exposure, 'V' event, 'Q' unknown, '_' missing
tags : TiffTags
Dictionary of tags in page.
Tag values are also directly accessible as attributes.
color_map : numpy array
Color look up table, if exists.
cz_lsm_scan_info: Record(dict)
LSM scan info attributes, if exists.
imagej_tags: Record(dict)
Consolidated ImageJ description and metadata tags, if exists.
uic_tags: Record(dict)
Consolidated MetaMorph STK/UIC tags, if exists.
All attributes are read-only.
Notes
-----
The internal, normalized '_shape' attribute is 6 dimensional:
0. number planes (stk)
1. planar samples_per_pixel
2. image_depth Z (sgi)
3. image_length Y
4. image_width X
5. contig samples_per_pixel
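    Examples
    --------
    Illustrative sketch; 'test.tif' is a placeholder file name.
    >>> with TiffFile('test.tif') as tif:  # doctest: +SKIP
    ...     page = tif.pages[0]
    ...     shape, axes = page.shape, page.axes
    ...     image = page.asarray()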
"""
def __init__(self, parent):
"""Initialize instance from file."""
self.parent = parent
self.index = len(parent.pages)
self.shape = self._shape = ()
self.dtype = self._dtype = None
self.axes = ""
self.tags = TiffTags()
self._fromfile()
self._process_tags()
def _fromfile(self):
"""Read TIFF IFD structure and its tags from file.
File cursor must be at storage position of IFD offset and is left at
offset to next IFD.
Raises StopIteration if offset (first bytes read) is 0.
"""
fh = self.parent.filehandle
byteorder = self.parent.byteorder
offset_size = self.parent.offset_size
fmt = {4: 'I', 8: 'Q'}[offset_size]
offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0]
if not offset:
raise StopIteration()
# read standard tags
tags = self.tags
fh.seek(offset)
fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size]
try:
numtags = struct.unpack(byteorder + fmt, fh.read(size))[0]
except Exception:
warnings.warn("corrupted page list")
raise StopIteration()
tagcode = 0
for _ in range(numtags):
try:
tag = TiffTag(self.parent)
# print(tag)
except TiffTag.Error as e:
warnings.warn(str(e))
continue
if tagcode > tag.code:
# expected for early LSM and tifffile versions
warnings.warn("tags are not ordered by code")
tagcode = tag.code
if tag.name not in tags:
tags[tag.name] = tag
else:
# some files contain multiple IFD with same code
# e.g. MicroManager files contain two image_description
                i = 1
                while True:
                    name = "%s_%i" % (tag.name, i)
                    if name not in tags:
                        tags[name] = tag
                        break
                    i += 1
pos = fh.tell()
if self.is_lsm or (self.index and self.parent.is_lsm):
# correct non standard LSM bitspersample tags
self.tags['bits_per_sample']._correct_lsm_bitspersample(self)
if self.is_lsm:
# read LSM info subrecords
for name, reader in CZ_LSM_INFO_READERS.items():
try:
offset = self.cz_lsm_info['offset_'+name]
except KeyError:
continue
if offset < 8:
# older LSM revision
continue
fh.seek(offset)
try:
setattr(self, 'cz_lsm_'+name, reader(fh))
except ValueError:
pass
elif self.is_stk and 'uic1tag' in tags and not tags['uic1tag'].value:
# read uic1tag now that plane count is known
uic1tag = tags['uic1tag']
fh.seek(uic1tag.value_offset)
tags['uic1tag'].value = Record(
read_uic1tag(fh, byteorder, uic1tag.dtype, uic1tag.count,
tags['uic2tag'].count))
fh.seek(pos)
def _process_tags(self):
"""Validate standard tags and initialize attributes.
Raise ValueError if tag values are not supported.
"""
tags = self.tags
for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
if not (name in tags or default is None):
tags[name] = TiffTag(code, dtype=dtype, count=count,
value=default, name=name)
if name in tags and validate:
try:
if tags[name].count == 1:
setattr(self, name, validate[tags[name].value])
else:
setattr(self, name, tuple(
validate[value] for value in tags[name].value))
except KeyError:
raise ValueError("%s.value (%s) not supported" %
(name, tags[name].value))
tag = tags['bits_per_sample']
if tag.count == 1:
self.bits_per_sample = tag.value
else:
# LSM might list more items than samples_per_pixel
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.bits_per_sample = value
else:
self.bits_per_sample = value[0]
tag = tags['sample_format']
if tag.count == 1:
self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
else:
value = tag.value[:self.samples_per_pixel]
if any((v-value[0] for v in value)):
self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value]
else:
self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]
if 'photometric' not in tags:
self.photometric = None
if 'image_depth' not in tags:
self.image_depth = 1
if 'image_length' in tags:
self.strips_per_image = int(math.floor(
float(self.image_length + self.rows_per_strip - 1) /
self.rows_per_strip))
else:
self.strips_per_image = 0
key = (self.sample_format, self.bits_per_sample)
self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)
if 'image_length' not in self.tags or 'image_width' not in self.tags:
# some GEL file pages are missing image data
self.image_length = 0
self.image_width = 0
self.image_depth = 0
self.strip_offsets = 0
self._shape = ()
self.shape = ()
self.axes = ''
if self.is_palette:
self.dtype = self.tags['color_map'].dtype[1]
self.color_map = numpy.array(self.color_map, self.dtype)
dmax = self.color_map.max()
if dmax < 256:
self.dtype = numpy.uint8
self.color_map = self.color_map.astype(self.dtype)
#else:
# self.dtype = numpy.uint8
# self.color_map >>= 8
# self.color_map = self.color_map.astype(self.dtype)
self.color_map.shape = (3, -1)
# determine shape of data
image_length = self.image_length
image_width = self.image_width
image_depth = self.image_depth
samples_per_pixel = self.samples_per_pixel
if self.is_stk:
assert self.image_depth == 1
planes = self.tags['uic2tag'].count
if self.is_contig:
self._shape = (planes, 1, 1, image_length, image_width,
samples_per_pixel)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, image_length, image_width,
samples_per_pixel)
self.axes = 'YXS'
else:
self._shape = (planes, samples_per_pixel, 1, image_length,
image_width, 1)
if samples_per_pixel == 1:
self.shape = (planes, image_length, image_width)
self.axes = 'YX'
else:
self.shape = (planes, samples_per_pixel, image_length,
image_width)
self.axes = 'SYX'
# detect type of series
if planes == 1:
self.shape = self.shape[1:]
elif numpy.all(self.uic2tag.z_distance != 0):
self.axes = 'Z' + self.axes
elif numpy.all(numpy.diff(self.uic2tag.time_created) != 0):
self.axes = 'T' + self.axes
else:
self.axes = 'I' + self.axes
# DISABLED
if self.is_palette:
assert False, "color mapping disabled for stk"
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (3, planes, image_length, image_width)
else:
self.shape = (3, planes, image_depth, image_length,
image_width)
self.axes = 'C' + self.axes
else:
warnings.warn("palette cannot be applied")
self.is_palette = False
elif self.is_palette:
samples = 1
if 'extra_samples' in self.tags:
samples += len(self.extra_samples)
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples)
else:
self._shape = (1, samples, image_depth, image_length,
image_width, 1)
if self.color_map.shape[1] >= 2**self.bits_per_sample:
if image_depth == 1:
self.shape = (3, image_length, image_width)
self.axes = 'CYX'
else:
self.shape = (3, image_depth, image_length, image_width)
self.axes = 'CZYX'
else:
warnings.warn("palette cannot be applied")
self.is_palette = False
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
elif self.is_rgb or samples_per_pixel > 1:
if self.is_contig:
self._shape = (1, 1, image_depth, image_length, image_width,
samples_per_pixel)
if image_depth == 1:
self.shape = (image_length, image_width, samples_per_pixel)
self.axes = 'YXS'
else:
self.shape = (image_depth, image_length, image_width,
samples_per_pixel)
self.axes = 'ZYXS'
else:
self._shape = (1, samples_per_pixel, image_depth,
image_length, image_width, 1)
if image_depth == 1:
self.shape = (samples_per_pixel, image_length, image_width)
self.axes = 'SYX'
else:
self.shape = (samples_per_pixel, image_depth,
image_length, image_width)
self.axes = 'SZYX'
if False and self.is_rgb and 'extra_samples' in self.tags:
# DISABLED: only use RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for exs in extra_samples:
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
self.shape = self.shape[:-1] + (4,)
else:
self.shape = (4,) + self.shape[1:]
break
else:
self._shape = (1, 1, image_depth, image_length, image_width, 1)
if image_depth == 1:
self.shape = (image_length, image_width)
self.axes = 'YX'
else:
self.shape = (image_depth, image_length, image_width)
self.axes = 'ZYX'
if not self.compression and 'strip_byte_counts' not in tags:
self.strip_byte_counts = (
product(self.shape) * (self.bits_per_sample // 8), )
assert len(self.shape) == len(self.axes)
def asarray(self, squeeze=True, colormapped=True, rgbonly=False,
scale_mdgel=False, memmap=False, reopen=True):
"""Read image data from file and return as numpy array.
Raise ValueError if format is unsupported.
If any of 'squeeze', 'colormapped', or 'rgbonly' are not the default,
the shape of the returned array might be different from the page shape.
Parameters
----------
squeeze : bool
If True, all length-1 dimensions (except X and Y) are
squeezed out from result.
colormapped : bool
If True, color mapping is applied for palette-indexed images.
rgbonly : bool
If True, return RGB(A) image without additional extra samples.
memmap : bool
If True, use numpy.memmap to read arrays from file if possible.
            Intended for 64-bit systems and files with large contiguous data.
reopen : bool
If True and the parent file handle is closed, the file is
temporarily re-opened (and closed if no exception occurs).
scale_mdgel : bool
If True, MD Gel data will be scaled according to the private
metadata in the second TIFF page. The dtype will be float32.
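        Examples
        --------
        Illustrative sketch; the page is assumed to come from an open
        TiffFile instance.
        >>> page = tif.pages[0]  # doctest: +SKIP
        >>> data = page.asarray(memmap=True)  # doctest: +SKIP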
"""
if not self._shape:
return
if self.dtype is None:
raise ValueError("data type not supported: %s%i" % (
self.sample_format, self.bits_per_sample))
if self.compression not in TIFF_DECOMPESSORS:
raise ValueError("cannot decompress %s" % self.compression)
tag = self.tags['sample_format']
if tag.count != 1 and any((i-tag.value[0] for i in tag.value)):
raise ValueError("sample formats don't match %s" % str(tag.value))
fh = self.parent.filehandle
closed = fh.closed
if closed:
if reopen:
fh.open()
else:
raise IOError("file handle is closed")
dtype = self._dtype
shape = self._shape
image_width = self.image_width
image_length = self.image_length
image_depth = self.image_depth
typecode = self.parent.byteorder + dtype
bits_per_sample = self.bits_per_sample
if self.is_tiled:
if 'tile_offsets' in self.tags:
byte_counts = self.tile_byte_counts
offsets = self.tile_offsets
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
tile_width = self.tile_width
tile_length = self.tile_length
tile_depth = self.tile_depth if 'tile_depth' in self.tags else 1
tw = (image_width + tile_width - 1) // tile_width
tl = (image_length + tile_length - 1) // tile_length
td = (image_depth + tile_depth - 1) // tile_depth
shape = (shape[0], shape[1],
td*tile_depth, tl*tile_length, tw*tile_width, shape[-1])
tile_shape = (tile_depth, tile_length, tile_width, shape[-1])
runlen = tile_width
else:
byte_counts = self.strip_byte_counts
offsets = self.strip_offsets
runlen = image_width
if any(o < 2 for o in offsets):
raise ValueError("corrupted page")
if memmap and self._is_memmappable(rgbonly, colormapped):
result = fh.memmap_array(typecode, shape, offset=offsets[0])
elif self.is_contiguous:
fh.seek(offsets[0])
result = fh.read_array(typecode, product(shape))
result = result.astype('=' + dtype)
else:
if self.is_contig:
runlen *= self.samples_per_pixel
if bits_per_sample in (8, 16, 32, 64, 128):
if (bits_per_sample * runlen) % 8:
raise ValueError("data and sample size mismatch")
def unpack(x):
try:
return numpy.fromstring(x, typecode)
except ValueError as e:
# strips may be missing EOI
warnings.warn("unpack: %s" % e)
xlen = ((len(x) // (bits_per_sample // 8))
* (bits_per_sample // 8))
return numpy.fromstring(x[:xlen], typecode)
elif isinstance(bits_per_sample, tuple):
def unpack(x):
return unpackrgb(x, typecode, bits_per_sample)
else:
def unpack(x):
return unpackints(x, typecode, bits_per_sample, runlen)
decompress = TIFF_DECOMPESSORS[self.compression]
if self.compression == 'jpeg':
table = self.jpeg_tables if 'jpeg_tables' in self.tags else b''
decompress = lambda x: decodejpg(x, table, self.photometric)
if self.is_tiled:
result = numpy.empty(shape, dtype)
tw, tl, td, pl = 0, 0, 0, 0
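                # walk tiles in x, then y, then depth, then sample-plane
                # order, pasting each decoded tile into the padded result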
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
tile = unpack(decompress(fh.read(bytecount)))
tile.shape = tile_shape
if self.predictor == 'horizontal':
numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile)
result[0, pl, td:td+tile_depth,
tl:tl+tile_length, tw:tw+tile_width, :] = tile
del tile
tw += tile_width
if tw >= shape[4]:
tw, tl = 0, tl + tile_length
if tl >= shape[3]:
tl, td = 0, td + tile_depth
if td >= shape[2]:
td, pl = 0, pl + 1
result = result[...,
:image_depth, :image_length, :image_width, :]
else:
strip_size = (self.rows_per_strip * self.image_width *
self.samples_per_pixel)
result = numpy.empty(shape, dtype).reshape(-1)
index = 0
for offset, bytecount in zip(offsets, byte_counts):
fh.seek(offset)
strip = fh.read(bytecount)
strip = decompress(strip)
strip = unpack(strip)
size = min(result.size, strip.size, strip_size,
result.size - index)
result[index:index+size] = strip[:size]
del strip
index += size
result.shape = self._shape
if self.predictor == 'horizontal' and not (self.is_tiled and not
self.is_contiguous):
# work around bug in LSM510 software
if not (self.parent.is_lsm and not self.compression):
numpy.cumsum(result, axis=-2, dtype=dtype, out=result)
if colormapped and self.is_palette:
if self.color_map.shape[1] >= 2**bits_per_sample:
# FluoView and LSM might fail here
result = numpy.take(self.color_map,
result[:, 0, :, :, :, 0], axis=1)
elif rgbonly and self.is_rgb and 'extra_samples' in self.tags:
# return only RGB and first alpha channel if exists
extra_samples = self.extra_samples
if self.tags['extra_samples'].count == 1:
extra_samples = (extra_samples, )
for i, exs in enumerate(extra_samples):
if exs in ('unassalpha', 'assocalpha', 'unspecified'):
if self.is_contig:
result = result[..., [0, 1, 2, 3+i]]
else:
result = result[:, [0, 1, 2, 3+i]]
break
else:
if self.is_contig:
result = result[..., :3]
else:
result = result[:, :3]
if squeeze:
try:
result.shape = self.shape
except ValueError:
warnings.warn("failed to reshape from %s to %s" % (
str(result.shape), str(self.shape)))
if scale_mdgel and self.parent.is_mdgel:
# MD Gel stores private metadata in the second page
tags = self.parent.pages[1]
if tags.md_file_tag in (2, 128):
scale = tags.md_scale_pixel
scale = scale[0] / scale[1] # rational
result = result.astype('float32')
if tags.md_file_tag == 2:
                    result **= 2  # square root data format
result *= scale
if closed:
# TODO: file remains open if an exception occurred above
fh.close()
return result
def _is_memmappable(self, rgbonly, colormapped):
"""Return if image data in file can be memory mapped."""
if not self.parent.filehandle.is_file or not self.is_contiguous:
return False
return not (self.predictor or
(rgbonly and 'extra_samples' in self.tags) or
(colormapped and self.is_palette) or
({'big': '>', 'little': '<'}[sys.byteorder] !=
self.parent.byteorder))
@lazyattr
def is_contiguous(self):
"""Return offset and size of contiguous data, else None.
Excludes prediction and colormapping.
"""
if self.compression or self.bits_per_sample not in (8, 16, 32, 64):
return
if self.is_tiled:
if (self.image_width != self.tile_width or
self.image_length % self.tile_length or
self.tile_width % 16 or self.tile_length % 16):
return
if ('image_depth' in self.tags and 'tile_depth' in self.tags and
(self.image_length != self.tile_length or
self.image_depth % self.tile_depth)):
return
offsets = self.tile_offsets
byte_counts = self.tile_byte_counts
else:
offsets = self.strip_offsets
byte_counts = self.strip_byte_counts
if len(offsets) == 1:
return offsets[0], byte_counts[0]
if self.is_stk or all(offsets[i] + byte_counts[i] == offsets[i+1]
or byte_counts[i+1] == 0 # no data/ignore offset
for i in range(len(offsets)-1)):
return offsets[0], sum(byte_counts)
def __str__(self):
"""Return string containing information about page."""
s = ', '.join(s for s in (
' x '.join(str(i) for i in self.shape),
str(numpy.dtype(self.dtype)),
'%s bit' % str(self.bits_per_sample),
self.photometric if 'photometric' in self.tags else '',
self.compression if self.compression else 'raw',
'|'.join(t[3:] for t in (
'is_stk', 'is_lsm', 'is_nih', 'is_ome', 'is_imagej',
'is_micromanager', 'is_fluoview', 'is_mdgel', 'is_mediacy',
'is_sgi', 'is_reduced', 'is_tiled',
'is_contiguous') if getattr(self, t))) if s)
return "Page %i: %s" % (self.index, s)
def __getattr__(self, name):
"""Return tag value."""
if name in self.tags:
value = self.tags[name].value
setattr(self, name, value)
return value
raise AttributeError(name)
@lazyattr
def uic_tags(self):
"""Consolidate UIC tags."""
if not self.is_stk:
raise AttributeError("uic_tags")
tags = self.tags
result = Record()
result.number_planes = tags['uic2tag'].count
if 'image_description' in tags:
result.plane_descriptions = self.image_description.split(b'\x00')
if 'uic1tag' in tags:
result.update(tags['uic1tag'].value)
if 'uic3tag' in tags:
result.update(tags['uic3tag'].value) # wavelengths
if 'uic4tag' in tags:
result.update(tags['uic4tag'].value) # override uic1 tags
uic2tag = tags['uic2tag'].value
result.z_distance = uic2tag.z_distance
result.time_created = uic2tag.time_created
result.time_modified = uic2tag.time_modified
try:
result.datetime_created = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_created, uic2tag.time_created)]
result.datetime_modified = [
julian_datetime(*dt) for dt in
zip(uic2tag.date_modified, uic2tag.time_modified)]
except ValueError as e:
warnings.warn("uic_tags: %s" % e)
return result
@lazyattr
def imagej_tags(self):
"""Consolidate ImageJ metadata."""
if not self.is_imagej:
raise AttributeError("imagej_tags")
tags = self.tags
if 'image_description_1' in tags:
# MicroManager
result = imagej_description(tags['image_description_1'].value)
else:
result = imagej_description(tags['image_description'].value)
if 'imagej_metadata' in tags:
try:
result.update(imagej_metadata(
tags['imagej_metadata'].value,
tags['imagej_byte_counts'].value,
self.parent.byteorder))
except Exception as e:
warnings.warn(str(e))
return Record(result)
@lazyattr
def is_rgb(self):
"""True if page contains a RGB image."""
return ('photometric' in self.tags and
self.tags['photometric'].value == 2)
@lazyattr
def is_contig(self):
"""True if page contains a contiguous image."""
return ('planar_configuration' in self.tags and
self.tags['planar_configuration'].value == 1)
@lazyattr
def is_palette(self):
"""True if page contains a palette-colored image and not OME or STK."""
try:
# turn off color mapping for OME-TIFF and STK
if self.is_stk or self.is_ome or self.parent.is_ome:
return False
except IndexError:
pass # OME-XML not found in first page
return ('photometric' in self.tags and
self.tags['photometric'].value == 3)
@lazyattr
def is_tiled(self):
"""True if page contains tiled image."""
return 'tile_width' in self.tags
@lazyattr
def is_reduced(self):
"""True if page is a reduced image of another image."""
return bool(self.tags['new_subfile_type'].value & 1)
@lazyattr
def is_mdgel(self):
"""True if page contains md_file_tag tag."""
return 'md_file_tag' in self.tags
@lazyattr
def is_mediacy(self):
"""True if page contains Media Cybernetics Id tag."""
return ('mc_id' in self.tags and
self.tags['mc_id'].value.startswith(b'MC TIFF'))
@lazyattr
def is_stk(self):
"""True if page contains UIC2Tag tag."""
return 'uic2tag' in self.tags
@lazyattr
def is_lsm(self):
"""True if page contains LSM CZ_LSM_INFO tag."""
return 'cz_lsm_info' in self.tags
@lazyattr
def is_fluoview(self):
"""True if page contains FluoView MM_STAMP tag."""
return 'mm_stamp' in self.tags
@lazyattr
def is_nih(self):
"""True if page contains NIH image header."""
return 'nih_image_header' in self.tags
@lazyattr
def is_sgi(self):
"""True if page contains SGI image and tile depth tags."""
return 'image_depth' in self.tags and 'tile_depth' in self.tags
@lazyattr
def is_ome(self):
"""True if page contains OME-XML in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'<?xml version='))
@lazyattr
def is_shaped(self):
"""True if page contains shape in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'shape=('))
@lazyattr
def is_imagej(self):
"""True if page contains ImageJ description."""
return (
('image_description' in self.tags and
self.tags['image_description'].value.startswith(b'ImageJ=')) or
('image_description_1' in self.tags and # Micromanager
self.tags['image_description_1'].value.startswith(b'ImageJ=')))
@lazyattr
def is_micromanager(self):
"""True if page contains Micro-Manager metadata."""
return 'micromanager_metadata' in self.tags
class TiffTag(object):
"""A TIFF tag structure.
Attributes
----------
name : string
Attribute name of tag.
code : int
Decimal code of tag.
dtype : str
Datatype of tag data. One of TIFF_DATA_TYPES.
count : int
Number of values.
value : various types
Tag data as Python object.
value_offset : int
Location of value in file, if any.
All attributes are read-only.
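    Examples
    --------
    Illustrative sketch; tags are normally obtained from a TiffPage.
    >>> tag = page.tags['image_width']  # doctest: +SKIP
    >>> tag.code, tag.dtype, tag.count, tag.value  # doctest: +SKIP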
"""
__slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
'_offset', '_value', '_type')
class Error(Exception):
pass
def __init__(self, arg, **kwargs):
"""Initialize instance from file or arguments."""
self._offset = None
if hasattr(arg, '_fh'):
self._fromfile(arg, **kwargs)
else:
self._fromdata(arg, **kwargs)
def _fromdata(self, code, dtype, count, value, name=None):
"""Initialize instance from arguments."""
self.code = int(code)
self.name = name if name else str(code)
self.dtype = TIFF_DATA_TYPES[dtype]
self.count = int(count)
self.value = value
self._value = value
self._type = dtype
def _fromfile(self, parent):
"""Read tag structure from open file. Advance file cursor."""
fh = parent.filehandle
byteorder = parent.byteorder
self._offset = fh.tell()
self.value_offset = self._offset + parent.offset_size + 4
fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
data = fh.read(size)
code, dtype = struct.unpack(byteorder + fmt[:2], data[:4])
count, value = struct.unpack(byteorder + fmt[2:], data[4:])
self._value = value
self._type = dtype
if code in TIFF_TAGS:
name = TIFF_TAGS[code][0]
elif code in CUSTOM_TAGS:
name = CUSTOM_TAGS[code][0]
else:
name = str(code)
try:
dtype = TIFF_DATA_TYPES[self._type]
except KeyError:
raise TiffTag.Error("unknown tag data type %i" % self._type)
fmt = '%s%i%s' % (byteorder, count*int(dtype[0]), dtype[1])
size = struct.calcsize(fmt)
if size > parent.offset_size or code in CUSTOM_TAGS:
pos = fh.tell()
tof = {4: 'I', 8: 'Q'}[parent.offset_size]
self.value_offset = offset = struct.unpack(byteorder+tof, value)[0]
if offset < 0 or offset > parent.filehandle.size:
raise TiffTag.Error("corrupt file - invalid tag value offset")
elif offset < 4:
raise TiffTag.Error("corrupt value offset for tag %i" % code)
fh.seek(offset)
if code in CUSTOM_TAGS:
readfunc = CUSTOM_TAGS[code][1]
value = readfunc(fh, byteorder, dtype, count)
if isinstance(value, dict): # numpy.core.records.record
value = Record(value)
elif code in TIFF_TAGS or dtype[-1] == 's':
value = struct.unpack(fmt, fh.read(size))
else:
value = read_numpy(fh, byteorder, dtype, count)
fh.seek(pos)
else:
value = struct.unpack(fmt, value[:size])
if code not in CUSTOM_TAGS and code not in (273, 279, 324, 325):
# scalar value if not strip/tile offsets/byte_counts
if len(value) == 1:
value = value[0]
if (dtype.endswith('s') and isinstance(value, bytes)
and self._type != 7):
# TIFF ASCII fields can contain multiple strings,
# each terminated with a NUL
value = stripascii(value)
self.code = code
self.name = name
self.dtype = dtype
self.count = count
self.value = value
def _correct_lsm_bitspersample(self, parent):
"""Correct LSM bitspersample tag.
Old LSM writers may use a separate region for two 16-bit values,
although they fit into the tag value element of the tag.
"""
if self.code == 258 and self.count == 2:
# TODO: test this. Need example file.
warnings.warn("correcting LSM bitspersample tag")
fh = parent.filehandle
tof = {4: '<I', 8: '<Q'}[parent.offset_size]
self.value_offset = struct.unpack(tof, self._value)[0]
fh.seek(self.value_offset)
self.value = struct.unpack("<HH", fh.read(4))
def as_str(self):
"""Return value as human readable string."""
return ((str(self.value).split('\n', 1)[0]) if (self._type != 7)
else '<undefined>')
def __str__(self):
"""Return string containing information about tag."""
return ' '.join(str(getattr(self, s)) for s in self.__slots__)
class TiffSequence(object):
"""Sequence of image files.
The data shape and dtype of all files must match.
    Attributes
----------
files : list
List of file names.
shape : tuple
Shape of image sequence.
axes : str
Labels of axes in shape.
Examples
--------
>>> tifs = TiffSequence("test.oif.files/*.tif") # doctest: +SKIP
>>> tifs.shape, tifs.axes # doctest: +SKIP
((2, 100), 'CT')
>>> data = tifs.asarray() # doctest: +SKIP
>>> data.shape # doctest: +SKIP
(2, 100, 256, 256)
"""
_patterns = {
'axes': r"""
# matches Olympus OIF and Leica TIFF series
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
_?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
"""}
class ParseError(Exception):
pass
def __init__(self, files, imread=TiffFile, pattern='axes',
*args, **kwargs):
"""Initialize instance from multiple files.
Parameters
----------
files : str, or sequence of str
Glob pattern or sequence of file names.
imread : function or class
Image read function or class with asarray function returning numpy
array from single file.
pattern : str
Regular expression pattern that matches axes names and sequence
indices in file names.
By default this matches Olympus OIF and Leica TIFF series.
"""
if isinstance(files, basestring):
files = natural_sorted(glob.glob(files))
files = list(files)
if not files:
raise ValueError("no files found")
#if not os.path.isfile(files[0]):
# raise ValueError("file not found")
self.files = files
if hasattr(imread, 'asarray'):
# redefine imread
_imread = imread
def imread(fname, *args, **kwargs):
with _imread(fname) as im:
return im.asarray(*args, **kwargs)
self.imread = imread
self.pattern = self._patterns.get(pattern, pattern)
try:
self._parse()
if not self.axes:
self.axes = 'I'
except self.ParseError:
self.axes = 'I'
self.shape = (len(files),)
self._start_index = (0,)
self._indices = tuple((i,) for i in range(len(files)))
def __str__(self):
"""Return string with information about image sequence."""
return "\n".join([
self.files[0],
'* files: %i' % len(self.files),
'* axes: %s' % self.axes,
'* shape: %s' % str(self.shape)])
def __len__(self):
return len(self.files)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
pass
def asarray(self, memmap=False, *args, **kwargs):
"""Read image data from all files and return as single numpy array.
If memmap is True, return an array stored in a binary file on disk.
The args and kwargs parameters are passed to the imread function.
Raise IndexError or ValueError if image shapes don't match.
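        Examples
        --------
        Illustrative sketch; the glob pattern is a placeholder.
        >>> tifs = TiffSequence('test.oif.files/*.tif')  # doctest: +SKIP
        >>> data = tifs.asarray(memmap=True)  # doctest: +SKIP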
"""
im = self.imread(self.files[0], *args, **kwargs)
shape = self.shape + im.shape
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=im.dtype, shape=shape)
else:
result = numpy.zeros(shape, dtype=im.dtype)
result = result.reshape(-1, *im.shape)
for index, fname in zip(self._indices, self.files):
index = [i-j for i, j in zip(index, self._start_index)]
index = numpy.ravel_multi_index(index, self.shape)
im = self.imread(fname, *args, **kwargs)
result[index] = im
result.shape = shape
return result
def _parse(self):
"""Get axes and shape from file names."""
if not self.pattern:
raise self.ParseError("invalid pattern")
pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
matches = pattern.findall(self.files[0])
if not matches:
raise self.ParseError("pattern doesn't match file names")
matches = matches[-1]
if len(matches) % 2:
raise self.ParseError("pattern doesn't match axis name and index")
axes = ''.join(m for m in matches[::2] if m)
if not axes:
raise self.ParseError("pattern doesn't match file names")
indices = []
for fname in self.files:
matches = pattern.findall(fname)[-1]
if axes != ''.join(m for m in matches[::2] if m):
raise ValueError("axes don't match within the image sequence")
indices.append([int(m) for m in matches[1::2] if m])
shape = tuple(numpy.max(indices, axis=0))
start_index = tuple(numpy.min(indices, axis=0))
shape = tuple(i-j+1 for i, j in zip(shape, start_index))
if product(shape) != len(self.files):
warnings.warn("files are missing. Missing data are zeroed")
self.axes = axes.upper()
self.shape = shape
self._indices = indices
self._start_index = start_index
class Record(dict):
"""Dictionary with attribute access.
Can also be initialized with numpy.core.records.record.
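    A small sketch of keyword initialization and attribute access:
    >>> r = Record(shape=(256, 256), axes='YX')
    >>> r.axes
    'YX'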
"""
__slots__ = ()
def __init__(self, arg=None, **kwargs):
if kwargs:
arg = kwargs
elif arg is None:
arg = {}
try:
dict.__init__(self, arg)
except (TypeError, ValueError):
for i, name in enumerate(arg.dtype.names):
v = arg[i]
self[name] = v if v.dtype.char != 'S' else stripnull(v)
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
self.__setitem__(name, value)
def __str__(self):
"""Pretty print Record."""
s = []
lists = []
for k in sorted(self):
try:
if k.startswith('_'): # does not work with byte
continue
except AttributeError:
pass
v = self[k]
if isinstance(v, (list, tuple)) and len(v):
if isinstance(v[0], Record):
lists.append((k, v))
continue
elif isinstance(v[0], TiffPage):
v = [i.index for i in v if i]
s.append(
("* %s: %s" % (k, str(v))).split("\n", 1)[0]
[:PRINT_LINE_LEN].rstrip())
for k, v in lists:
l = []
for i, w in enumerate(v):
l.append("* %s[%i]\n %s" % (k, i,
str(w).replace("\n", "\n ")))
s.append('\n'.join(l))
return '\n'.join(s)
class TiffTags(Record):
"""Dictionary of TiffTag with attribute access."""
def __str__(self):
"""Return string with information about all tags."""
s = []
for tag in sorted(self.values(), key=lambda x: x.code):
typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1])
line = "* %i %s (%s) %s" % (
tag.code, tag.name, typecode, tag.as_str())
s.append(line[:PRINT_LINE_LEN].lstrip())
return '\n'.join(s)
class FileHandle(object):
"""Binary file handle.
* Handle embedded files (for CZI within CZI files).
    * Allow re-opening closed files (for multi-file formats such as OME-TIFF).
* Read numpy arrays and records from file like objects.
Only binary read, seek, tell, and close are supported on embedded files.
    When initialized from another file handle, do not use the other handle
    until this FileHandle is closed.
Attributes
----------
name : str
Name of the file.
path : str
Absolute path to file.
size : int
Size of file in bytes.
is_file : bool
        If True, file has a fileno and can be memory mapped.
All attributes are read-only.
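    Examples
    --------
    Illustrative sketch; 'test.tif' is a placeholder file name.
    >>> fh = FileHandle('test.tif')  # doctest: +SKIP
    >>> header = fh.read(8)  # doctest: +SKIP
    >>> fh.close()  # doctest: +SKIP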
"""
__slots__ = ('_fh', '_arg', '_mode', '_name', '_dir',
'_offset', '_size', '_close', 'is_file')
def __init__(self, arg, mode='rb', name=None, offset=None, size=None):
"""Initialize file handle from file name or another file handle.
Parameters
----------
arg : str, File, or FileHandle
File name or open file handle.
mode : str
File open mode in case 'arg' is a file name.
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
"""
self._fh = None
self._arg = arg
self._mode = mode
self._name = name
self._dir = ''
self._offset = offset
self._size = size
self._close = True
self.is_file = False
self.open()
def open(self):
"""Open or re-open file."""
if self._fh:
return # file is open
if isinstance(self._arg, basestring):
# file name
self._arg = os.path.abspath(self._arg)
self._dir, self._name = os.path.split(self._arg)
self._fh = open(self._arg, self._mode)
self._close = True
if self._offset is None:
self._offset = 0
elif isinstance(self._arg, FileHandle):
# FileHandle
self._fh = self._arg._fh
if self._offset is None:
self._offset = 0
self._offset += self._arg._offset
self._close = False
if not self._name:
if self._offset:
name, ext = os.path.splitext(self._arg._name)
self._name = "%s@%i%s" % (name, self._offset, ext)
else:
self._name = self._arg._name
self._dir = self._arg._dir
else:
# open file object
self._fh = self._arg
if self._offset is None:
self._offset = self._arg.tell()
self._close = False
if not self._name:
try:
self._dir, self._name = os.path.split(self._fh.name)
except AttributeError:
self._name = "Unnamed stream"
if self._offset:
self._fh.seek(self._offset)
if self._size is None:
pos = self._fh.tell()
self._fh.seek(self._offset, 2)
self._size = self._fh.tell()
self._fh.seek(pos)
try:
self._fh.fileno()
self.is_file = True
except Exception:
self.is_file = False
def read(self, size=-1):
"""Read 'size' bytes from file, or until EOF is reached."""
if size < 0 and self._offset:
size = self._size
return self._fh.read(size)
def memmap_array(self, dtype, shape, offset=0, mode='r', order='C'):
"""Return numpy.memmap of data stored in file."""
if not self.is_file:
raise ValueError("Can not memory map file without fileno.")
return numpy.memmap(self._fh, dtype=dtype, mode=mode,
offset=self._offset + offset,
shape=shape, order=order)
def read_array(self, dtype, count=-1, sep=""):
"""Return numpy array from file.
Work around numpy issue #2230, "numpy.fromfile does not accept
StringIO object" https://github.com/numpy/numpy/issues/2230.
"""
try:
return numpy.fromfile(self._fh, dtype, count, sep)
except IOError:
if count < 0:
size = self._size
else:
size = count * numpy.dtype(dtype).itemsize
data = self._fh.read(size)
return numpy.fromstring(data, dtype, count, sep)
def read_record(self, dtype, shape=1, byteorder=None):
"""Return numpy record from file."""
try:
rec = numpy.rec.fromfile(self._fh, dtype, shape,
byteorder=byteorder)
except Exception:
dtype = numpy.dtype(dtype)
if shape is None:
shape = self._size // dtype.itemsize
size = product(sequence(shape)) * dtype.itemsize
data = self._fh.read(size)
return numpy.rec.fromstring(data, dtype, shape,
byteorder=byteorder)
return rec[0] if shape == 1 else rec
def tell(self):
"""Return file's current position."""
return self._fh.tell() - self._offset
def seek(self, offset, whence=0):
"""Set file's current position."""
if self._offset:
if whence == 0:
self._fh.seek(self._offset + offset, whence)
return
elif whence == 2:
self._fh.seek(self._offset + self._size + offset, 0)
return
self._fh.seek(offset, whence)
def close(self):
"""Close file."""
if self._close and self._fh:
self._fh.close()
self._fh = None
self.is_file = False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __getattr__(self, name):
"""Return attribute from underlying file object."""
if self._offset:
warnings.warn(
"FileHandle: '%s' not implemented for embedded files" % name)
return getattr(self._fh, name)
@property
def name(self):
return self._name
@property
def dirname(self):
return self._dir
@property
def path(self):
return os.path.join(self._dir, self._name)
@property
def size(self):
return self._size
@property
def closed(self):
return self._fh is None
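# Illustrative usage sketch (added for clarity, not called anywhere in this
# module): FileHandle wraps a plain file name or delimits an embedded byte
# range of an already open handle.  The file name, offset and size used below
# are hypothetical.
def _example_filehandle():
    with FileHandle('example.tif') as fh:
        header = fh.read(8)  # read from the start of the outer file
        inner = FileHandle(fh, name='embedded', offset=1024, size=4096)
        inner.seek(0)  # position 0 of the embedded region
        data = inner.read(16)
        inner.close()  # does not close the outer handle
    return header, data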
def read_bytes(fh, byteorder, dtype, count):
"""Read tag data from file and return as byte string."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count).tostring()
def read_numpy(fh, byteorder, dtype, count):
"""Read tag data from file and return as numpy array."""
dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
return fh.read_array(dtype, count)
def read_json(fh, byteorder, dtype, count):
"""Read JSON tag data from file and return as object."""
data = fh.read(count)
try:
return json.loads(unicode(stripnull(data), 'utf-8'))
except ValueError:
warnings.warn("invalid JSON `%s`" % data)
def read_mm_header(fh, byteorder, dtype, count):
"""Read MM_HEADER tag from file and return as numpy.rec.array."""
return fh.read_record(MM_HEADER, byteorder=byteorder)
def read_mm_stamp(fh, byteorder, dtype, count):
"""Read MM_STAMP tag from file and return as numpy.array."""
return fh.read_array(byteorder+'f8', 8)
def read_uic1tag(fh, byteorder, dtype, count, plane_count=None):
"""Read MetaMorph STK UIC1Tag from file and return as dictionary.
Return empty dictionary if plane_count is unknown.
"""
assert dtype in ('2I', '1I') and byteorder == '<'
result = {}
if dtype == '2I':
# pre MetaMorph 2.5 (not tested)
values = fh.read_array('<u4', 2*count).reshape(count, 2)
result = {'z_distance': values[:, 0] / values[:, 1]}
elif plane_count:
for i in range(count):
tagid = struct.unpack('<I', fh.read(4))[0]
if tagid in (28, 29, 37, 40, 41):
# silently skip unexpected tags
fh.read(4)
continue
name, value = read_uic_tag(fh, tagid, plane_count, offset=True)
result[name] = value
return result
def read_uic2tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC2Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 6*plane_count).reshape(plane_count, 6)
return {
'z_distance': values[:, 0] / values[:, 1],
'date_created': values[:, 2], # julian days
'time_created': values[:, 3], # milliseconds
'date_modified': values[:, 4], # julian days
'time_modified': values[:, 5], # milliseconds
}
def read_uic3tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC3Tag from file and return as dictionary."""
assert dtype == '2I' and byteorder == '<'
values = fh.read_array('<u4', 2*plane_count).reshape(plane_count, 2)
return {'wavelengths': values[:, 0] / values[:, 1]}
def read_uic4tag(fh, byteorder, dtype, plane_count):
"""Read MetaMorph STK UIC4Tag from file and return as dictionary."""
assert dtype == '1I' and byteorder == '<'
result = {}
while True:
tagid = struct.unpack('<H', fh.read(2))[0]
if tagid == 0:
break
name, value = read_uic_tag(fh, tagid, plane_count, offset=False)
result[name] = value
return result
def read_uic_tag(fh, tagid, plane_count, offset):
"""Read a single UIC tag value from file and return tag name and value.
UIC1Tags use an offset.
"""
def read_int(count=1):
value = struct.unpack('<%iI' % count, fh.read(4*count))
return value[0] if count == 1 else value
try:
name, dtype = UIC_TAGS[tagid]
except KeyError:
# unknown tag
return '_tagid_%i' % tagid, read_int()
if offset:
pos = fh.tell()
if dtype not in (int, None):
off = read_int()
if off < 8:
warnings.warn("invalid offset for uic tag '%s': %i"
% (name, off))
return name, off
fh.seek(off)
if dtype is None:
# skip
name = '_' + name
value = read_int()
elif dtype is int:
# int
value = read_int()
elif dtype is Fraction:
# fraction
value = read_int(2)
value = value[0] / value[1]
elif dtype is julian_datetime:
# datetime
value = julian_datetime(*read_int(2))
elif dtype is read_uic_image_property:
# ImagePropertyEx
value = read_uic_image_property(fh)
elif dtype is str:
# pascal string
size = read_int()
if 0 <= size < 2**10:
value = struct.unpack('%is' % size, fh.read(size))[0][:-1]
value = stripnull(value)
elif offset:
value = ''
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
elif dtype == '%ip':
# sequence of pascal strings
value = []
for i in range(plane_count):
size = read_int()
if 0 <= size < 2**10:
string = struct.unpack('%is' % size, fh.read(size))[0][:-1]
string = stripnull(string)
value.append(string)
elif offset:
warnings.warn("corrupt string in uic tag '%s'" % name)
else:
raise ValueError("invalid string size %i" % size)
else:
# struct or numpy type
dtype = '<' + dtype
if '%i' in dtype:
dtype = dtype % plane_count
if '(' in dtype:
# numpy type
value = fh.read_array(dtype, 1)[0]
if value.shape[-1] == 2:
# assume fractions
value = value[..., 0] / value[..., 1]
else:
# struct format
value = struct.unpack(dtype, fh.read(struct.calcsize(dtype)))
if len(value) == 1:
value = value[0]
if offset:
fh.seek(pos + 4)
return name, value
def read_uic_image_property(fh):
"""Read UIC ImagePropertyEx tag from file and return as dict."""
# TODO: test this
size = struct.unpack('B', fh.read(1))[0]
name = struct.unpack('%is' % size, fh.read(size))[0][:-1]
flags, prop = struct.unpack('<IB', fh.read(5))
if prop == 1:
value = struct.unpack('II', fh.read(8))
value = value[0] / value[1]
else:
size = struct.unpack('B', fh.read(1))[0]
value = struct.unpack('%is' % size, fh.read(size))[0]
return dict(name=name, flags=flags, value=value)
def read_cz_lsm_info(fh, byteorder, dtype, count):
"""Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
assert byteorder == '<'
magic_number, structure_size = struct.unpack('<II', fh.read(8))
if magic_number not in (50350412, 67127628):
raise ValueError("not a valid CS_LSM_INFO structure")
fh.seek(-8, 1)
if structure_size < numpy.dtype(CZ_LSM_INFO).itemsize:
# adjust structure according to structure_size
cz_lsm_info = []
size = 0
for name, dtype in CZ_LSM_INFO:
size += numpy.dtype(dtype).itemsize
if size > structure_size:
break
cz_lsm_info.append((name, dtype))
else:
cz_lsm_info = CZ_LSM_INFO
return fh.read_record(cz_lsm_info, byteorder=byteorder)
def read_cz_lsm_floatpairs(fh):
"""Read LSM sequence of float pairs from file and return as list."""
size = struct.unpack('<i', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_positions(fh):
"""Read LSM positions from file and return as list."""
size = struct.unpack('<I', fh.read(4))[0]
return fh.read_array('<2f8', count=size)
def read_cz_lsm_time_stamps(fh):
"""Read LSM time stamps from file and return as list."""
size, count = struct.unpack('<ii', fh.read(8))
if size != (8 + 8 * count):
raise ValueError("lsm_time_stamps block is too short")
# return struct.unpack('<%dd' % count, fh.read(8*count))
return fh.read_array('<f8', count=count)
def read_cz_lsm_event_list(fh):
"""Read LSM events from file and return as list of (time, type, text)."""
count = struct.unpack('<II', fh.read(8))[1]
events = []
while count > 0:
esize, etime, etype = struct.unpack('<IdI', fh.read(16))
etext = stripnull(fh.read(esize - 16))
events.append((etime, etype, etext))
count -= 1
return events
def read_cz_lsm_scan_info(fh):
"""Read LSM scan information from file and return as Record."""
block = Record()
blocks = [block]
unpack = struct.unpack
if 0x10000000 != struct.unpack('<I', fh.read(4))[0]:
# not a Recording sub block
raise ValueError("not a lsm_scan_info structure")
fh.read(8)
while True:
entry, dtype, size = unpack('<III', fh.read(12))
if dtype == 2:
# ascii
value = stripnull(fh.read(size))
elif dtype == 4:
# long
value = unpack('<i', fh.read(4))[0]
elif dtype == 5:
# rational
value = unpack('<d', fh.read(8))[0]
else:
value = 0
if entry in CZ_LSM_SCAN_INFO_ARRAYS:
blocks.append(block)
name = CZ_LSM_SCAN_INFO_ARRAYS[entry]
newobj = []
setattr(block, name, newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_STRUCTS:
blocks.append(block)
newobj = Record()
block.append(newobj)
block = newobj
elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES:
name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry]
setattr(block, name, value)
elif entry == 0xffffffff:
# end sub block
block = blocks.pop()
else:
# unknown entry
setattr(block, "entry_0x%x" % entry, value)
if not blocks:
break
return block
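# Illustrative usage sketch (added for clarity, not called anywhere in this
# module): the nested Record returned by read_cz_lsm_scan_info mirrors the LSM
# block hierarchy.  'fh' must be a FileHandle positioned at the scan
# information block; which sub blocks and attributes exist depends on the file.
def _example_cz_lsm_scan_info(fh):
    scan_info = read_cz_lsm_scan_info(fh)
    for track in scan_info.get('tracks', []):
        print(track.get('name'), track.get('pixel_time'))
    return scan_info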
def read_nih_image_header(fh, byteorder, dtype, count):
"""Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array."""
a = fh.read_record(NIH_IMAGE_HEADER, byteorder=byteorder)
a = a.newbyteorder(byteorder)
a.xunit = a.xunit[:a._xunit_len]
a.um = a.um[:a._um_len]
return a
def read_micromanager_metadata(fh):
"""Read MicroManager non-TIFF settings from open file and return as dict.
The settings can be used to read image data without parsing the TIFF file.
Raise ValueError if file does not contain valid MicroManager metadata.
"""
fh.seek(0)
try:
byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
    except KeyError:  # fewer than two bytes read or unknown byte order mark
raise ValueError("not a MicroManager TIFF file")
results = {}
fh.seek(8)
(index_header, index_offset, display_header, display_offset,
comments_header, comments_offset, summary_header, summary_length
) = struct.unpack(byteorder + "IIIIIIII", fh.read(32))
if summary_header != 2355492:
raise ValueError("invalid MicroManager summary_header")
results['summary'] = read_json(fh, byteorder, None, summary_length)
if index_header != 54773648:
raise ValueError("invalid MicroManager index_header")
fh.seek(index_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 3453623:
raise ValueError("invalid MicroManager index_header")
data = struct.unpack(byteorder + "IIIII"*count, fh.read(20*count))
results['index_map'] = {
'channel': data[::5], 'slice': data[1::5], 'frame': data[2::5],
'position': data[3::5], 'offset': data[4::5]}
if display_header != 483765892:
raise ValueError("invalid MicroManager display_header")
fh.seek(display_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 347834724:
raise ValueError("invalid MicroManager display_header")
results['display_settings'] = read_json(fh, byteorder, None, count)
if comments_header != 99384722:
raise ValueError("invalid MicroManager comments_header")
fh.seek(comments_offset)
header, count = struct.unpack(byteorder + "II", fh.read(8))
if header != 84720485:
raise ValueError("invalid MicroManager comments_header")
results['comments'] = read_json(fh, byteorder, None, count)
return results
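# Illustrative usage sketch (added for clarity, not called anywhere in this
# module): MicroManager settings can be read from an open binary file without
# parsing the TIFF structure.
def _example_micromanager_metadata(filename):
    with open(filename, 'rb') as fh:
        meta = read_micromanager_metadata(fh)
    # meta contains 'summary', 'index_map', 'display_settings' and 'comments'
    return meta['index_map']['offset']  # file offsets of the individual frames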
def imagej_metadata(data, bytecounts, byteorder):
"""Return dict from ImageJ metadata tag value."""
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
def read_string(data, byteorder):
return _str(stripnull(data[0 if byteorder == '<' else 1::2]))
def read_double(data, byteorder):
return struct.unpack(byteorder+('d' * (len(data) // 8)), data)
def read_bytes(data, byteorder):
#return struct.unpack('b' * len(data), data)
return numpy.fromstring(data, 'uint8')
metadata_types = { # big endian
b'info': ('info', read_string),
b'labl': ('labels', read_string),
b'rang': ('ranges', read_double),
b'luts': ('luts', read_bytes),
b'roi ': ('roi', read_bytes),
b'over': ('overlays', read_bytes)}
metadata_types.update( # little endian
dict((k[::-1], v) for k, v in metadata_types.items()))
if not bytecounts:
raise ValueError("no ImageJ metadata")
if not data[:4] in (b'IJIJ', b'JIJI'):
raise ValueError("invalid ImageJ metadata")
header_size = bytecounts[0]
if header_size < 12 or header_size > 804:
raise ValueError("invalid ImageJ metadata header size")
ntypes = (header_size - 4) // 8
header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8])
pos = 4 + ntypes * 8
counter = 0
result = {}
for mtype, count in zip(header[::2], header[1::2]):
values = []
name, func = metadata_types.get(mtype, (_str(mtype), read_bytes))
for _ in range(count):
counter += 1
pos1 = pos + bytecounts[counter]
values.append(func(data[pos:pos1], byteorder))
pos = pos1
result[name.strip()] = values[0] if count == 1 else values
return result
def imagej_description(description):
"""Return dict from ImageJ image_description tag."""
def _bool(val):
return {b'true': True, b'false': False}[val.lower()]
_str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
result = {}
for line in description.splitlines():
try:
key, val = line.split(b'=')
except Exception:
continue
key = key.strip()
val = val.strip()
for dtype in (int, float, _bool, _str):
try:
val = dtype(val)
break
except Exception:
pass
result[_str(key)] = val
return result
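# Illustrative usage sketch (added for clarity, not called anywhere in this
# module): imagej_description turns the key=value lines of an ImageJ
# image_description tag into typed values.
def _example_imagej_description():
    desc = b'images=10\nspacing=0.2\nunit=micron\nloop=false\n'
    meta = imagej_description(desc)
    # meta == {'images': 10, 'spacing': 0.2, 'unit': 'micron', 'loop': False}
    return meta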
def _replace_by(module_function, package=None, warn=False):
"""Try replace decorated function by module.function.
This is used to replace local functions with functions from another
(usually compiled) module, if available.
Parameters
----------
module_function : str
Module and function path string (e.g. numpy.ones)
package : str, optional
The parent package of the module
warn : bool, optional
Whether to warn when wrapping fails
Returns
-------
func : function
Wrapped function, hopefully calling a function in another module.
Example
-------
>>> @_replace_by('_tifffile.decodepackbits')
... def decodepackbits(encoded):
... raise NotImplementedError
"""
def decorate(func, module_function=module_function, warn=warn):
try:
modname, function = module_function.split('.')
if package is None:
full_name = modname
else:
full_name = package + '.' + modname
if modname == '_tifffile':
func = getattr(_tifffile, function)
else:
module = __import__(full_name, fromlist=[modname])
func, oldfunc = getattr(module, function), func
globals()['__old_' + func.__name__] = oldfunc
except Exception:
if warn:
warnings.warn("failed to import %s" % module_function)
return func
return decorate
def decodejpg(encoded, tables=b'', photometric=None,
ycbcr_subsampling=None, ycbcr_positioning=None):
"""Decode JPEG encoded byte string (using _czifile extension module)."""
import _czifile
image = _czifile.decodejpg(encoded, tables)
if photometric == 'rgb' and ycbcr_subsampling and ycbcr_positioning:
# TODO: convert YCbCr to RGB
pass
return image.tostring()
@_replace_by('_tifffile.decodepackbits')
def decodepackbits(encoded):
"""Decompress PackBits encoded byte string.
PackBits is a simple byte-oriented run-length compression scheme.
"""
func = ord if sys.version[0] == '2' else lambda x: x
result = []
result_extend = result.extend
i = 0
try:
while True:
n = func(encoded[i]) + 1
i += 1
if n < 129:
result_extend(encoded[i:i+n])
i += n
elif n > 129:
result_extend(encoded[i:i+1] * (258-n))
i += 1
except IndexError:
pass
return b''.join(result) if sys.version[0] == '2' else bytes(result)
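# Illustrative usage sketch (added for clarity, not called anywhere in this
# module): decode the well known PackBits sample stream.
def _example_decodepackbits():
    encoded = b'\xfe\xaa\x02\x80\x00\x2a\xfd\xaa\x03\x80\x00\x2a\x22\xf7\xaa'
    expected = (b'\xaa\xaa\xaa\x80\x00\x2a' + b'\xaa' * 4 +
                b'\x80\x00\x2a\x22' + b'\xaa' * 10)
    assert decodepackbits(encoded) == expected
    return expected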
@_replace_by('_tifffile.decodelzw')
def decodelzw(encoded):
"""Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).
The strip must begin with a CLEAR code and end with an EOI code.
This is an implementation of the LZW decoding algorithm described in (1).
It is not compatible with old style LZW compressed files like quad-lzw.tif.
"""
len_encoded = len(encoded)
bitcount_max = len_encoded * 8
unpack = struct.unpack
if sys.version[0] == '2':
newtable = [chr(i) for i in range(256)]
else:
newtable = [bytes([i]) for i in range(256)]
newtable.extend((0, 0))
def next_code():
"""Return integer of `bitw` bits at `bitcount` position in encoded."""
start = bitcount // 8
s = encoded[start:start+4]
try:
code = unpack('>I', s)[0]
except Exception:
code = unpack('>I', s + b'\x00'*(4-len(s)))[0]
code <<= bitcount % 8
code &= mask
return code >> shr
switchbitch = { # code: bit-width, shr-bits, bit-mask
255: (9, 23, int(9*'1'+'0'*23, 2)),
511: (10, 22, int(10*'1'+'0'*22, 2)),
1023: (11, 21, int(11*'1'+'0'*21, 2)),
2047: (12, 20, int(12*'1'+'0'*20, 2)), }
bitw, shr, mask = switchbitch[255]
bitcount = 0
if len_encoded < 4:
raise ValueError("strip must be at least 4 characters long")
if next_code() != 256:
raise ValueError("strip must begin with CLEAR code")
code = 0
oldcode = 0
result = []
result_append = result.append
while True:
code = next_code() # ~5% faster when inlining this function
bitcount += bitw
if code == 257 or bitcount >= bitcount_max: # EOI
break
if code == 256: # CLEAR
table = newtable[:]
table_append = table.append
lentable = 258
bitw, shr, mask = switchbitch[255]
code = next_code()
bitcount += bitw
if code == 257: # EOI
break
result_append(table[code])
else:
if code < lentable:
decoded = table[code]
newcode = table[oldcode] + decoded[:1]
else:
newcode = table[oldcode]
newcode += newcode[:1]
decoded = newcode
result_append(decoded)
table_append(newcode)
lentable += 1
oldcode = code
if lentable in switchbitch:
bitw, shr, mask = switchbitch[lentable]
if code != 257:
warnings.warn("unexpected end of lzw stream (code %i)" % code)
return b''.join(result)
@_replace_by('_tifffile.unpackints')
def unpackints(data, dtype, itemsize, runlen=0):
"""Decompress byte string to array of integers of any bit size <= 32.
Parameters
----------
data : byte str
Data to decompress.
dtype : numpy.dtype or str
A numpy boolean or integer type.
itemsize : int
Number of bits per integer.
runlen : int
Number of consecutive integers, after which to start at next byte.
"""
if itemsize == 1: # bitarray
data = numpy.fromstring(data, '|B')
data = numpy.unpackbits(data)
if runlen % 8:
data = data.reshape(-1, runlen + (8 - runlen % 8))
data = data[:, :runlen].reshape(-1)
return data.astype(dtype)
dtype = numpy.dtype(dtype)
if itemsize in (8, 16, 32, 64):
return numpy.fromstring(data, dtype)
if itemsize < 1 or itemsize > 32:
raise ValueError("itemsize out of range: %i" % itemsize)
if dtype.kind not in "biu":
raise ValueError("invalid dtype")
itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
if itembytes != dtype.itemsize:
raise ValueError("dtype.itemsize too small")
if runlen == 0:
runlen = len(data) // itembytes
skipbits = runlen*itemsize % 8
if skipbits:
skipbits = 8 - skipbits
shrbits = itembytes*8 - itemsize
bitmask = int(itemsize*'1'+'0'*shrbits, 2)
dtypestr = '>' + dtype.char # dtype always big endian?
unpack = struct.unpack
l = runlen * (len(data)*8 // (runlen*itemsize + skipbits))
result = numpy.empty((l, ), dtype)
bitcount = 0
for i in range(len(result)):
start = bitcount // 8
s = data[start:start+itembytes]
try:
code = unpack(dtypestr, s)[0]
except Exception:
code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0]
code <<= bitcount % 8
code &= bitmask
result[i] = code >> shrbits
bitcount += itemsize
if (i+1) % runlen == 0:
bitcount += skipbits
return result
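# Illustrative usage sketch (added for clarity, not called anywhere in this
# module): expand two 12 bit samples that are tightly packed into three bytes.
def _example_unpackints():
    packed = b'\xab\xcd\xef'  # 0xabc followed by 0xdef
    values = unpackints(packed, 'uint16', 12, runlen=2)
    assert list(values) == [0xabc, 0xdef]
    return values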
def unpackrgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
"""Return array from byte string containing packed samples.
Use to unpack RGB565 or RGB555 to RGB888 format.
Parameters
----------
data : byte str
The data to be decoded. Samples in each pixel are stored consecutively.
Pixels are aligned to 8, 16, or 32 bit boundaries.
dtype : numpy.dtype
The sample data type. The byteorder applies also to the data stream.
bitspersample : tuple
Number of bits for each sample in a pixel.
rescale : bool
Upscale samples to the number of bits in dtype.
Returns
-------
result : ndarray
Flattened array of unpacked samples of native dtype.
Examples
--------
>>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
>>> print(unpackrgb(data, '<B', (5, 6, 5), False))
[ 1 1 1 31 63 31]
>>> print(unpackrgb(data, '<B', (5, 6, 5)))
[ 8 4 8 255 255 255]
>>> print(unpackrgb(data, '<B', (5, 5, 5)))
[ 16 8 8 255 255 255]
"""
dtype = numpy.dtype(dtype)
bits = int(numpy.sum(bitspersample))
if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)):
raise ValueError("sample size not supported %s" % str(bitspersample))
dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits)
data = numpy.fromstring(data, dtype.byteorder+dt)
result = numpy.empty((data.size, len(bitspersample)), dtype.char)
for i, bps in enumerate(bitspersample):
t = data >> int(numpy.sum(bitspersample[i+1:]))
t &= int('0b'+'1'*bps, 2)
if rescale:
o = ((dtype.itemsize * 8) // bps + 1) * bps
if o > data.dtype.itemsize * 8:
t = t.astype('I')
t *= (2**o - 1) // (2**bps - 1)
t //= 2**(o - (dtype.itemsize * 8))
result[:, i] = t
return result.reshape(-1)
def reorient(image, orientation):
"""Return reoriented view of image array.
Parameters
----------
image : numpy array
Non-squeezed output of asarray() functions.
Axes -3 and -2 must be image length and width respectively.
orientation : int or str
One of TIFF_ORIENTATIONS keys or values.
"""
o = TIFF_ORIENTATIONS.get(orientation, orientation)
if o == 'top_left':
return image
elif o == 'top_right':
return image[..., ::-1, :]
elif o == 'bottom_left':
return image[..., ::-1, :, :]
elif o == 'bottom_right':
return image[..., ::-1, ::-1, :]
elif o == 'left_top':
return numpy.swapaxes(image, -3, -2)
elif o == 'right_top':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
elif o == 'left_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
elif o == 'right_bottom':
return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :]
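# Illustrative usage sketch (added for clarity, not called anywhere in this
# module): reorient accepts either the numeric TIFF orientation code or its
# name and returns a view of the input array.
def _example_reorient():
    img = numpy.zeros((1, 480, 640, 3), 'uint8')
    assert reorient(img, 'bottom_right').shape == (1, 480, 640, 3)
    assert reorient(img, 6).shape == (1, 640, 480, 3)  # 6 is 'right_top'
    return img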
def squeeze_axes(shape, axes, skip='XY'):
"""Return shape and axes with single-dimensional entries removed.
Remove unused dimensions unless their axes are listed in 'skip'.
>>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC')
((5, 2, 1), 'TYX')
"""
if len(shape) != len(axes):
raise ValueError("dimensions of axes and shape don't match")
shape, axes = zip(*(i for i in zip(shape, axes)
if i[0] > 1 or i[1] in skip))
return shape, ''.join(axes)
def transpose_axes(data, axes, asaxes='CTZYX'):
"""Return data with its axes permuted to match specified axes.
A view is returned if possible.
>>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape
(5, 2, 1, 3, 4)
"""
for ax in axes:
if ax not in asaxes:
raise ValueError("unknown axis %s" % ax)
# add missing axes to data
shape = data.shape
for ax in reversed(asaxes):
if ax not in axes:
axes = ax + axes
shape = (1,) + shape
data = data.reshape(shape)
# transpose axes
data = data.transpose([axes.index(ax) for ax in asaxes])
return data
def stack_pages(pages, memmap=False, *args, **kwargs):
"""Read data from sequence of TiffPage and stack them vertically.
If memmap is True, return an array stored in a binary file on disk.
    Additional parameters are passed to the page asarray function.
"""
if len(pages) == 0:
raise ValueError("no pages")
if len(pages) == 1:
return pages[0].asarray(memmap=memmap, *args, **kwargs)
result = pages[0].asarray(*args, **kwargs)
shape = (len(pages),) + result.shape
if memmap:
with tempfile.NamedTemporaryFile() as fh:
result = numpy.memmap(fh, dtype=result.dtype, shape=shape)
else:
result = numpy.empty(shape, dtype=result.dtype)
for i, page in enumerate(pages):
result[i] = page.asarray(*args, **kwargs)
return result
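# Illustrative usage sketch (added for clarity, not called anywhere in this
# module): read several pages of equal shape into one array, optionally backed
# by a temporary memory map.  'filename' is a hypothetical multi-page TIFF.
def _example_stack_pages(filename):
    with TiffFile(filename) as tif:
        pages = tif.pages[2:12]  # any sequence of pages of the same shape
        stacked = stack_pages(pages, memmap=True)
    return stacked  # shape is (len(pages),) + pages[0].shape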
def stripnull(string):
"""Return string truncated at first null character.
Clean NULL terminated C strings.
>>> stripnull(b'string\\x00') # doctest: +SKIP
b'string'
"""
i = string.find(b'\x00')
return string if (i < 0) else string[:i]
def stripascii(string):
"""Return string truncated at last byte that is 7bit ASCII.
Clean NULL separated and terminated TIFF strings.
>>> stripascii(b'string\\x00string\\n\\x01\\x00') # doctest: +SKIP
b'string\\x00string\\n'
>>> stripascii(b'\\x00') # doctest: +SKIP
b''
"""
# TODO: pythonize this
ord_ = ord if sys.version_info[0] < 3 else lambda x: x
i = len(string)
while i:
i -= 1
if 8 < ord_(string[i]) < 127:
break
else:
i = -1
return string[:i+1]
def format_size(size):
"""Return file size as string from byte size."""
for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
if size < 2048:
return "%.f %s" % (size, unit)
size /= 1024.0
def sequence(value):
"""Return tuple containing value if value is not a sequence.
>>> sequence(1)
(1,)
>>> sequence([1])
[1]
"""
try:
len(value)
return value
except TypeError:
return (value, )
def product(iterable):
"""Return product of sequence of numbers.
Equivalent of functools.reduce(operator.mul, iterable, 1).
>>> product([2**8, 2**30])
274877906944
>>> product([])
1
"""
prod = 1
for i in iterable:
prod *= i
return prod
def natural_sorted(iterable):
"""Return human sorted list of strings.
E.g. for sorting file names.
>>> natural_sorted(['f1', 'f2', 'f10'])
['f1', 'f2', 'f10']
"""
def sortkey(x):
return [(int(c) if c.isdigit() else c) for c in re.split(numbers, x)]
numbers = re.compile(r'(\d+)')
return sorted(iterable, key=sortkey)
def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)):
"""Return datetime object from timestamp in Excel serial format.
Convert LSM time stamps.
>>> excel_datetime(40237.029999999795)
datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
"""
return epoch + datetime.timedelta(timestamp)
def julian_datetime(julianday, milisecond=0):
"""Return datetime from days since 1/1/4713 BC and ms since midnight.
Convert Julian dates according to MetaMorph.
>>> julian_datetime(2451576, 54362783)
datetime.datetime(2000, 2, 2, 15, 6, 2, 783)
"""
if julianday <= 1721423:
# no datetime before year 1
return None
a = julianday + 1
if a > 2299160:
alpha = math.trunc((a - 1867216.25) / 36524.25)
a += 1 + alpha - alpha // 4
b = a + (1524 if a > 1721423 else 1158)
c = math.trunc((b - 122.1) / 365.25)
d = math.trunc(365.25 * c)
e = math.trunc((b - d) / 30.6001)
day = b - d - math.trunc(30.6001 * e)
month = e - (1 if e < 13.5 else 13)
year = c - (4716 if month > 2.5 else 4715)
hour, milisecond = divmod(milisecond, 1000 * 60 * 60)
minute, milisecond = divmod(milisecond, 1000 * 60)
second, milisecond = divmod(milisecond, 1000)
return datetime.datetime(year, month, day,
hour, minute, second, milisecond)
def test_tifffile(directory='testimages', verbose=True):
"""Read all images in directory.
Print error message on failure.
>>> test_tifffile(verbose=False)
"""
successful = 0
failed = 0
start = time.time()
for f in glob.glob(os.path.join(directory, '*.*')):
if verbose:
print("\n%s>\n" % f.lower(), end='')
t0 = time.time()
try:
tif = TiffFile(f, multifile=True)
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
try:
img = tif.asarray()
except ValueError:
try:
img = tif[0].asarray()
except Exception as e:
if not verbose:
print(f, end=' ')
print("ERROR:", e)
failed += 1
continue
finally:
tif.close()
successful += 1
if verbose:
print("%s, %s %s, %s, %.0f ms" % (
str(tif), str(img.shape), img.dtype, tif[0].compression,
(time.time()-t0) * 1e3))
if verbose:
print("\nSuccessfully read %i of %i files in %.3f s\n" % (
successful, successful+failed, time.time()-start))
class TIFF_SUBFILE_TYPES(object):
def __getitem__(self, key):
result = []
if key & 1:
result.append('reduced_image')
if key & 2:
result.append('page')
if key & 4:
result.append('mask')
return tuple(result)
TIFF_PHOTOMETRICS = {
0: 'miniswhite',
1: 'minisblack',
2: 'rgb',
3: 'palette',
4: 'mask',
5: 'separated', # CMYK
6: 'ycbcr',
8: 'cielab',
9: 'icclab',
10: 'itulab',
32803: 'cfa', # Color Filter Array
32844: 'logl',
32845: 'logluv',
34892: 'linear_raw'
}
TIFF_COMPESSIONS = {
1: None,
2: 'ccittrle',
3: 'ccittfax3',
4: 'ccittfax4',
5: 'lzw',
6: 'ojpeg',
7: 'jpeg',
8: 'adobe_deflate',
9: 't85',
10: 't43',
32766: 'next',
32771: 'ccittrlew',
32773: 'packbits',
32809: 'thunderscan',
32895: 'it8ctpad',
32896: 'it8lw',
32897: 'it8mp',
32898: 'it8bl',
32908: 'pixarfilm',
32909: 'pixarlog',
32946: 'deflate',
32947: 'dcs',
34661: 'jbig',
34676: 'sgilog',
34677: 'sgilog24',
34712: 'jp2000',
34713: 'nef',
}
TIFF_DECOMPESSORS = {
None: lambda x: x,
'adobe_deflate': zlib.decompress,
'deflate': zlib.decompress,
'packbits': decodepackbits,
'lzw': decodelzw,
# 'jpeg': decodejpg
}
TIFF_DATA_TYPES = {
1: '1B', # BYTE 8-bit unsigned integer.
2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code;
# the last byte must be NULL (binary zero).
3: '1H', # SHORT 16-bit (2-byte) unsigned integer
4: '1I', # LONG 32-bit (4-byte) unsigned integer.
5: '2I', # RATIONAL Two LONGs: the first represents the numerator of
# a fraction; the second, the denominator.
6: '1b', # SBYTE An 8-bit signed (twos-complement) integer.
7: '1s', # UNDEFINED An 8-bit byte that may contain anything,
# depending on the definition of the field.
8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer.
9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer.
10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator
# of a fraction, the second the denominator.
11: '1f', # FLOAT Single precision (4-byte) IEEE format.
12: '1d', # DOUBLE Double precision (8-byte) IEEE format.
13: '1I', # IFD unsigned 4 byte IFD offset.
#14: '', # UNICODE
#15: '', # COMPLEX
16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff)
17: '1q', # SLONG8 signed 8 byte integer (BigTiff)
18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff)
}
TIFF_SAMPLE_FORMATS = {
1: 'uint',
2: 'int',
3: 'float',
#4: 'void',
#5: 'complex_int',
6: 'complex',
}
TIFF_SAMPLE_DTYPES = {
('uint', 1): '?', # bitmap
('uint', 2): 'B',
('uint', 3): 'B',
('uint', 4): 'B',
('uint', 5): 'B',
('uint', 6): 'B',
('uint', 7): 'B',
('uint', 8): 'B',
('uint', 9): 'H',
('uint', 10): 'H',
('uint', 11): 'H',
('uint', 12): 'H',
('uint', 13): 'H',
('uint', 14): 'H',
('uint', 15): 'H',
('uint', 16): 'H',
('uint', 17): 'I',
('uint', 18): 'I',
('uint', 19): 'I',
('uint', 20): 'I',
('uint', 21): 'I',
('uint', 22): 'I',
('uint', 23): 'I',
('uint', 24): 'I',
('uint', 25): 'I',
('uint', 26): 'I',
('uint', 27): 'I',
('uint', 28): 'I',
('uint', 29): 'I',
('uint', 30): 'I',
('uint', 31): 'I',
('uint', 32): 'I',
('uint', 64): 'Q',
('int', 8): 'b',
('int', 16): 'h',
('int', 32): 'i',
('int', 64): 'q',
('float', 16): 'e',
('float', 32): 'f',
('float', 64): 'd',
('complex', 64): 'F',
('complex', 128): 'D',
('uint', (5, 6, 5)): 'B',
}
TIFF_ORIENTATIONS = {
1: 'top_left',
2: 'top_right',
3: 'bottom_right',
4: 'bottom_left',
5: 'left_top',
6: 'right_top',
7: 'right_bottom',
8: 'left_bottom',
}
# TODO: is there a standard for character axes labels?
AXES_LABELS = {
'X': 'width',
'Y': 'height',
'Z': 'depth',
'S': 'sample', # rgb(a)
'I': 'series', # general sequence, plane, page, IFD
'T': 'time',
'C': 'channel', # color, emission wavelength
'A': 'angle',
'P': 'phase', # formerly F # P is Position in LSM!
'R': 'tile', # region, point, mosaic
'H': 'lifetime', # histogram
'E': 'lambda', # excitation wavelength
'L': 'exposure', # lux
'V': 'event',
'Q': 'other',
#'M': 'mosaic', # LSM 6
}
AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items()))
# Map OME pixel types to numpy dtype
OME_PIXEL_TYPES = {
'int8': 'i1',
'int16': 'i2',
'int32': 'i4',
'uint8': 'u1',
'uint16': 'u2',
'uint32': 'u4',
'float': 'f4',
# 'bit': 'bit',
'double': 'f8',
'complex': 'c8',
'double-complex': 'c16',
}
# NIH Image PicHeader v1.63
NIH_IMAGE_HEADER = [
('fileid', 'a8'),
('nlines', 'i2'),
('pixelsperline', 'i2'),
('version', 'i2'),
('oldlutmode', 'i2'),
('oldncolors', 'i2'),
('colors', 'u1', (3, 32)),
('oldcolorstart', 'i2'),
('colorwidth', 'i2'),
('extracolors', 'u2', (6, 3)),
('nextracolors', 'i2'),
('foregroundindex', 'i2'),
('backgroundindex', 'i2'),
('xscale', 'f8'),
('_x0', 'i2'),
('_x1', 'i2'),
('units_t', 'i2'), # NIH_UNITS_TYPE
('p1', [('x', 'i2'), ('y', 'i2')]),
('p2', [('x', 'i2'), ('y', 'i2')]),
('curvefit_t', 'i2'), # NIH_CURVEFIT_TYPE
('ncoefficients', 'i2'),
('coeff', 'f8', 6),
('_um_len', 'u1'),
('um', 'a15'),
('_x2', 'u1'),
('binarypic', 'b1'),
('slicestart', 'i2'),
('sliceend', 'i2'),
('scalemagnification', 'f4'),
('nslices', 'i2'),
('slicespacing', 'f4'),
('currentslice', 'i2'),
('frameinterval', 'f4'),
('pixelaspectratio', 'f4'),
('colorstart', 'i2'),
('colorend', 'i2'),
('ncolors', 'i2'),
('fill1', '3u2'),
('fill2', '3u2'),
('colortable_t', 'u1'), # NIH_COLORTABLE_TYPE
('lutmode_t', 'u1'), # NIH_LUTMODE_TYPE
('invertedtable', 'b1'),
('zeroclip', 'b1'),
('_xunit_len', 'u1'),
('xunit', 'a11'),
('stacktype_t', 'i2'), # NIH_STACKTYPE_TYPE
]
NIH_COLORTABLE_TYPE = (
'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow',
'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum')
NIH_LUTMODE_TYPE = (
'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale',
'ColorLut', 'CustomGrayscale')
NIH_CURVEFIT_TYPE = (
'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit',
'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated',
'UncalibratedOD')
NIH_UNITS_TYPE = (
'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters',
'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits')
NIH_STACKTYPE_TYPE = (
'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack')
# Map Universal Imaging Corporation MetaMorph internal tag ids to name and type
UIC_TAGS = {
0: ('auto_scale', int),
1: ('min_scale', int),
2: ('max_scale', int),
3: ('spatial_calibration', int),
4: ('x_calibration', Fraction),
5: ('y_calibration', Fraction),
6: ('calibration_units', str),
7: ('name', str),
8: ('thresh_state', int),
9: ('thresh_state_red', int),
10: ('tagid_10', None), # undefined
11: ('thresh_state_green', int),
12: ('thresh_state_blue', int),
13: ('thresh_state_lo', int),
14: ('thresh_state_hi', int),
15: ('zoom', int),
16: ('create_time', julian_datetime),
17: ('last_saved_time', julian_datetime),
18: ('current_buffer', int),
19: ('gray_fit', None),
20: ('gray_point_count', None),
21: ('gray_x', Fraction),
22: ('gray_y', Fraction),
23: ('gray_min', Fraction),
24: ('gray_max', Fraction),
25: ('gray_unit_name', str),
26: ('standard_lut', int),
27: ('wavelength', int),
28: ('stage_position', '(%i,2,2)u4'), # N xy positions as fractions
29: ('camera_chip_offset', '(%i,2,2)u4'), # N xy offsets as fractions
30: ('overlay_mask', None),
31: ('overlay_compress', None),
32: ('overlay', None),
33: ('special_overlay_mask', None),
34: ('special_overlay_compress', None),
35: ('special_overlay', None),
36: ('image_property', read_uic_image_property),
37: ('stage_label', '%ip'), # N str
38: ('autoscale_lo_info', Fraction),
39: ('autoscale_hi_info', Fraction),
40: ('absolute_z', '(%i,2)u4'), # N fractions
41: ('absolute_z_valid', '(%i,)u4'), # N long
42: ('gamma', int),
43: ('gamma_red', int),
44: ('gamma_green', int),
45: ('gamma_blue', int),
46: ('camera_bin', int),
47: ('new_lut', int),
48: ('image_property_ex', None),
49: ('plane_property', int),
50: ('user_lut_table', '(256,3)u1'),
51: ('red_autoscale_info', int),
52: ('red_autoscale_lo_info', Fraction),
53: ('red_autoscale_hi_info', Fraction),
54: ('red_minscale_info', int),
55: ('red_maxscale_info', int),
56: ('green_autoscale_info', int),
57: ('green_autoscale_lo_info', Fraction),
58: ('green_autoscale_hi_info', Fraction),
59: ('green_minscale_info', int),
60: ('green_maxscale_info', int),
61: ('blue_autoscale_info', int),
62: ('blue_autoscale_lo_info', Fraction),
63: ('blue_autoscale_hi_info', Fraction),
64: ('blue_min_scale_info', int),
65: ('blue_max_scale_info', int),
#66: ('overlay_plane_color', read_uic_overlay_plane_color),
}
# Olympus FluoView
MM_DIMENSION = [
('name', 'a16'),
('size', 'i4'),
('origin', 'f8'),
('resolution', 'f8'),
('unit', 'a64'),
]
MM_HEADER = [
('header_flag', 'i2'),
('image_type', 'u1'),
('image_name', 'a257'),
('offset_data', 'u4'),
('palette_size', 'i4'),
('offset_palette0', 'u4'),
('offset_palette1', 'u4'),
('comment_size', 'i4'),
('offset_comment', 'u4'),
('dimensions', MM_DIMENSION, 10),
('offset_position', 'u4'),
('map_type', 'i2'),
('map_min', 'f8'),
('map_max', 'f8'),
('min_value', 'f8'),
('max_value', 'f8'),
('offset_map', 'u4'),
('gamma', 'f8'),
('offset', 'f8'),
('gray_channel', MM_DIMENSION),
('offset_thumbnail', 'u4'),
('voice_field', 'i4'),
('offset_voice_field', 'u4'),
]
# Carl Zeiss LSM
CZ_LSM_INFO = [
('magic_number', 'u4'),
('structure_size', 'i4'),
('dimension_x', 'i4'),
('dimension_y', 'i4'),
('dimension_z', 'i4'),
('dimension_channels', 'i4'),
('dimension_time', 'i4'),
('data_type', 'i4'), # CZ_DATA_TYPES
('thumbnail_x', 'i4'),
('thumbnail_y', 'i4'),
('voxel_size_x', 'f8'),
('voxel_size_y', 'f8'),
('voxel_size_z', 'f8'),
('origin_x', 'f8'),
('origin_y', 'f8'),
('origin_z', 'f8'),
('scan_type', 'u2'),
('spectral_scan', 'u2'),
('type_of_data', 'u4'), # CZ_TYPE_OF_DATA
('offset_vector_overlay', 'u4'),
('offset_input_lut', 'u4'),
('offset_output_lut', 'u4'),
('offset_channel_colors', 'u4'),
('time_interval', 'f8'),
('offset_channel_data_types', 'u4'),
('offset_scan_info', 'u4'), # CZ_LSM_SCAN_INFO
('offset_ks_data', 'u4'),
('offset_time_stamps', 'u4'),
('offset_event_list', 'u4'),
('offset_roi', 'u4'),
('offset_bleach_roi', 'u4'),
('offset_next_recording', 'u4'),
# LSM 2.0 ends here
('display_aspect_x', 'f8'),
('display_aspect_y', 'f8'),
('display_aspect_z', 'f8'),
('display_aspect_time', 'f8'),
('offset_mean_of_roi_overlay', 'u4'),
('offset_topo_isoline_overlay', 'u4'),
('offset_topo_profile_overlay', 'u4'),
('offset_linescan_overlay', 'u4'),
('offset_toolbar_flags', 'u4'),
('offset_channel_wavelength', 'u4'),
('offset_channel_factors', 'u4'),
('objective_sphere_correction', 'f8'),
('offset_unmix_parameters', 'u4'),
# LSM 3.2, 4.0 end here
('offset_acquisition_parameters', 'u4'),
('offset_characteristics', 'u4'),
('offset_palette', 'u4'),
('time_difference_x', 'f8'),
('time_difference_y', 'f8'),
('time_difference_z', 'f8'),
('internal_use_1', 'u4'),
('dimension_p', 'i4'),
('dimension_m', 'i4'),
('dimensions_reserved', '16i4'),
('offset_tile_positions', 'u4'),
('reserved_1', '9u4'),
('offset_positions', 'u4'),
('reserved_2', '21u4'), # must be 0
]
# Import functions for LSM_INFO sub-records
CZ_LSM_INFO_READERS = {
'scan_info': read_cz_lsm_scan_info,
'time_stamps': read_cz_lsm_time_stamps,
'event_list': read_cz_lsm_event_list,
'channel_colors': read_cz_lsm_floatpairs,
'positions': read_cz_lsm_floatpairs,
'tile_positions': read_cz_lsm_floatpairs,
}
# Map cz_lsm_info.scan_type to dimension order
CZ_SCAN_TYPES = {
0: 'XYZCT', # x-y-z scan
1: 'XYZCT', # z scan (x-z plane)
2: 'XYZCT', # line scan
3: 'XYTCZ', # time series x-y
4: 'XYZTC', # time series x-z
5: 'XYTCZ', # time series 'Mean of ROIs'
6: 'XYZTC', # time series x-y-z
7: 'XYCTZ', # spline scan
8: 'XYCZT', # spline scan x-z
9: 'XYTCZ', # time series spline plane x-z
10: 'XYZCT', # point mode
}
# Map dimension codes to cz_lsm_info attribute
CZ_DIMENSIONS = {
'X': 'dimension_x',
'Y': 'dimension_y',
'Z': 'dimension_z',
'C': 'dimension_channels',
'T': 'dimension_time',
}
# Description of cz_lsm_info.data_type
CZ_DATA_TYPES = {
0: 'varying data types',
1: '8 bit unsigned integer',
2: '12 bit unsigned integer',
5: '32 bit float',
}
# Description of cz_lsm_info.type_of_data
CZ_TYPE_OF_DATA = {
0: 'Original scan data',
1: 'Calculated data',
2: '3D reconstruction',
3: 'Topography height map',
}
CZ_LSM_SCAN_INFO_ARRAYS = {
0x20000000: "tracks",
0x30000000: "lasers",
0x60000000: "detection_channels",
0x80000000: "illumination_channels",
0xa0000000: "beam_splitters",
0xc0000000: "data_channels",
0x11000000: "timers",
0x13000000: "markers",
}
CZ_LSM_SCAN_INFO_STRUCTS = {
# 0x10000000: "recording",
0x40000000: "track",
0x50000000: "laser",
0x70000000: "detection_channel",
0x90000000: "illumination_channel",
0xb0000000: "beam_splitter",
0xd0000000: "data_channel",
0x12000000: "timer",
0x14000000: "marker",
}
CZ_LSM_SCAN_INFO_ATTRIBUTES = {
# recording
0x10000001: "name",
0x10000002: "description",
0x10000003: "notes",
0x10000004: "objective",
0x10000005: "processing_summary",
0x10000006: "special_scan_mode",
0x10000007: "scan_type",
0x10000008: "scan_mode",
0x10000009: "number_of_stacks",
0x1000000a: "lines_per_plane",
0x1000000b: "samples_per_line",
0x1000000c: "planes_per_volume",
0x1000000d: "images_width",
0x1000000e: "images_height",
0x1000000f: "images_number_planes",
0x10000010: "images_number_stacks",
0x10000011: "images_number_channels",
0x10000012: "linscan_xy_size",
0x10000013: "scan_direction",
0x10000014: "time_series",
0x10000015: "original_scan_data",
0x10000016: "zoom_x",
0x10000017: "zoom_y",
0x10000018: "zoom_z",
0x10000019: "sample_0x",
0x1000001a: "sample_0y",
0x1000001b: "sample_0z",
0x1000001c: "sample_spacing",
0x1000001d: "line_spacing",
0x1000001e: "plane_spacing",
0x1000001f: "plane_width",
0x10000020: "plane_height",
0x10000021: "volume_depth",
0x10000023: "nutation",
0x10000034: "rotation",
0x10000035: "precession",
0x10000036: "sample_0time",
0x10000037: "start_scan_trigger_in",
0x10000038: "start_scan_trigger_out",
0x10000039: "start_scan_event",
0x10000040: "start_scan_time",
0x10000041: "stop_scan_trigger_in",
0x10000042: "stop_scan_trigger_out",
0x10000043: "stop_scan_event",
0x10000044: "stop_scan_time",
0x10000045: "use_rois",
0x10000046: "use_reduced_memory_rois",
0x10000047: "user",
0x10000048: "use_bc_correction",
0x10000049: "position_bc_correction1",
0x10000050: "position_bc_correction2",
0x10000051: "interpolation_y",
0x10000052: "camera_binning",
0x10000053: "camera_supersampling",
0x10000054: "camera_frame_width",
0x10000055: "camera_frame_height",
0x10000056: "camera_offset_x",
0x10000057: "camera_offset_y",
0x10000059: "rt_binning",
0x1000005a: "rt_frame_width",
0x1000005b: "rt_frame_height",
0x1000005c: "rt_region_width",
0x1000005d: "rt_region_height",
0x1000005e: "rt_offset_x",
0x1000005f: "rt_offset_y",
0x10000060: "rt_zoom",
0x10000061: "rt_line_period",
0x10000062: "prescan",
0x10000063: "scan_direction_z",
# track
0x40000001: "multiplex_type", # 0 after line; 1 after frame
0x40000002: "multiplex_order",
0x40000003: "sampling_mode", # 0 sample; 1 line average; 2 frame average
0x40000004: "sampling_method", # 1 mean; 2 sum
0x40000005: "sampling_number",
0x40000006: "acquire",
0x40000007: "sample_observation_time",
0x4000000b: "time_between_stacks",
0x4000000c: "name",
0x4000000d: "collimator1_name",
0x4000000e: "collimator1_position",
0x4000000f: "collimator2_name",
0x40000010: "collimator2_position",
0x40000011: "is_bleach_track",
0x40000012: "is_bleach_after_scan_number",
0x40000013: "bleach_scan_number",
0x40000014: "trigger_in",
0x40000015: "trigger_out",
0x40000016: "is_ratio_track",
0x40000017: "bleach_count",
0x40000018: "spi_center_wavelength",
0x40000019: "pixel_time",
0x40000021: "condensor_frontlens",
0x40000023: "field_stop_value",
0x40000024: "id_condensor_aperture",
0x40000025: "condensor_aperture",
0x40000026: "id_condensor_revolver",
0x40000027: "condensor_filter",
0x40000028: "id_transmission_filter1",
0x40000029: "id_transmission1",
0x40000030: "id_transmission_filter2",
0x40000031: "id_transmission2",
0x40000032: "repeat_bleach",
0x40000033: "enable_spot_bleach_pos",
0x40000034: "spot_bleach_posx",
0x40000035: "spot_bleach_posy",
0x40000036: "spot_bleach_posz",
0x40000037: "id_tubelens",
0x40000038: "id_tubelens_position",
0x40000039: "transmitted_light",
0x4000003a: "reflected_light",
0x4000003b: "simultan_grab_and_bleach",
0x4000003c: "bleach_pixel_time",
# laser
0x50000001: "name",
0x50000002: "acquire",
0x50000003: "power",
# detection_channel
0x70000001: "integration_mode",
0x70000002: "special_mode",
0x70000003: "detector_gain_first",
0x70000004: "detector_gain_last",
0x70000005: "amplifier_gain_first",
0x70000006: "amplifier_gain_last",
0x70000007: "amplifier_offs_first",
0x70000008: "amplifier_offs_last",
0x70000009: "pinhole_diameter",
0x7000000a: "counting_trigger",
0x7000000b: "acquire",
0x7000000c: "point_detector_name",
0x7000000d: "amplifier_name",
0x7000000e: "pinhole_name",
0x7000000f: "filter_set_name",
0x70000010: "filter_name",
0x70000013: "integrator_name",
0x70000014: "channel_name",
0x70000015: "detector_gain_bc1",
0x70000016: "detector_gain_bc2",
0x70000017: "amplifier_gain_bc1",
0x70000018: "amplifier_gain_bc2",
0x70000019: "amplifier_offset_bc1",
0x70000020: "amplifier_offset_bc2",
0x70000021: "spectral_scan_channels",
0x70000022: "spi_wavelength_start",
0x70000023: "spi_wavelength_stop",
0x70000026: "dye_name",
0x70000027: "dye_folder",
# illumination_channel
0x90000001: "name",
0x90000002: "power",
0x90000003: "wavelength",
0x90000004: "aquire",
0x90000005: "detchannel_name",
0x90000006: "power_bc1",
0x90000007: "power_bc2",
# beam_splitter
0xb0000001: "filter_set",
0xb0000002: "filter",
0xb0000003: "name",
# data_channel
0xd0000001: "name",
0xd0000003: "acquire",
0xd0000004: "color",
0xd0000005: "sample_type",
0xd0000006: "bits_per_sample",
0xd0000007: "ratio_type",
0xd0000008: "ratio_track1",
0xd0000009: "ratio_track2",
0xd000000a: "ratio_channel1",
0xd000000b: "ratio_channel2",
0xd000000c: "ratio_const1",
0xd000000d: "ratio_const2",
0xd000000e: "ratio_const3",
0xd000000f: "ratio_const4",
0xd0000010: "ratio_const5",
0xd0000011: "ratio_const6",
0xd0000012: "ratio_first_images1",
0xd0000013: "ratio_first_images2",
0xd0000014: "dye_name",
0xd0000015: "dye_folder",
0xd0000016: "spectrum",
0xd0000017: "acquire",
# timer
0x12000001: "name",
0x12000002: "description",
0x12000003: "interval",
0x12000004: "trigger_in",
0x12000005: "trigger_out",
0x12000006: "activation_time",
0x12000007: "activation_number",
# marker
0x14000001: "name",
0x14000002: "description",
0x14000003: "trigger_in",
0x14000004: "trigger_out",
}
# Map TIFF tag code to attribute name, default value, type, count, validator
TIFF_TAGS = {
254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()),
255: ('subfile_type', None, 3, 1,
{0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}),
256: ('image_width', None, 4, 1, None),
257: ('image_length', None, 4, 1, None),
258: ('bits_per_sample', 1, 3, 1, None),
259: ('compression', 1, 3, 1, TIFF_COMPESSIONS),
262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS),
266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}),
269: ('document_name', None, 2, None, None),
270: ('image_description', None, 2, None, None),
271: ('make', None, 2, None, None),
272: ('model', None, 2, None, None),
273: ('strip_offsets', None, 4, None, None),
274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS),
277: ('samples_per_pixel', 1, 3, 1, None),
278: ('rows_per_strip', 2**32-1, 4, 1, None),
279: ('strip_byte_counts', None, 4, None, None),
280: ('min_sample_value', None, 3, None, None),
281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample
282: ('x_resolution', None, 5, 1, None),
283: ('y_resolution', None, 5, 1, None),
284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}),
285: ('page_name', None, 2, None, None),
286: ('x_position', None, 5, 1, None),
287: ('y_position', None, 5, 1, None),
296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}),
297: ('page_number', None, 3, 2, None),
305: ('software', None, 2, None, None),
306: ('datetime', None, 2, None, None),
315: ('artist', None, 2, None, None),
316: ('host_computer', None, 2, None, None),
317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}),
318: ('white_point', None, 5, 2, None),
319: ('primary_chromaticities', None, 5, 6, None),
320: ('color_map', None, 3, None, None),
322: ('tile_width', None, 4, 1, None),
323: ('tile_length', None, 4, 1, None),
324: ('tile_offsets', None, 4, None, None),
325: ('tile_byte_counts', None, 4, None, None),
338: ('extra_samples', None, 3, None,
{0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}),
339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS),
340: ('smin_sample_value', None, None, None, None),
341: ('smax_sample_value', None, None, None, None),
347: ('jpeg_tables', None, 7, None, None),
530: ('ycbcr_subsampling', 1, 3, 2, None),
531: ('ycbcr_positioning', 1, 3, 1, None),
    32995: ('sgi_matteing', None, None, 1, None),  # use extra_samples
32996: ('sgi_datatype', None, None, 1, None), # use sample_format
32997: ('image_depth', None, 4, 1, None),
32998: ('tile_depth', None, 4, 1, None),
33432: ('copyright', None, 1, None, None),
33445: ('md_file_tag', None, 4, 1, None),
33446: ('md_scale_pixel', None, 5, 1, None),
33447: ('md_color_table', None, 3, None, None),
33448: ('md_lab_name', None, 2, None, None),
33449: ('md_sample_info', None, 2, None, None),
33450: ('md_prep_date', None, 2, None, None),
33451: ('md_prep_time', None, 2, None, None),
33452: ('md_file_units', None, 2, None, None),
33550: ('model_pixel_scale', None, 12, 3, None),
33922: ('model_tie_point', None, 12, None, None),
34665: ('exif_ifd', None, None, 1, None),
34735: ('geo_key_directory', None, 3, None, None),
34736: ('geo_double_params', None, 12, None, None),
34737: ('geo_ascii_params', None, 2, None, None),
34853: ('gps_ifd', None, None, 1, None),
37510: ('user_comment', None, None, None, None),
42112: ('gdal_metadata', None, 2, None, None),
42113: ('gdal_nodata', None, 2, None, None),
50289: ('mc_xy_position', None, 12, 2, None),
50290: ('mc_z_position', None, 12, 1, None),
50291: ('mc_xy_calibration', None, 12, 3, None),
50292: ('mc_lens_lem_na_n', None, 12, 3, None),
50293: ('mc_channel_name', None, 1, None, None),
50294: ('mc_ex_wavelength', None, 12, 1, None),
50295: ('mc_time_stamp', None, 12, 1, None),
50838: ('imagej_byte_counts', None, None, None, None),
65200: ('flex_xml', None, 2, None, None),
# code: (attribute name, default value, type, count, validator)
}
# Map custom TIFF tag codes to attribute names and import functions
CUSTOM_TAGS = {
700: ('xmp', read_bytes),
34377: ('photoshop', read_numpy),
33723: ('iptc', read_bytes),
34675: ('icc_profile', read_bytes),
33628: ('uic1tag', read_uic1tag), # Universal Imaging Corporation STK
33629: ('uic2tag', read_uic2tag),
33630: ('uic3tag', read_uic3tag),
33631: ('uic4tag', read_uic4tag),
34361: ('mm_header', read_mm_header), # Olympus FluoView
34362: ('mm_stamp', read_mm_stamp),
34386: ('mm_user_block', read_bytes),
34412: ('cz_lsm_info', read_cz_lsm_info), # Carl Zeiss LSM
43314: ('nih_image_header', read_nih_image_header),
# 40001: ('mc_ipwinscal', read_bytes),
40100: ('mc_id_old', read_bytes),
50288: ('mc_id', read_bytes),
50296: ('mc_frame_properties', read_bytes),
50839: ('imagej_metadata', read_bytes),
51123: ('micromanager_metadata', read_json),
}
# Max line length of printed output
PRINT_LINE_LEN = 79
def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
bitspersample=None, photometric='rgb', interpolation='nearest',
dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs):
"""Plot n-dimensional images using matplotlib.pyplot.
Return figure, subplot and plot axis.
Requires pyplot already imported ``from matplotlib import pyplot``.
Parameters
----------
bitspersample : int or None
Number of bits per channel in integer RGB images.
photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
The color space of the image data.
title : str
Window and subplot title.
    figure : matplotlib.figure.Figure (optional)
        Matplotlib figure to use for plotting.
    subplot : int
        A matplotlib.pyplot.subplot axis.
    maxdim : int
        Maximum image size in any dimension.
kwargs : optional
Arguments for matplotlib.pyplot.imshow.
"""
#if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
# raise ValueError("Can't handle %s photometrics" % photometric)
# TODO: handle photometric == 'separated' (CMYK)
isrgb = photometric in ('rgb', 'palette')
data = numpy.atleast_2d(data.squeeze())
data = data[(slice(0, maxdim), ) * len(data.shape)]
dims = data.ndim
if dims < 2:
raise ValueError("not an image")
elif dims == 2:
dims = 0
isrgb = False
else:
if isrgb and data.shape[-3] in (3, 4):
data = numpy.swapaxes(data, -3, -2)
data = numpy.swapaxes(data, -2, -1)
elif not isrgb and (data.shape[-1] < data.shape[-2] // 16 and
data.shape[-1] < data.shape[-3] // 16 and
data.shape[-1] < 5):
data = numpy.swapaxes(data, -3, -1)
data = numpy.swapaxes(data, -2, -1)
isrgb = isrgb and data.shape[-1] in (3, 4)
dims -= 3 if isrgb else 2
if photometric == 'palette' and isrgb:
datamax = data.max()
if datamax > 255:
data >>= 8 # possible precision loss
data = data.astype('B')
elif data.dtype.kind in 'ui':
if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
try:
bitspersample = int(math.ceil(math.log(data.max(), 2)))
except Exception:
bitspersample = data.dtype.itemsize * 8
elif not isinstance(bitspersample, int):
# bitspersample can be tuple, e.g. (5, 6, 5)
bitspersample = data.dtype.itemsize * 8
datamax = 2**bitspersample
if isrgb:
if bitspersample < 8:
data <<= 8 - bitspersample
elif bitspersample > 8:
data >>= bitspersample - 8 # precision loss
data = data.astype('B')
elif data.dtype.kind == 'f':
datamax = data.max()
if isrgb and datamax > 1.0:
if data.dtype.char == 'd':
data = data.astype('f')
data /= datamax
elif data.dtype.kind == 'b':
datamax = 1
elif data.dtype.kind == 'c':
raise NotImplementedError("complex type") # TODO: handle complex types
if not isrgb:
if vmax is None:
vmax = datamax
if vmin is None:
            if data.dtype.kind == 'i':
                dtmin = numpy.iinfo(data.dtype).min
                vmin = numpy.min(data)
                if vmin == dtmin:
                    # ignore the sentinel minimum value of the integer type
                    vmin = numpy.min(data[data > dtmin])
            if data.dtype.kind == 'f':
                dtmin = numpy.finfo(data.dtype).min
                vmin = numpy.min(data)
                if vmin == dtmin:
                    # ignore the sentinel minimum value of the float type
                    vmin = numpy.min(data[data > dtmin])
else:
vmin = 0
pyplot = sys.modules['matplotlib.pyplot']
if figure is None:
pyplot.rc('font', family='sans-serif', weight='normal', size=8)
figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
facecolor='1.0', edgecolor='w')
try:
figure.canvas.manager.window.title(title)
except Exception:
pass
pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9,
left=0.1, right=0.95, hspace=0.05, wspace=0.0)
subplot = pyplot.subplot(subplot)
if title:
try:
title = unicode(title, 'Windows-1252')
except TypeError:
pass
pyplot.title(title, size=11)
if cmap is None:
if data.dtype.kind in 'ubf' or vmin == 0:
cmap = 'cubehelix'
else:
cmap = 'coolwarm'
if photometric == 'miniswhite':
cmap += '_r'
image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax,
cmap=cmap, interpolation=interpolation, **kwargs)
if not isrgb:
pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
def format_coord(x, y):
# callback function to format coordinate display in toolbar
x = int(x + 0.5)
y = int(y + 0.5)
try:
if dims:
return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
current, x, y)
else:
return "%s @ [%4i, %4i]" % (data[y, x], x, y)
except IndexError:
return ""
pyplot.gca().format_coord = format_coord
if dims:
current = list((0, ) * dims)
cur_ax_dat = [0, data[tuple(current)].squeeze()]
sliders = [pyplot.Slider(
pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
for slider in sliders:
slider.drawon = False
def set_image(current, sliders=sliders, data=data):
# change image and redraw canvas
cur_ax_dat[1] = data[tuple(current)].squeeze()
image.set_data(cur_ax_dat[1])
for ctrl, index in zip(sliders, current):
ctrl.eventson = False
ctrl.set_val(index)
ctrl.eventson = True
figure.canvas.draw()
def on_changed(index, axis, data=data, current=current):
# callback function for slider change event
index = int(round(index))
cur_ax_dat[0] = axis
if index == current[axis]:
return
if index >= data.shape[axis]:
index = 0
elif index < 0:
index = data.shape[axis] - 1
current[axis] = index
set_image(current)
def on_keypressed(event, data=data, current=current):
# callback function for key press event
key = event.key
axis = cur_ax_dat[0]
if str(key) in '0123456789':
on_changed(key, axis)
elif key == 'right':
on_changed(current[axis] + 1, axis)
elif key == 'left':
on_changed(current[axis] - 1, axis)
elif key == 'up':
cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1
elif key == 'down':
cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1
elif key == 'end':
on_changed(data.shape[axis] - 1, axis)
elif key == 'home':
on_changed(0, axis)
figure.canvas.mpl_connect('key_press_event', on_keypressed)
for axis, ctrl in enumerate(sliders):
ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
return figure, subplot, image
def _app_show():
"""Block the GUI. For use as skimage plugin."""
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show()
def main(argv=None):
"""Command line usage main function."""
if float(sys.version[0:3]) < 2.6:
print("This script requires Python version 2.6 or better.")
print("This is Python version %s" % sys.version)
return 0
if argv is None:
argv = sys.argv
import optparse
parser = optparse.OptionParser(
usage="usage: %prog [options] path",
description="Display image data in TIFF files.",
version="%%prog %s" % __version__)
opt = parser.add_option
opt('-p', '--page', dest='page', type='int', default=-1,
help="display single page")
opt('-s', '--series', dest='series', type='int', default=-1,
help="display series of pages of same shape")
opt('--nomultifile', dest='nomultifile', action='store_true',
default=False, help="don't read OME series from multiple files")
opt('--noplot', dest='noplot', action='store_true', default=False,
help="don't display images")
opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear',
help="image interpolation method")
opt('--dpi', dest='dpi', type='int', default=96,
help="set plot resolution")
opt('--debug', dest='debug', action='store_true', default=False,
help="raise exception on failures")
opt('--test', dest='test', action='store_true', default=False,
help="try read all images in path")
opt('--doctest', dest='doctest', action='store_true', default=False,
help="runs the docstring examples")
opt('-v', '--verbose', dest='verbose', action='store_true', default=True)
opt('-q', '--quiet', dest='verbose', action='store_false')
settings, path = parser.parse_args()
path = ' '.join(path)
if settings.doctest:
import doctest
doctest.testmod()
return 0
if not path:
parser.error("No file specified")
if settings.test:
test_tifffile(path, settings.verbose)
return 0
if any(i in path for i in '?*'):
path = glob.glob(path)
if not path:
print('no files match the pattern')
return 0
# TODO: handle image sequences
#if len(path) == 1:
path = path[0]
print("Reading file structure...", end=' ')
start = time.time()
try:
tif = TiffFile(path, multifile=not settings.nomultifile)
except Exception as e:
if settings.debug:
raise
else:
print("\n", e)
sys.exit(0)
print("%.3f ms" % ((time.time()-start) * 1e3))
if tif.is_ome:
settings.norgb = True
images = [(None, tif[0 if settings.page < 0 else settings.page])]
if not settings.noplot:
print("Reading image data... ", end=' ')
def notnone(x):
return next(i for i in x if i is not None)
start = time.time()
try:
if settings.page >= 0:
images = [(tif.asarray(key=settings.page),
tif[settings.page])]
elif settings.series >= 0:
images = [(tif.asarray(series=settings.series),
notnone(tif.series[settings.series].pages))]
else:
images = []
for i, s in enumerate(tif.series):
try:
images.append(
(tif.asarray(series=i), notnone(s.pages)))
except ValueError as e:
images.append((None, notnone(s.pages)))
if settings.debug:
raise
else:
print("\n* series %i failed: %s... " % (i, e),
end='')
print("%.3f ms" % ((time.time()-start) * 1e3))
except Exception as e:
if settings.debug:
raise
else:
print(e)
tif.close()
print("\nTIFF file:", tif)
print()
for i, s in enumerate(tif.series):
print ("Series %i" % i)
print(s)
print()
for i, page in images:
print(page)
print(page.tags)
if page.is_palette:
print("\nColor Map:", page.color_map.shape, page.color_map.dtype)
for attr in ('cz_lsm_info', 'cz_lsm_scan_info', 'uic_tags',
'mm_header', 'imagej_tags', 'micromanager_metadata',
'nih_image_header'):
if hasattr(page, attr):
print("", attr.upper(), Record(getattr(page, attr)), sep="\n")
print()
if page.is_micromanager:
print('MICROMANAGER_FILE_METADATA')
print(Record(tif.micromanager_metadata))
if images and not settings.noplot:
try:
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot
except ImportError as e:
warnings.warn("failed to import matplotlib.\n%s" % e)
else:
for img, page in images:
if img is None:
continue
vmin, vmax = None, None
if 'gdal_nodata' in page.tags:
try:
vmin = numpy.min(img[img > float(page.gdal_nodata)])
except ValueError:
pass
if page.is_stk:
try:
vmin = page.uic_tags['min_scale']
vmax = page.uic_tags['max_scale']
except KeyError:
pass
else:
if vmax <= vmin:
vmin, vmax = None, None
title = "%s\n %s" % (str(tif), str(page))
imshow(img, title=title, vmin=vmin, vmax=vmax,
bitspersample=page.bits_per_sample,
photometric=page.photometric,
interpolation=settings.interpol,
dpi=settings.dpi)
pyplot.show()
TIFFfile = TiffFile # backwards compatibility
if sys.version_info[0] > 2:
basestring = str, bytes
unicode = str
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
kinglogxzl/rqalpha | build/lib/rqalpha/examples/golden_cross.py | 2 | 2236 | # You can import any third-party Python modules supported by the platform, such as pandas, numpy, etc.
import talib
import numpy as np
import math
import pandas
# Write any initialization logic in this method. The context object will be passed between all methods of your strategy.
def init(context):
context.s1 = "000001.XSHE"
    # Set the parameters used by this strategy; they can be read anywhere in the strategy code. This strategy uses a long and a short moving average, and both window lengths are defined here, so tuning only requires changing these numbers.
context.SHORTPERIOD = 20
context.LONGPERIOD = 120
# This logic is triggered whenever the data of the securities you selected is updated, e.g. a daily or minute history bar, or a real-time bar update.
def handle_bar(context, bar_dict):
    # Start writing your main algorithm logic here
    # bar_dict[order_book_id] gives you the bar data of a security
    # context.portfolio gives you the current state of the portfolio
    # use the order_shares(id_or_ins, amount) method to place orders
    # TODO: start writing your algorithm!
    # The strategy relies on moving averages, so we need to read historical prices
prices = history(context.LONGPERIOD+1,'1d','close')[context.s1].values
    # Use talib to compute the short and long moving averages; each is returned as an array
short_avg = talib.SMA(prices,context.SHORTPERIOD)
long_avg = talib.SMA(prices,context.LONGPERIOD)
plot("short avg",short_avg[-1])
plot("long avg",long_avg[-1])
    # Current position (number of shares) of the stock in the portfolio
curPosition = context.portfolio.positions[context.s1].quantity
    # How many shares the cash currently in the portfolio can buy
shares = context.portfolio.cash/bar_dict[context.s1].close
    # If the short MA crosses below the long MA: on the current bar the short average is below the long average, while on the previous bar it was above
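    # e.g. (hypothetical numbers): short_avg[-2]=10.30 > long_avg[-2]=10.20 on the previous bar,
    # but short_avg[-1]=10.10 < long_avg[-1]=10.18 on the current bar -> the short MA has just crossed below the long MA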
if short_avg[-1]-long_avg[-1]<0 and short_avg[-2]-long_avg[-2]>0 and curPosition>0:
        # liquidate the whole position
order_target_value(context.s1,0)
    # If the short MA crosses above the long MA, treat it as an entry signal
if short_avg[-1]-long_avg[-1]>0 and short_avg[-2]-long_avg[-2]<0:
        # buy with all available cash
order_shares(context.s1,shares)
| apache-2.0 |
cfjhallgren/shogun | examples/undocumented/python/graphical/interactive_svr_demo.py | 6 | 11220 | """
Shogun demo, based on PyQT Demo by Eli Bendersky
Christian Widmer
Soeren Sonnenburg
License: GPLv3
"""
import numpy
import sys, os, csv
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib
from matplotlib.colorbar import make_axes, Colorbar
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from shogun import *
class Form(QMainWindow):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle('SHOGUN interactive demo')
self.data = DataHolder()
self.series_list_model = QStandardItemModel()
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.on_show()
def load_file(self, filename=None):
filename = QFileDialog.getOpenFileName(self,
'Open a data file', '.', 'CSV files (*.csv);;All Files (*.*)')
if filename:
self.data.load_from_file(filename)
self.fill_series_list(self.data.series_names())
self.status_text.setText("Loaded " + filename)
def on_show(self):
self.axes.clear()
self.axes.grid(True)
self.axes.plot(self.data.x1, self.data.x2, 'bo')
self.axes.set_xlim((-5,5))
self.axes.set_ylim((-5,5))
self.canvas.draw()
self.fill_series_list(self.data.get_stats())
def on_about(self):
msg = __doc__
QMessageBox.about(self, "About the demo", msg.strip())
def fill_series_list(self, names):
self.series_list_model.clear()
for name in names:
item = QStandardItem(name)
item.setCheckState(Qt.Unchecked)
item.setCheckable(False)
self.series_list_model.appendRow(item)
def onclick(self, event):
print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f'%(event.button, event.x, event.y, event.xdata, event.ydata)
self.data.add_example(event.xdata, event.ydata)
self.on_show()
def clear(self):
self.data.clear()
self.on_show()
def enable_widgets(self):
kernel_name = self.kernel_combo.currentText()
if kernel_name == "LinearKernel":
self.sigma.setDisabled(True)
self.degree.setDisabled(True)
elif kernel_name == "PolynomialKernel":
self.sigma.setDisabled(True)
self.degree.setEnabled(True)
elif kernel_name == "GaussianKernel":
self.sigma.setEnabled(True)
self.degree.setDisabled(True)
def train_svm(self):
width = float(self.sigma.text())
degree = int(self.degree.text())
self.axes.clear()
self.axes.grid(True)
self.axes.plot(self.data.x1, self.data.x2, 'bo')
# train svm
labels = self.data.get_labels()
print type(labels)
lab = RegressionLabels(labels)
features = self.data.get_examples()
train = RealFeatures(features)
kernel_name = self.kernel_combo.currentText()
print "current kernel is %s" % (kernel_name)
if kernel_name == "LinearKernel":
gk = LinearKernel(train, train)
gk.set_normalizer(IdentityKernelNormalizer())
elif kernel_name == "PolynomialKernel":
gk = PolyKernel(train, train, degree, True)
gk.set_normalizer(IdentityKernelNormalizer())
elif kernel_name == "GaussianKernel":
gk = GaussianKernel(train, train, width)
cost = float(self.cost.text())
tubeeps = float(self.tubeeps.text())
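        # LibSVR parameters: 'cost' (C) trades off model flatness against training error,
        # while 'tubeeps' is the width of the epsilon-insensitive tube around the regression curve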
print "cost", cost
svm = LibSVR(cost, tubeeps, gk, lab)
svm.train()
svm.set_epsilon(1e-2)
x=numpy.linspace(-5.0,5.0,100)
y=svm.apply(RealFeatures(numpy.array([x]))).get_labels()
self.axes.plot(x,y,'r-')
self.axes.set_xlim((-5,5))
self.axes.set_ylim((-5,5))
self.canvas.draw()
def create_main_frame(self):
self.main_frame = QWidget()
plot_frame = QWidget()
self.dpi = 100
self.fig = Figure((6.0, 6.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
cid = self.canvas.mpl_connect('button_press_event', self.onclick)
self.axes = self.fig.add_subplot(111)
self.cax = None
#self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
log_label = QLabel("Number of examples:")
self.series_list_view = QListView()
self.series_list_view.setModel(self.series_list_model)
cost_label = QLabel('C')
#self.cost = QSpinBox()#QLineEdit()
self.cost = QLineEdit()
self.cost.setText("1.0")
#self.cost.setMinimum(1)
spin_label2 = QLabel('tube')
self.tubeeps = QLineEdit()
self.tubeeps.setText("0.1")
spin_label3 = QLabel('sigma')
self.sigma = QLineEdit()
self.sigma.setText("1.2")
#self.sigma.setMinimum(1)
spin_label4 = QLabel('d')
self.degree = QLineEdit()
self.degree.setText("2")
#self.sigma.setMinimum(1)
spins_hbox = QHBoxLayout()
spins_hbox.addWidget(cost_label)
spins_hbox.addWidget(self.cost)
spins_hbox.addWidget(spin_label2)
spins_hbox.addWidget(self.tubeeps)
spins_hbox.addWidget(spin_label3)
spins_hbox.addWidget(self.sigma)
spins_hbox.addWidget(spin_label4)
spins_hbox.addWidget(self.degree)
spins_hbox.addStretch(1)
self.legend_cb = QCheckBox("Show Support Vectors")
self.legend_cb.setChecked(False)
self.show_button = QPushButton("&Train SVR")
self.connect(self.show_button, SIGNAL('clicked()'), self.train_svm)
self.clear_button = QPushButton("&Clear")
self.connect(self.clear_button, SIGNAL('clicked()'), self.clear)
self.kernel_combo = QComboBox()
self.kernel_combo.insertItem(-1, "GaussianKernel")
self.kernel_combo.insertItem(-1, "PolynomialKernel")
self.kernel_combo.insertItem(-1, "LinearKernel")
self.kernel_combo.maximumSize = QSize(300, 50)
self.connect(self.kernel_combo, SIGNAL("currentIndexChanged(QString)"), self.enable_widgets)
left_vbox = QVBoxLayout()
left_vbox.addWidget(self.canvas)
#left_vbox.addWidget(self.mpl_toolbar)
right0_vbox = QVBoxLayout()
right0_vbox.addWidget(log_label)
right0_vbox.addWidget(self.series_list_view)
#right0_vbox.addWidget(self.legend_cb)
right0_vbox.addStretch(1)
right2_vbox = QVBoxLayout()
right2_label = QLabel("Settings")
right2_vbox.addWidget(right2_label)
right2_vbox.addWidget(self.show_button)
right2_vbox.addWidget(self.kernel_combo)
right2_vbox.addLayout(spins_hbox)
right2_clearlabel = QLabel("Remove Data")
right2_vbox.addWidget(right2_clearlabel)
right2_vbox.addWidget(self.clear_button)
right2_vbox.addStretch(1)
right_vbox = QHBoxLayout()
right_vbox.addLayout(right0_vbox)
right_vbox.addLayout(right2_vbox)
hbox = QVBoxLayout()
hbox.addLayout(left_vbox)
hbox.addLayout(right_vbox)
self.main_frame.setLayout(hbox)
self.setCentralWidget(self.main_frame)
self.enable_widgets()
def create_status_bar(self):
self.status_text = QLabel("")
self.statusBar().addWidget(self.status_text, 1)
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
load_action = self.create_action("&Load file",
shortcut="Ctrl+L", slot=self.load_file, tip="Load a file")
quit_action = self.create_action("&Quit", slot=self.close,
shortcut="Ctrl+Q", tip="Close the application")
self.add_actions(self.file_menu,
(load_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
class DataHolder(object):
""" Just a thin wrapper over a dictionary that holds integer
data series. Each series has a name and a list of numbers
as its data. The length of all series is assumed to be
the same.
The series can be read from a CSV file, where each line
is a separate series. In each series, the first item in
the line is the name, and the rest are data numbers.
"""
def __init__(self, filename=None):
self.clear()
self.load_from_file(filename)
def clear(self):
self.x1 = []
self.x2 = []
def get_stats(self):
num = len(self.x1)
str_num = "num examples: %i" % num
return (str_num, str_num)
def get_labels(self):
return numpy.array(self.x2, dtype=numpy.float64)
def get_examples(self):
num = len(self.x1)
examples = numpy.zeros((1,num))
for i in xrange(num):
examples[0,i] = self.x1[i]
return examples
def add_example(self, x1, x2):
self.x1.append(x1)
self.x2.append(x2)
def load_from_file(self, filename=None):
self.data = {}
self.names = []
if filename:
for line in csv.reader(open(filename, 'rb')):
self.names.append(line[0])
self.data[line[0]] = map(int, line[1:])
self.datalen = len(line[1:])
def series_names(self):
""" Names of the data series
"""
return self.names
def series_len(self):
""" Length of a data series
"""
return self.datalen
def series_count(self):
return len(self.data)
def get_series_data(self, name):
return self.data[name]
def main():
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
if __name__ == "__main__":
main()
#~ dh = DataHolder('qt_mpl_data.csv')
#~ print dh.data
#~ print dh.get_series_data('1991 Sales')
#~ print dh.series_names()
#~ print dh.series_count()
| gpl-3.0 |
yeon3683/handpose | main_nyu.py | 1 | 1735 | import numpy
import matplotlib
matplotlib.use('Agg') # plot to file
import matplotlib.pyplot as plt
import os
import cPickle
import sys
from data.importers import NYUImporter
from data.dataset import NYUDataset
import cv2
if __name__ == '__main__':
#if not os.path.exists('./eval/'+eval_prefix+'/'):
# os.makedirs('./eval/'+eval_prefix+'/')
rng = numpy.random.RandomState(320)
print("create data")
di = NYUImporter('../data/NYU/')
Seq1 = di.loadSequence('train', shuffle=True, rng=rng)
trainSeqs = [Seq1]
Seq2_1 = di.loadSequence('test_1')
#Seq2_2 = di.loadSequence('test_2')
#testSeqs = [Seq2_1, Seq2_2]
testSeqs = [Seq2_1]
# create training data
trainDataSet = NYUDataset(trainSeqs)
train_data, train_gt3D = trainDataSet.imgStackDepthOnly('train', normZeroOne=True)
mb = (train_data.nbytes) / (1024 * 1024)
print("data size: {}Mb".format(mb))
# create validation data
#valDataSet = NYUDataset(testSeqs)
#val_data, val_gt3D = valDataSet.imgStackDepthOnly('test_1')
# create test data
testDataSet = NYUDataset(testSeqs)
test_data1, test_gt3D1 = testDataSet.imgStackDepthOnly('test_1', normZeroOne=True)
#test_data2, test_gt3D2 = testDataSet.imgStackDepthOnly('test_2')
print train_gt3D.max(), test_gt3D1.max(), train_gt3D.min(), test_gt3D1.min()
print train_data.max(), test_data1.max(), train_data.min(), test_data1.min()
imgSizeW = train_data.shape[3]
imgSizeH = train_data.shape[2]
nChannels = train_data.shape[1]
for i in range(1, 10):
cv2.imshow('image', train_data[i][0])
cv2.imwrite("{}F.png".format(i),train_data[i][0]*256)
cv2.waitKey(100)
cv2.destroyAllWindows()
| gpl-3.0 |
hainm/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
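# i.e. the generating coefficient vector w below has exactly n_nonzero_coefs nonzero entries
# (|.|_0 denotes the number of nonzero components, the so-called L0 "norm")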
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
aflaxman/scikit-learn | examples/classification/plot_classifier_comparison.py | 9 | 5219 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
"Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
"Naive Bayes", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0)),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
if ds_cnt == 0:
ax.set_title("Input data")
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
edgecolors='k', alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
plt.tight_layout()
plt.show()
| bsd-3-clause |
siutanwong/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 244 | 9986 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    # Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
| bsd-3-clause |
svallaghe/libmesh | doc/statistics/cloc_libmesh.py | 1 | 8558 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import math
# Import stuff for working with dates
from datetime import datetime
from matplotlib.dates import date2num
# git checkout `git rev-list -n 1 --before="$my_date" master`
# cloc.pl src/*/*.C include/*/*.h
data = [
# 2003 - All data from archived svn repo
# '2003-01-10', 158, 29088, # SVN revision 4 - this is the first revision with trunk/libmesh
# '2003-01-20', 184, 28937, # SVN revision 11
# '2003-01-24', 198, 31158, # SVN revision 23
'2003-02-04', 198, 31344, # SVN revision 47
'2003-03-04', 243, 36036,
'2003-04-04', 269, 39946,
'2003-05-04', 275, 40941,
'2003-06-04', 310, 44090,
'2003-07-04', 319, 44445,
'2003-08-04', 322, 45225,
'2003-09-04', 325, 46762,
'2003-10-04', 327, 47151,
'2003-11-04', 327, 47152, # Up to now, all the include files were in the same directory
'2003-12-04', 327, 47184,
# 2004 - All data from archived svn repo
'2004-01-04', 339, 48437,
'2004-02-04', 343, 50455,
'2004-03-04', 347, 52198,
'2004-04-04', 358, 52515,
'2004-05-04', 358, 52653,
'2004-06-04', 369, 53953,
'2004-07-04', 368, 53981,
'2004-08-04', 371, 54316,
'2004-09-04', 371, 54510,
'2004-10-04', 375, 55785,
'2004-11-04', 375, 55880,
'2004-12-04', 384, 56612,
# 2005 - All data from archived svn repo
'2005-01-04', 385, 56674,
'2005-02-04', 406, 61034,
'2005-03-04', 406, 62423,
'2005-04-04', 403, 62595,
'2005-05-04', 412, 63540,
'2005-06-04', 416, 69619,
'2005-07-04', 425, 72092,
'2005-08-04', 425, 72445,
'2005-09-04', 429, 74148,
'2005-10-04', 429, 74263,
'2005-11-04', 429, 74486,
'2005-12-04', 429, 74629,
# 2006 - All data from archived svn repo
'2006-01-04', 429, 74161,
'2006-02-04', 429, 74165,
'2006-03-04', 429, 74170,
'2006-04-04', 429, 74864,
'2006-05-04', 433, 73847,
'2006-06-04', 438, 74681,
'2006-07-04', 454, 76954,
'2006-08-04', 454, 77464,
'2006-09-04', 454, 77843,
'2006-10-04', 454, 78051,
'2006-11-04', 463, 78683,
'2006-12-04', 463, 79057,
# 2007 - All data from archived svn repo
'2007-01-04', 463, 79149,
'2007-02-04', 475, 79344,
'2007-03-04', 479, 81416,
'2007-04-04', 479, 81468,
'2007-05-04', 481, 84312,
'2007-06-04', 481, 85565,
'2007-07-04', 482, 85924,
'2007-08-04', 485, 86248,
'2007-09-04', 487, 86481,
'2007-10-04', 497, 87926,
'2007-11-04', 502, 89687,
'2007-12-04', 512, 93523,
# 2008 - All data from archived svn repo
'2008-01-04', 512, 94263,
'2008-02-04', 515, 94557,
'2008-03-04', 526, 98127,
'2008-04-04', 526, 98256,
'2008-05-04', 531, 99715,
'2008-06-04', 531, 99963,
'2008-07-04', 538, 100839,
'2008-08-04', 542, 101682,
'2008-09-04', 548, 102163,
'2008-10-04', 556, 104185,
'2008-11-04', 558, 104535,
'2008-12-04', 565, 106318,
# 2009 - All data from archived svn repo
'2009-01-04', 565, 106340,
'2009-02-04', 579, 108431,
'2009-03-04', 584, 109050,
'2009-04-04', 584, 109922,
'2009-05-04', 589, 110821,
'2009-06-04', 591, 111094,
'2009-07-04', 591, 111571,
'2009-08-04', 591, 111555,
'2009-09-04', 591, 111746,
'2009-10-04', 591, 111920,
'2009-11-04', 595, 112993,
'2009-12-04', 597, 113744,
# 2010 - All data from archived svn repo
'2010-01-04', 598, 113840,
'2010-02-04', 600, 114378,
'2010-03-04', 602, 114981,
'2010-04-04', 603, 115509,
'2010-05-04', 603, 115821,
'2010-06-04', 603, 115875,
'2010-07-04', 627, 126159,
'2010-08-04', 627, 126217,
'2010-09-04', 628, 126078,
'2010-10-04', 642, 129417,
'2010-11-04', 643, 130045,
'2010-12-04', 648, 131363,
# 2011 - All data from archived svn repo
'2011-01-04', 648, 131644,
'2011-02-04', 648, 132105,
'2011-03-04', 658, 132950,
'2011-04-04', 661, 133643,
'2011-05-04', 650, 133958,
'2011-06-04', 662, 134447,
'2011-07-04', 667, 134938,
'2011-08-04', 679, 136338,
'2011-09-04', 684, 138165,
'2011-10-04', 686, 138627,
'2011-11-04', 690, 141876,
'2011-12-04', 690, 142096,
# 2012
'2012-01-04', 694, 142345,
'2012-02-04', 697, 142585,
'2012-03-04', 703, 146127,
'2012-04-04', 706, 147191,
'2012-05-04', 708, 148202,
'2012-06-04', 705, 148334,
'2012-07-04', 713, 150066,
'2012-08-04', 727, 152269,
'2012-09-04', 725, 152381,
'2012-10-04', 734, 155213, # cloc reports 1092 and 1094 files for Oct/Nov, Don't know what happened...
'2012-11-04', 743, 156082, # We moved from libmesh/src to src around here so maybe that caused it?
'2012-12-04', 752, 156903,
# 2013
'2013-01-04', 754, 158689,
'2013-02-04', 770, 161001,
'2013-03-04', 776, 162189,
'2013-04-04', 783, 162986,
'2013-05-04', 785, 163808,
'2013-06-04', 785, 164022,
'2013-07-04', 789, 163854,
'2013-08-04', 789, 164269,
'2013-09-04', 790, 165129,
'2013-10-04', 790, 165447,
'2013-11-04', 791, 166287,
'2013-12-04', 794, 168772,
# 2014
'2014-01-04', 796, 170174,
'2014-02-04', 796, 170395,
'2014-03-04', 799, 172037,
'2014-04-04', 801, 172262,
'2014-05-04', 806, 173772,
'2014-06-04', 807, 171098,
'2014-07-04', 807, 171220,
'2014-08-04', 808, 172534,
'2014-09-04', 808, 173639,
'2014-10-04', 819, 175750,
'2014-11-04', 819, 176415,
'2014-12-04', 819, 176277,
# 2015
'2015-01-04', 819, 176289,
'2015-02-04', 824, 176758,
'2015-03-04', 825, 176958,
'2015-04-04', 830, 176926,
'2015-05-04', 826, 176659,
'2015-06-04', 835, 178351,
'2015-07-04', 840, 179578,
'2015-08-04', 846, 181130,
'2015-09-04', 846, 181675,
'2015-10-04', 849, 181196,
'2015-11-04', 848, 181385,
'2015-12-04', 849, 180331,
# 2016
'2016-01-04', 849, 180538,
'2016-02-04', 846, 182244,
'2016-03-04', 846, 182727,
'2016-04-04', 849, 183261,
'2016-05-04', 849, 183393,
'2016-06-04', 853, 184649,
'2016-07-04', 839, 183363,
'2016-08-04', 837, 183308,
'2016-09-04', 842, 183850,
'2016-10-04', 847, 184879,
'2016-11-04', 850, 185408,
'2016-12-04', 853, 185683,
# 2017
'2017-01-04', 853, 185885,
'2017-02-04', 853, 186246,
'2017-03-04', 850, 184993,
'2017-04-04', 856, 185906,
'2017-05-04', 855, 186311,
'2017-06-04', 855, 186441,
'2017-07-04', 856, 186664,
'2017-08-04', 856, 186707,
'2017-09-04', 856, 186793,
]
# Extract the dates from the data array
date_strings = data[0::3]
# Convert date strings into numbers
date_nums = []
for d in date_strings:
date_nums.append(date2num(datetime.strptime(d, '%Y-%m-%d')))
# Extract number of files from data array
n_files = data[1::3]
# Extract number of lines of code from data array
n_lines = data[2::3]
# Get a reference to the figure
fig = plt.figure()
# 111 is equivalent to Matlab's subplot(1,1,1) command.
# The colors used come from sns.color_palette("muted").as_hex() They
# are the "same basic order of hues as the default matplotlib color
# cycle but more attractive colors."
ax1 = fig.add_subplot(111)
ax1.plot(date_nums, n_files, color=u'#4878cf', marker='o', linestyle='-', markersize=3)
ax1.set_ylabel('Files (blue circles)')
# Set up x-tick locations
ticks_names = ['2003', '2005', '2007', '2009', '2011', '2013', '2015', '2017']
# Get numerical values for the names
tick_nums = []
for x in ticks_names:
tick_nums.append(date2num(datetime.strptime(x + '-03-04', '%Y-%m-%d')))
# Set tick labels and positions
ax1.set_xticks(tick_nums)
ax1.set_xticklabels(ticks_names)
# Use the twinx() command to plot more data on the other axis
ax2 = ax1.twinx()
ax2.plot(date_nums, np.divide(n_lines, 1000.), color=u'#6acc65', marker='s', linestyle='-', markersize=3)
ax2.set_ylabel('Lines of code in thousands (green squares)')
# Create linear curve fits of the data
files_fit = np.polyfit(date_nums, n_files, 1)
lines_fit = np.polyfit(date_nums, n_lines, 1)
# Convert to files/month
files_per_month = files_fit[0]*(365./12.)
lines_per_month = lines_fit[0]*(365./12.)
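# Note: the polyfit slope is per matplotlib date unit (one day), so multiplying by
# 365/12 (~30.4 days) converts it to an approximate monthly rate.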
# Print curve fit data on the plot
files_msg = 'Approx. ' + '%.1f' % files_per_month + ' files added/month'
lines_msg = 'Approx. ' + '%.1f' % lines_per_month + ' lines added/month'
ax1.text(date_nums[len(date_nums)/4], 300, files_msg);
ax1.text(date_nums[len(date_nums)/4], 250, lines_msg);
# Save as PDF
plt.savefig('cloc_libmesh.pdf', format='pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 |
Janderson/analise_dados_ob | python/miner_data.py | 1 | 7304 | # -*- coding: utf-8 -*-
## collects all the csv files from the files folder
## the file name MUST follow the pattern [PAIR/ASSET]_
## and generates one big summary file, resumo.csv
import csv, os, codecs
from time import sleep
import pandas as pd
import numpy as np
from decimal import *
from shutil import copyfile
from pylab import *
debug = False
ano_inicio = 2014
mes_inicio = 01
dia_inicio = 01
tam_para_doji = 5
tam_trend = 2 # TAMANHO MINIMO DA TENDENCIA
size_limit = 50
clear_old_data = True
dir_of_files = "history_data/"
set_assets = ["gbpusd", "usdjpy", "audusd", "eurusd"]
set_timeframes = ["m15"]
set_assets = ["eurusd"]
#set_timeframes = ["h1"]
DATA_RESULT= "miner_result.csv"
def list_files():
return os.listdir(dir_of_files)
class MinerData:
def __init__(self, file_to_miner, minertype="HL", doji_size=2):
self.doji_size = doji_size
        self.minertype = minertype # BD - Body, HL - High and Low together, H - High only, L - Low only
self.filename_miner = file_to_miner
self.count_linhas = 0
self.trend = 0
self.size_candles = []
self.fullsize_candles = []
self.count_trend = {}
self.bar_data = dict()
self.clear_resultcsv()
self.persist_extr_data = False
self.datafile = open('temp/dt_an%s.csv' % self.filename_miner, 'r')
# Limpeza do arquivo original do metatrader
def clear_file(self):
fname = 'temp/dt_an%s.csv' % self.filename_miner
if not os.path.isfile(fname) or clear_old_data:
copyfile('%s%s.csv' % (dir_of_files, self.filename_miner), fname)
return fname.replace(".csv","")
def clear_resultcsv(self):
with open(self.result_filename(), "w") as csvfile:
csvfile.write("")
def writeline_csv(self):
if debug: raw_input()
self.result_csv = open(self.result_filename(), "a")
line = [self.filename_miner.split("_")[0], self.filename_miner.split("_")[1]] # linha 1 e 2 (par, timeframe)
line.append(str(self.bar_data["vdata"])) # linha 3 = data primeiro candle evento
line.append(str(self.minertype)) # linha 4 - tipo da mineraçao
line.append(str(abs(self.trend)))
line.append(str(self.trend))
line.append("\""+str(self.size_candles).replace("[","").replace("]","") + "\"" )
line.append("\""+str(self.fullsize_candles).replace("[","").replace("]","") + "\"" )
self.extract_data(True)
line.append(str(self.sizecandle()))
self.result_csv.write(";".join(line) + "\n")
self.result_csv.close()
def result_filename(self):
return "minerdata/%s_%s.csv" % (self.filename_miner, self.minertype)
def end_miner(self):
self.datafile.close()
def store_stats(self):
if not self.count_trend.has_key(abs(self.trend)):
self.count_trend[abs(self.trend)]=1
else:
self.count_trend[abs(self.trend)]+=1
def extract_data(self, persist=False):
if not self.persist_extr_data:
self.lastbar_data = self.bar_data
line = self.datafile.readline()
if line!='':
sline = line.split(",")
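            # Each row is assumed to be a MetaTrader-style history export:
            # date, time, open, high, low, close[, volume]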
vdata = str(sline[0] + " "+ sline[1]).replace(".", "/")
vopen, vhigh, vlow, vclose = Decimal(sline[2]), Decimal(sline[3]), Decimal(sline[4]), Decimal(sline[5])
self.bar_data = dict(vdata=vdata,vopen=vopen,vhigh=vhigh,vlow=vlow,vclose=vclose)
if debug: sleep(1)
self.persist_extr_data = persist
self.last_line= line
return line
else:
self.persist_extr_data = False
return self.last_line
    def gt(self): # depending on the mining type, decides whether the current value counts as higher
if self.minertype == "BD":
if self.bar_data["vclose"] > self.bar_data["vopen"]:
return True
else:
return False
elif self.minertype == "HL":
if self.lastbar_data != {} and self.bar_data["vhigh"] > self.lastbar_data["vhigh"] and self.bar_data["vlow"] > self.lastbar_data["vlow"]:
return True
else:
return False
    def lt(self): # depending on the mining type, decides whether the current value counts as lower
if self.minertype == "BD":
if self.bar_data["vclose"] < self.bar_data["vopen"]:
return True
else:
return False
elif self.minertype == "HL":
if self.lastbar_data != {} and self.bar_data["vhigh"] < self.lastbar_data["vhigh"] and self.bar_data["vlow"] < self.lastbar_data["vlow"]:
return True
else:
return False
def sizecandle(self, absolute = True):
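        # Returns the candle body size (close - open) expressed in points, i.e. the price
        # difference scaled by 10**(number of decimal places), optionally as an absolute value.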
if absolute:
return int( abs(self.bar_data["vclose"] - self.bar_data["vopen"]) * 10 ** abs(Decimal(str((self.bar_data["vclose"] - self.bar_data["vopen"]))).as_tuple().exponent))
else:
return int( (self.bar_data["vclose"] - self.bar_data["vopen"]) * 10 ** abs(Decimal(str((self.bar_data["vclose"] - self.bar_data["vopen"]))).as_tuple().exponent))
def analise(self):
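        # self.trend counts consecutive bullish (+) or bearish (-) bars for the chosen miner type;
        # when a run of at least tam_trend bars is broken by a bar in the opposite direction,
        # it is written out via writeline_csv().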
self.clear_file()
print "analisando", self.filename_miner, " tp:", self.minertype,
line = self.extract_data()
while (line != ''):
vdata, vopen, vhigh, vlow, vclose = self.bar_data["vdata"], self.bar_data["vopen"], self.bar_data["vhigh"], self.bar_data["vlow"], self.bar_data["vclose"]
alta=False
baixa=False
fullsize = int( abs(vhigh - vlow) * 10 ** abs(Decimal(str((vhigh - vlow))).as_tuple().exponent))
self.size_candles.append(self.sizecandle())
self.fullsize_candles.append(fullsize)
if debug: print "\n\n\n##############> self.trend:", self.trend, " size:", self.sizecandle(), "->",self.sizecandle(False), ",",fullsize, ":", self.size_candles
if debug: print "\nBarDt:",self.bar_data, "\nLastBarDt:", self.lastbar_data
if self.gt():
alta=True
if self.trend >=0:
if self.trend==0:
self.size_candles = []
self.fullsize_candles = []
self.trend+=1
else:
if abs(self.trend)>= tam_trend:
if debug:
print self.filename_miner, " ----> BIG TREND ", self.trend, "end data", vdata
self.writeline_csv()
self.store_stats()
self.size_candles = []
self.fullsize_candles = []
self.trend=1
if self.lt():
baixa=True
if self.trend <=0:
if self.trend==0:
self.size_candles = []
self.fullsize_candles = []
self.trend-=1
else:
                        # End of the current trend
if abs(self.trend)>= tam_trend:
if debug:
print self.filename_miner, " ----> BIG TREND ", self.trend, "end data", vdata
#print str(self.size_candles)
self.writeline_csv()
self.store_stats()
self.size_candles = []
self.fullsize_candles = []
self.trend=-1
if alta == False and baixa == False:
self.trend=0
self.count_linhas+=1
line = self.extract_data()
self.end_miner()
print "[ok]"
return self.count_linhas, self.count_trend
if __name__=="__main__":
#all_count = []
for filename in list_files():
filename = filename.lower()
filesplit = filename.replace(".csv", "").split("_")
if ".csv" in filename and filesplit>1 and filesplit[1] in set_timeframes and filesplit[0] in set_assets:
for typem in ["BD", "HL"]:
filename = filename.replace(".csv", "")
md = MinerData(filename, typem)
r = md.analise()
#for item in r[1].iteritems():
# all_count.append((filename.upper(), filename.split("_")[0].upper() + ";" + filename.split("_")[1] + ";" + str(r[0]) + ";" +str(item[0]) + ";" + str(item[1])))
#resume_f = file('resumo_miner.csv','w')
#resume_f.write("\n")
#print "gerando resumo ... ",
#for i in all_count:
# resume_f.write(str(i[1]))
# resume_f.write("\n")
#resume_f.close()
print "DONE"
| gpl-3.0 |
theodoregoetz/histogram | test/graphics/test_histogram_mpl.py | 1 | 2786 | # coding: utf-8
from __future__ import unicode_literals
import unittest
from warnings import simplefilter
from numpy import random as rand
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = ['DejaVu Sans']
import matplotlib.pyplot as plt
from histogram import Histogram
from .. import conf
class TestHistogramsMpl(unittest.TestCase):
def setUp(self):
if conf.fast or conf.comparator is None:
raise unittest.SkipTest('slow tests')
simplefilter('ignore')
def test_hist_1d(self):
rand.seed(1)
h = Histogram(40, [0,1], 'χ (cm)', 'counts', 'Some Data')
h.fill(rand.normal(0.5, 0.1, 2000))
fig, ax = plt.subplots()
pt = ax.plothist(h)
self.assertTrue(conf.comparator.compare_images(
fig.savefig, 'hist_1d.png'))
def test_hist_1d_errorbar(self):
rand.seed(1)
h = Histogram(40, [0,1], 'χ (cm)', 'counts', 'Some Data')
h.fill(rand.normal(0.5, 0.1, 300))
fig, ax = plt.subplots()
pt = ax.plothist(h, style='errorbar')
self.assertTrue(conf.comparator.compare_images(
fig.savefig, 'hist_1d_errorbar.png'))
def test_hist_1d_line(self):
rand.seed(1)
h = Histogram(40, [0,1], 'χ (cm)', 'counts', 'Some Data')
h.fill(rand.normal(0.5, 0.1, 300))
fig, ax = plt.subplots()
pt = ax.plothist(h, style='line')
self.assertTrue(conf.comparator.compare_images(
fig.savefig, 'hist_1d_line.png'))
def test_hist_2d(self):
rand.seed(1)
h = Histogram(40, [0,1], 'χ (cm)', 30, [-5,5], 'ψ', 'counts', 'Some Data')
h.fill(rand.normal(0.5, 0.1, 1000), rand.normal(0,1,1000))
fig, ax = plt.subplots()
pt = ax.plothist(h)
self.assertTrue(conf.comparator.compare_images(
fig.savefig, 'hist_2d.png'))
def test_hist_1d_nonuniform(self):
h = Histogram([0,1,3], data=[1,2])
fig, ax = plt.subplots(2)
pt = ax[0].plothist(h, style='polygon')
pt = ax[0].plothist(h, style='line', color='black', lw=2)
pt = ax[1].plothist(h, style='errorbar')
self.assertTrue(conf.comparator.compare_images(
fig.savefig, 'hist_1d_nonuniform.png'))
def test_hist_1d_negative(self):
h = Histogram([0,1,3], data=[-1,2])
fig, ax = plt.subplots(2)
pt = ax[0].plothist(h, style='polygon')
pt = ax[0].plothist(h, style='line', color='black', lw=2)
pt = ax[1].plothist(h, style='errorbar')
self.assertTrue(conf.comparator.compare_images(
fig.savefig, 'hist_1d_negative.png'))
if __name__ == '__main__':
from .. import main
main()
| gpl-3.0 |
mattgiguere/scikit-learn | sklearn/ensemble/__init__.py | 44 | 1228 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
gtesei/fast-furious | competitions/quora-question-pairs/alstm.py | 1 | 9742 | '''
Single model may achieve LB scores at around 0.29+ ~ 0.30+
Average ensembles can easily get 0.28+ or less
Don't need to be an expert of feature engineering
All you need is a GPU!!!!!!!
'''
########################################
## import packages
########################################
import os
import re
import csv
import codecs
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
from string import punctuation
from gensim.models import KeyedVectors
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation
from keras.layers.merge import concatenate
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.callbacks import EarlyStopping, ModelCheckpoint
import sys
#reload(sys)
#sys.setdefaultencoding('utf-8')
########################################
## set directories and parameters
########################################
BASE_DIR = './data/'
EMBEDDING_FILE = 'GoogleNews-vectors-negative300.bin'
TRAIN_DATA_FILE = BASE_DIR + 'train.csv'
TEST_DATA_FILE = BASE_DIR + 'test.csv'
#MAX_SEQUENCE_LENGTH = 30
MAX_SEQUENCE_LENGTH = 80
MAX_NB_WORDS = 200000
EMBEDDING_DIM = 300
VALIDATION_SPLIT = 0.1
num_lstm = np.random.randint(175, 275)
num_dense = np.random.randint(100, 150)
rate_drop_lstm = 0.15 + np.random.rand() * 0.35
rate_drop_dense = 0.15 + np.random.rand() * 0.35
act = 'relu'
re_weight = True # whether to re-weight classes to fit the 17.5% share in test set
STAMP = 'lstm_%d_%d_%.2f_%.2f'%(num_lstm, num_dense, rate_drop_lstm, \
rate_drop_dense)
########################################
## index word vectors
########################################
print('Indexing word vectors')
word2vec = KeyedVectors.load_word2vec_format(EMBEDDING_FILE, \
binary=True)
print('Found %s word vectors of word2vec' % len(word2vec.vocab))
########################################
## process texts in datasets
########################################
print('Processing text dataset')
# The function "text_to_wordlist" is from
# https://www.kaggle.com/currie32/quora-question-pairs/the-importance-of-cleaning-text
def text_to_wordlist(text, remove_stopwords=False, stem_words=False):
# Clean the text, with the option to remove stopwords and to stem words.
# Convert words to lower case and split them
text = text.lower().split()
# Optionally, remove stop words
if remove_stopwords:
stops = set(stopwords.words("english"))
text = [w for w in text if not w in stops]
text = " ".join(text)
# Clean the text
text = re.sub(r"[^A-Za-z0-9^,!.\/'+-=]", " ", text)
text = re.sub(r"what's", "what is ", text)
text = re.sub(r"\'s", " ", text)
text = re.sub(r"\'ve", " have ", text)
text = re.sub(r"can't", "cannot ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"i'm", "i am ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r",", " ", text)
text = re.sub(r"\.", " ", text)
text = re.sub(r"!", " ! ", text)
text = re.sub(r"\/", " ", text)
text = re.sub(r"\^", " ^ ", text)
text = re.sub(r"\+", " + ", text)
text = re.sub(r"\-", " - ", text)
text = re.sub(r"\=", " = ", text)
text = re.sub(r"'", " ", text)
text = re.sub(r"60k", " 60000 ", text)
text = re.sub(r":", " : ", text)
text = re.sub(r" e g ", " eg ", text)
text = re.sub(r" b g ", " bg ", text)
text = re.sub(r" u s ", " american ", text)
text = re.sub(r"\0s", "0", text)
text = re.sub(r" 9 11 ", "911", text)
text = re.sub(r"e - mail", "email", text)
text = re.sub(r"j k", "jk", text)
text = re.sub(r"\s{2,}", " ", text)
# Optionally, shorten words to their stems
if stem_words:
text = text.split()
stemmer = SnowballStemmer('english')
stemmed_words = [stemmer.stem(word) for word in text]
text = " ".join(stemmed_words)
# Return a list of words
return(text)
texts_1 = []
texts_2 = []
labels = []
with codecs.open(TRAIN_DATA_FILE, encoding='utf-8') as f:
reader = csv.reader(f, delimiter=',')
header = next(reader)
for values in reader:
texts_1.append(text_to_wordlist(values[3]))
texts_2.append(text_to_wordlist(values[4]))
labels.append(int(values[5]))
print('Found %s texts in train.csv' % len(texts_1))
test_texts_1 = []
test_texts_2 = []
test_ids = []
with codecs.open(TEST_DATA_FILE, encoding='utf-8') as f:
reader = csv.reader(f, delimiter=',')
header = next(reader)
for values in reader:
test_texts_1.append(text_to_wordlist(values[1]))
test_texts_2.append(text_to_wordlist(values[2]))
test_ids.append(values[0])
print('Found %s texts in test.csv' % len(test_texts_1))
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts_1 + texts_2 + test_texts_1 + test_texts_2)
sequences_1 = tokenizer.texts_to_sequences(texts_1)
sequences_2 = tokenizer.texts_to_sequences(texts_2)
test_sequences_1 = tokenizer.texts_to_sequences(test_texts_1)
test_sequences_2 = tokenizer.texts_to_sequences(test_texts_2)
word_index = tokenizer.word_index
print('Found %s unique tokens' % len(word_index))
data_1 = pad_sequences(sequences_1, maxlen=MAX_SEQUENCE_LENGTH)
data_2 = pad_sequences(sequences_2, maxlen=MAX_SEQUENCE_LENGTH)
labels = np.array(labels)
print('Shape of data tensor:', data_1.shape)
print('Shape of label tensor:', labels.shape)
test_data_1 = pad_sequences(test_sequences_1, maxlen=MAX_SEQUENCE_LENGTH)
test_data_2 = pad_sequences(test_sequences_2, maxlen=MAX_SEQUENCE_LENGTH)
test_ids = np.array(test_ids)
########################################
## prepare embeddings
########################################
print('Preparing embedding matrix')
nb_words = min(MAX_NB_WORDS, len(word_index))+1
embedding_matrix = np.zeros((nb_words, EMBEDDING_DIM))
for word, i in word_index.items():
if word in word2vec.vocab:
embedding_matrix[i] = word2vec.word_vec(word)
print('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))
########################################
## sample train/validation data
########################################
#np.random.seed(1234)
perm = np.random.permutation(len(data_1))
idx_train = perm[:int(len(data_1)*(1-VALIDATION_SPLIT))]
idx_val = perm[int(len(data_1)*(1-VALIDATION_SPLIT)):]
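# build each training pair in both orders -- (q1, q2) and (q2, q1) -- as a simple
# augmentation so the model is encouraged to treat the pair symmetrically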
data_1_train = np.vstack((data_1[idx_train], data_2[idx_train]))
data_2_train = np.vstack((data_2[idx_train], data_1[idx_train]))
labels_train = np.concatenate((labels[idx_train], labels[idx_train]))
data_1_val = np.vstack((data_1[idx_val], data_2[idx_val]))
data_2_val = np.vstack((data_2[idx_val], data_1[idx_val]))
labels_val = np.concatenate((labels[idx_val], labels[idx_val]))
weight_val = np.ones(len(labels_val))
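# when re-weighting, scale the validation sample weights so the reported validation
# loss reflects the ~17.5% duplicate share expected in the test set (see re_weight above)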
if re_weight:
weight_val *= 0.472001959
weight_val[labels_val==0] = 1.309028344
########################################
## define the model structure
########################################
embedding_layer = Embedding(nb_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=True)
lstm_layer = LSTM(num_lstm, dropout=rate_drop_lstm, recurrent_dropout=rate_drop_lstm)
#lstm_layer = LSTM(num_lstm, dropout=rate_drop_lstm, recurrent_dropout=rate_drop_lstm , activation=act , recurrent_activation='tanh' )
sequence_1_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences_1 = embedding_layer(sequence_1_input)
x1 = lstm_layer(embedded_sequences_1)
sequence_2_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences_2 = embedding_layer(sequence_2_input)
y1 = lstm_layer(embedded_sequences_2)
merged = concatenate([x1, y1])
merged = Dropout(rate_drop_dense)(merged)
merged = BatchNormalization()(merged)
merged = Dense(num_dense, activation=act)(merged)
merged = Dropout(rate_drop_dense)(merged)
merged = BatchNormalization()(merged)
preds = Dense(1, activation='sigmoid')(merged)
########################################
## add class weight
########################################
if re_weight:
class_weight = {0: 1.309028344, 1: 0.472001959}
else:
class_weight = None
########################################
## train the model
########################################
model = Model(inputs=[sequence_1_input, sequence_2_input], \
outputs=preds)
model.compile(loss='binary_crossentropy',
optimizer='nadam',
metrics=['acc'])
#model.summary()
print(STAMP)
early_stopping = EarlyStopping(monitor='val_loss', patience=0)
bst_model_path = STAMP + '.h5'
model_checkpoint = ModelCheckpoint(bst_model_path, save_best_only=True, save_weights_only=True)
hist = model.fit([data_1_train, data_2_train], labels_train, \
validation_data=([data_1_val, data_2_val], labels_val, weight_val), \
epochs=200, batch_size=2048, shuffle=True, \
class_weight=class_weight, callbacks=[early_stopping, model_checkpoint])
model.load_weights(bst_model_path)
bst_val_score = min(hist.history['val_loss'])
########################################
## make the submission
########################################
print('Start making the submission before fine-tuning')
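# predict each test pair in both orders and average, mirroring the order-swapping
# augmentation used during training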
preds = model.predict([test_data_1, test_data_2], batch_size=2048, verbose=1)
preds += model.predict([test_data_2, test_data_1], batch_size=2048, verbose=1)
preds /= 2
submission = pd.DataFrame({'test_id':test_ids, 'is_duplicate':preds.ravel()})
submission.to_csv('%.4f_'%(bst_val_score)+STAMP+'.csv', index=False)
| mit |
previtus/MGR-Project-Code | ResultsGraphing/621_base_cnns_on299.py | 1 | 2947 | import os
from Omnipresent import len_
from Downloader.VisualizeHistory import loadHistory
from ResultsGraphing.custom import plot_only_averages, finally_show, count_averages, plot_together,save_plot
dir_folder = os.path.dirname(os.path.abspath(__file__))
###
"""
The idea:
Test various base cnns.
We have dataset 5556x_mark_res_299x299 with mix model made with variable base cnn:
vgg16
vgg19
resnet50
xception
inception v3
One graph to compare only avg valid.
One graph to compare everything - maybe thats gonna also be legible.
"""
SAVE = False
path_folder = dir_folder + ''
path_folder = '/data/k-fold-tests/6.2.1. different CNN/alt_diffCNNs_try_299set/'
out_folder_1 = dir_folder + '/graphs/6.2.1._different_CNN_299/fig_average_valerr_299'
out_folder_2 = dir_folder + '/graphs/6.2.1._different_CNN_299/fig_allerr_299'
resnet50 = path_folder + "5556x_mark_res_299x299_resnet50_cnn_100.npy"
vgg16 = path_folder + "5556x_mark_res_299x299_vgg16_cnn_100.npy"
vgg19 = path_folder + "5556x_mark_res_299x299_vgg19_cnn_100.npy"
inception_v3 = path_folder + "5556x_mark_res_299x299_inception_v3_cnn_100.npy"
xception = path_folder + "5556x_mark_res_299x299_xception_cnn_100.npy"
data_paths = [resnet50, vgg16, vgg19, xception, inception_v3]
val_data_names = ["resnet50","vgg16","vgg19","xception","inception_v3"]
train_data_names = ["resnet50","vgg16","vgg19","xception","inception_v3"]
hard_colors = ['red', 'green', 'blue', 'orange', 'purple']
light_colors = ['pink', 'lightgreen', 'lightblue', 'yellow', 'hotpink']
both_colors = []
both_data_names = []
special_histories = []
for i in range(0,len(data_paths)):
special_histories.append(loadHistory(data_paths[i]))
special_histories[i] = count_averages(special_histories[i], 'loss')
both_colors.append(light_colors[i])
both_colors.append(hard_colors[i])
both_data_names.append(train_data_names[i])
both_data_names.append(val_data_names[i]+" validation")
import matplotlib.pyplot as plt
#custom_title = 'Validation error averages'
#plt = plot_only_averages(plt, special_histories, val_data_names, hard_colors, custom_title)
#custom_title = 'Training error averages'
#plt = plot_only_averages(plt, special_histories, train_data_names, light_colors, custom_title, just='train')
custom_title = 'Validation and Training error averages, 299px'
plt = plot_only_averages(plt, special_histories, both_data_names, both_colors, custom_title, just='both',
save=[SAVE,out_folder_1])
# FIGURE 2
custom_title = 'Base model comparison, 299px'
colors = []
for c in hard_colors:
colors.append(c)
colors.append(c)
names_to_print = []
for i in val_data_names:
names_to_print.append(i + 'average val')
names_to_print.append(i + 'val')
print colors
print names_to_print
plt = plot_together(special_histories, names_to_print, colors, custom_title)
save_plot(plt, SAVE, out_folder_2)
finally_show(plt) | mit |
ssaeger/scikit-learn | sklearn/externals/joblib/__init__.py | 31 | 4757 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
  over, for instance when prototyping computation-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: efficiently persisting arbitrary
  objects containing large data is hard. Using
  joblib's caching mechanism avoids hand-written persistence and
  implicitly links the file on disk to the execution context of
  the original Python object. As a result, joblib's persistence is
  good for resuming an application's state or a computational job, e.g.
  after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanism to help track what
has been ran, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
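   For instance, a minimal sketch (the file name below is only illustrative)::
      >>> import numpy as np
      >>> from sklearn.externals.joblib import dump, load
      >>> _ = dump(np.arange(10), '/tmp/example.pkl')   # doctest: +SKIP
      >>> load('/tmp/example.pkl')                      # doctest: +SKIP
      array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])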
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.4'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
anjalisood/spark-tk | python/sparktk/frame/ops/classification_metrics_value.py | 7 | 3082 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
from sparktk.propobj import PropertiesObject
class ClassificationMetricsValue(PropertiesObject):
"""
ClassificationMetricsValue class used to hold the data returned from binary_classification_metrics()
and multiclass_classification_metrics().
"""
def __init__(self, tc, scala_result):
self._tc = tc
if scala_result:
self._accuracy = scala_result.accuracy()
cm = scala_result.confusionMatrix()
if cm:
self._confusion_matrix = cm
column_list = self._tc.jutils.convert.from_scala_seq(cm.columnLabels())
row_label_list = self._tc.jutils.convert.from_scala_seq(cm.rowLabels())
header = ["Predicted_" + column.title() for column in column_list]
row_index = ["Actual_" + row_label.title() for row_label in row_label_list]
data = [list(x) for x in list(cm.getMatrix())]
self._confusion_matrix = pd.DataFrame(data, index=row_index, columns=header)
else:
#empty pandas frame
self._confusion_matrix = pd.DataFrame()
self._f_measure = scala_result.fMeasure()
self._precision = scala_result.precision()
self._recall = scala_result.recall()
else:
self._accuracy = 0.0
self._f_measure = 0.0
self._recall = 0.0
self._precision = 0.0
self._confusion_matrix = 0.0
@property
def accuracy(self):
return self._accuracy
@accuracy.setter
def accuracy(self, value):
self._accuracy = value
@property
def confusion_matrix(self):
return self._confusion_matrix
@confusion_matrix.setter
def confusion_matrix(self, value):
self._confusion_matrix = value
@property
def f_measure(self):
return self._f_measure
@f_measure.setter
def f_measure(self, value):
self._f_measure = value
@property
def precision(self):
return self._precision
@precision.setter
def precision(self, value):
self._precision = value
@property
def recall(self):
return self._recall
@recall.setter
def recall(self, value):
self._recall = value
    # not a @staticmethod: it needs `self` to return the TkContext stored on the instance
    def get_context(self):
return self._tc
| apache-2.0 |
loli/semisupervisedforests | examples/cluster/plot_digits_linkage.py | 369 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage option for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich getting richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
# cost of the hierarchical clustering methods are strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
cython-testbed/pandas | pandas/core/computation/scope.py | 6 | 9230 | """
Module for scope operations
"""
import sys
import struct
import inspect
import datetime
import itertools
import pprint
import numpy as np
import pandas
import pandas as pd # noqa
from pandas.compat import DeepChainMap, map, StringIO
from pandas.core.base import StringMixin
import pandas.core.computation as compu
def _ensure_scope(level, global_dict=None, local_dict=None, resolvers=(),
target=None, **kwargs):
"""Ensure that we are grabbing the correct scope."""
return Scope(level + 1, global_dict=global_dict, local_dict=local_dict,
resolvers=resolvers, target=target)
def _replacer(x):
"""Replace a number with its hexadecimal representation. Used to tag
temporary variables with their calling scope's id.
"""
# get the hex repr of the binary char and remove 0x and pad by pad_size
# zeros
try:
hexin = ord(x)
except TypeError:
# bytes literals masquerade as ints when iterating in py3
hexin = x
return hex(hexin)
def _raw_hex_id(obj):
"""Return the padded hexadecimal id of ``obj``."""
    # interpret as a pointer since that's really what id() returns
packed = struct.pack('@P', id(obj))
return ''.join(map(_replacer, packed))
_DEFAULT_GLOBALS = {
'Timestamp': pandas._libs.tslib.Timestamp,
'datetime': datetime.datetime,
'True': True,
'False': False,
'list': list,
'tuple': tuple,
'inf': np.inf,
'Inf': np.inf,
}
def _get_pretty_string(obj):
"""Return a prettier version of obj
Parameters
----------
obj : object
Object to pretty print
Returns
-------
s : str
Pretty print object repr
"""
sio = StringIO()
pprint.pprint(obj, stream=sio)
return sio.getvalue()
class Scope(StringMixin):
"""Object to hold scope, with a few bells to deal with some custom syntax
and contexts added by pandas.
Parameters
----------
level : int
global_dict : dict or None, optional, default None
local_dict : dict or Scope or None, optional, default None
resolvers : list-like or None, optional, default None
target : object
Attributes
----------
level : int
scope : DeepChainMap
target : object
temps : dict
"""
__slots__ = 'level', 'scope', 'target', 'temps'
def __init__(self, level, global_dict=None, local_dict=None, resolvers=(),
target=None):
self.level = level + 1
# shallow copy because we don't want to keep filling this up with what
# was there before if there are multiple calls to Scope/_ensure_scope
self.scope = DeepChainMap(_DEFAULT_GLOBALS.copy())
self.target = target
if isinstance(local_dict, Scope):
self.scope.update(local_dict.scope)
if local_dict.target is not None:
self.target = local_dict.target
self.update(local_dict.level)
frame = sys._getframe(self.level)
try:
# shallow copy here because we don't want to replace what's in
# scope when we align terms (alignment accesses the underlying
# numpy array of pandas objects)
self.scope = self.scope.new_child((global_dict or
frame.f_globals).copy())
if not isinstance(local_dict, Scope):
self.scope = self.scope.new_child((local_dict or
frame.f_locals).copy())
finally:
del frame
# assumes that resolvers are going from outermost scope to inner
if isinstance(local_dict, Scope):
resolvers += tuple(local_dict.resolvers.maps)
self.resolvers = DeepChainMap(*resolvers)
self.temps = {}
def __unicode__(self):
scope_keys = _get_pretty_string(list(self.scope.keys()))
res_keys = _get_pretty_string(list(self.resolvers.keys()))
unicode_str = '{name}(scope={scope_keys}, resolvers={res_keys})'
return unicode_str.format(name=type(self).__name__,
scope_keys=scope_keys,
res_keys=res_keys)
@property
def has_resolvers(self):
"""Return whether we have any extra scope.
        For example, DataFrames pass their columns as resolvers during calls to
``DataFrame.eval()`` and ``DataFrame.query()``.
Returns
-------
hr : bool
"""
return bool(len(self.resolvers))
def resolve(self, key, is_local):
"""Resolve a variable name in a possibly local context
Parameters
----------
key : text_type
A variable name
is_local : bool
Flag indicating whether the variable is local or not (prefixed with
the '@' symbol)
Returns
-------
value : object
The value of a particular variable
"""
try:
# only look for locals in outer scope
if is_local:
return self.scope[key]
# not a local variable so check in resolvers if we have them
if self.has_resolvers:
return self.resolvers[key]
# if we're here that means that we have no locals and we also have
# no resolvers
assert not is_local and not self.has_resolvers
return self.scope[key]
except KeyError:
try:
# last ditch effort we look in temporaries
# these are created when parsing indexing expressions
# e.g., df[df > 0]
return self.temps[key]
except KeyError:
raise compu.ops.UndefinedVariableError(key, is_local)
def swapkey(self, old_key, new_key, new_value=None):
"""Replace a variable name, with a potentially new value.
Parameters
----------
old_key : str
Current variable name to replace
new_key : str
New variable name to replace `old_key` with
new_value : object
Value to be replaced along with the possible renaming
"""
if self.has_resolvers:
maps = self.resolvers.maps + self.scope.maps
else:
maps = self.scope.maps
maps.append(self.temps)
for mapping in maps:
if old_key in mapping:
mapping[new_key] = new_value
return
def _get_vars(self, stack, scopes):
"""Get specifically scoped variables from a list of stack frames.
Parameters
----------
stack : list
A list of stack frames as returned by ``inspect.stack()``
scopes : sequence of strings
A sequence containing valid stack frame attribute names that
evaluate to a dictionary. For example, ('locals', 'globals')
"""
variables = itertools.product(scopes, stack)
for scope, (frame, _, _, _, _, _) in variables:
try:
d = getattr(frame, 'f_' + scope)
self.scope = self.scope.new_child(d)
finally:
# won't remove it, but DECREF it
# in Py3 this probably isn't necessary since frame won't be
# scope after the loop
del frame
def update(self, level):
"""Update the current scope by going back `level` levels.
Parameters
----------
level : int or None, optional, default None
"""
sl = level + 1
# add sl frames to the scope starting with the
# most distant and overwriting with more current
# makes sure that we can capture variable scope
stack = inspect.stack()
try:
self._get_vars(stack[:sl], scopes=['locals'])
finally:
del stack[:], stack
def add_tmp(self, value):
"""Add a temporary variable to the scope.
Parameters
----------
value : object
An arbitrary object to be assigned to a temporary variable.
Returns
-------
name : basestring
The name of the temporary variable created.
"""
name = '{name}_{num}_{hex_id}'.format(name=type(value).__name__,
num=self.ntemps,
hex_id=_raw_hex_id(self))
# add to inner most scope
assert name not in self.temps
self.temps[name] = value
assert name in self.temps
# only increment if the variable gets put in the scope
return name
@property
def ntemps(self):
"""The number of temporary variables in this scope"""
return len(self.temps)
@property
def full_scope(self):
"""Return the full scope for use with passing to engines transparently
as a mapping.
Returns
-------
vars : DeepChainMap
All variables in this scope.
"""
maps = [self.temps] + self.resolvers.maps + self.scope.maps
return DeepChainMap(*maps)
| bsd-3-clause |
ScatterHQ/eliot | eliot/tests/test_dask.py | 1 | 6737 | """Tests for eliot.dask."""
from unittest import TestCase, skipUnless
from ..testing import capture_logging, LoggedAction, LoggedMessage
from .. import start_action, log_message
try:
import dask
from dask.bag import from_sequence
from dask.distributed import Client
import dask.dataframe as dd
import pandas as pd
except ImportError:
dask = None
else:
from ..dask import (
compute_with_trace,
_RunWithEliotContext,
_add_logging,
persist_with_trace,
)
@skipUnless(dask, "Dask not available.")
class DaskTests(TestCase):
"""Tests for end-to-end functionality."""
def setUp(self):
dask.config.set(scheduler="threading")
def test_compute(self):
"""compute_with_trace() runs the same logic as compute()."""
bag = from_sequence([1, 2, 3])
bag = bag.map(lambda x: x * 7).map(lambda x: x * 4)
bag = bag.fold(lambda x, y: x + y)
self.assertEqual(dask.compute(bag), compute_with_trace(bag))
def test_future(self):
"""compute_with_trace() can handle Futures."""
client = Client(processes=False)
self.addCleanup(client.shutdown)
[bag] = dask.persist(from_sequence([1, 2, 3]))
bag = bag.map(lambda x: x * 5)
result = dask.compute(bag)
self.assertEqual(result, ([5, 10, 15],))
self.assertEqual(result, compute_with_trace(bag))
def test_persist_result(self):
"""persist_with_trace() runs the same logic as process()."""
client = Client(processes=False)
self.addCleanup(client.shutdown)
bag = from_sequence([1, 2, 3])
bag = bag.map(lambda x: x * 7)
self.assertEqual(
[b.compute() for b in dask.persist(bag)],
[b.compute() for b in persist_with_trace(bag)],
)
def test_persist_pandas(self):
"""persist_with_trace() with a Pandas dataframe.
This ensures we don't blow up, which used to be the case.
"""
df = pd.DataFrame()
df = dd.from_pandas(df, npartitions=1)
persist_with_trace(df)
@capture_logging(None)
def test_persist_logging(self, logger):
"""persist_with_trace() preserves Eliot context."""
def persister(bag):
[bag] = persist_with_trace(bag)
return dask.compute(bag)
self.assert_logging(logger, persister, "dask:persist")
@capture_logging(None)
def test_compute_logging(self, logger):
"""compute_with_trace() preserves Eliot context."""
self.assert_logging(logger, compute_with_trace, "dask:compute")
def assert_logging(self, logger, run_with_trace, top_action_name):
"""Utility function for _with_trace() logging tests."""
def mult(x):
log_message(message_type="mult")
return x * 4
def summer(x, y):
log_message(message_type="finally")
return x + y
bag = from_sequence([1, 2])
bag = bag.map(mult).fold(summer)
with start_action(action_type="act1"):
run_with_trace(bag)
[logged_action] = LoggedAction.ofType(logger.messages, "act1")
self.assertEqual(
logged_action.type_tree(),
{
"act1": [
{
top_action_name: [
{"eliot:remote_task": ["dask:task", "mult"]},
{"eliot:remote_task": ["dask:task", "mult"]},
{"eliot:remote_task": ["dask:task", "finally"]},
]
}
]
},
)
# Make sure dependencies are tracked:
(
mult1_msg,
mult2_msg,
final_msg,
) = LoggedMessage.ofType(logger.messages, "dask:task")
self.assertEqual(
sorted(final_msg.message["dependencies"]),
sorted([mult1_msg.message["key"], mult2_msg.message["key"]]),
)
# Make sure dependencies are logically earlier in the logs:
self.assertTrue(
mult1_msg.message["task_level"] < final_msg.message["task_level"]
)
self.assertTrue(
mult2_msg.message["task_level"] < final_msg.message["task_level"]
)
@skipUnless(dask, "Dask not available.")
class AddLoggingTests(TestCase):
"""Tests for _add_logging()."""
maxDiff = None
def test_add_logging_to_full_graph(self):
"""_add_logging() recreates Dask graph with wrappers."""
bag = from_sequence([1, 2, 3])
bag = bag.map(lambda x: x * 7).map(lambda x: x * 4)
bag = bag.fold(lambda x, y: x + y)
graph = bag.__dask_graph__()
# Add logging:
with start_action(action_type="bleh"):
logging_added = _add_logging(graph)
# Ensure resulting graph hasn't changed substantively:
logging_removed = {}
for key, value in logging_added.items():
if callable(value[0]):
func, args = value[0], value[1:]
self.assertIsInstance(func, _RunWithEliotContext)
value = (func.func,) + args
logging_removed[key] = value
self.assertEqual(logging_removed, graph)
def test_add_logging_explicit(self):
"""_add_logging() on more edge cases of the graph."""
def add(s):
return s + "s"
def add2(s):
return s + "s"
# b runs first, then d, then a and c.
graph = {
"a": "d",
"d": [1, 2, (add, "b")],
("b", 0): 1,
"c": (add2, "d"),
}
with start_action(action_type="bleh") as action:
task_id = action.task_uuid
self.assertEqual(
_add_logging(graph),
{
"d": [
1,
2,
(
_RunWithEliotContext(
task_id=task_id + "@/2",
func=add,
key="d",
dependencies=["b"],
),
"b",
),
],
"a": "d",
("b", 0): 1,
"c": (
_RunWithEliotContext(
task_id=task_id + "@/3",
func=add2,
key="c",
dependencies=["d"],
),
"d",
),
},
)
| apache-2.0 |
evgchz/scikit-learn | sklearn/feature_selection/variance_threshold.py | 12 | 2440 | # Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
return self.variances_ > self.threshold
| bsd-3-clause |
antiface/mne-python | tutorials/plot_epochs_objects.py | 7 | 3927 | """
.. _tut_epochs_objects:
The :class:`Epochs <mne.Epochs>` data structure: epoched data
=============================================================
"""
from __future__ import print_function
import mne
import os.path as op
import numpy as np
from matplotlib import pyplot as plt
###############################################################################
# :class:`Epochs <mne.Epochs>` objects are a way of representing continuous
# data as a collection of time-locked trials, stored in an array of
# `shape(n_events, n_channels, n_times)`. They are useful for many statistical
# methods in neuroscience, and make it easy to quickly overview what occurs
# during a trial.
#
# :class:`Epochs <mne.Epochs>` objects can be created in three ways:
# 1. From a :class:`Raw <mne.io.RawFIF>` object, along with event times
# 2. From an :class:`Epochs <mne.Epochs>` object that has been saved as a
# `.fif` file
# 3. From scratch using :class:`EpochsArray <mne.EpochsArray>`
# Load a dataset that contains events
raw = mne.io.RawFIF(
op.join(mne.datasets.sample.data_path(), 'MEG', 'sample',
'sample_audvis_raw.fif'))
# If your raw object has a stim channel, you can construct an event array
# easily
events = mne.find_events(raw, stim_channel='STI 014')
# Show the number of events (number of rows)
print('Number of events:', len(events))
# Show all unique event codes (3rd column)
print('Unique event codes:', np.unique(events[:, 2]))
# Specify event codes of interest with descriptive labels
event_id = dict(left=1, right=2)
###############################################################################
# Now, we can create an :class:`mne.Epochs` object with the events we've
# extracted. Note that epochs constructed in this manner will not have their
# data available until explicitly read into memory, which you can do with
# :func:`get_data <mne.Epochs.get_data>`. Alternatively, you can use
# `preload=True`.
#
# Note that there are many options available when loading an
# :class:`mne.Epochs` object. For more detailed information, see (**LINK TO
# EPOCHS LOADING TUTORIAL**)
# Expose the raw data as epochs, cut from -0.1 s to 1.0 s relative to the event
# onsets
epochs = mne.Epochs(raw, events, event_id, tmin=-0.1, tmax=1,
baseline=(None, 0), preload=True)
print(epochs)
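# Since the epochs above were created with `preload=True`, the underlying data array
# is immediately available. As an illustrative check (not part of the original
# tutorial), `get_data` returns an array of shape (n_epochs, n_channels, n_times):
data = epochs.get_data()
print('Epochs data shape:', data.shape)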
###############################################################################
# Epochs behave similarly to :class:`mne.io.Raw` objects. They have an
# :class:`info <mne.io.meas_info.Info>` attribute that has all of the same
# information, as well as a number of attributes unique to the events contained
# within the object.
print(epochs.events[:3], epochs.event_id, sep='\n\n')
###############################################################################
# You can select subsets of epochs by indexing the :class:`Epochs <mne.Epochs>`
# object directly. Alternatively, if you have epoch names specified in
# `event_id` then you may index with strings instead.
print(epochs[1:5])
print(epochs['right'])
###############################################################################
# It is also possible to iterate through :class:`Epochs <mne.Epochs>` objects
# in this way. Note that behavior is different if you iterate on `Epochs`
# directly rather than indexing:
# These will be epochs objects
for i in range(3):
print(epochs[i])
# These will be arrays
for ep in epochs[:2]:
print(ep)
###############################################################################
# If you wish to look at the average across trial types, then you may do so,
# creating an `Evoked` object in the process.
ev_left = epochs['left'].average()
ev_right = epochs['right'].average()
f, axs = plt.subplots(3, 2, figsize=(10, 5))
_ = f.suptitle('Left / Right', fontsize=20)
_ = ev_left.plot(axes=axs[:, 0], show=False)
_ = ev_right.plot(axes=axs[:, 1], show=False)
plt.tight_layout()
| bsd-3-clause |
JoeJimFlood/RugbyPredictifier | 2020SuperRugby/round_matrix.py | 2 | 6192 | import os
os.chdir(os.path.dirname(__file__))
import pandas as pd
import matchup
import xlsxwriter
import xlrd
import sys
import time
import collections
import matplotlib.pyplot as plt
def rgb2hex(r, g, b):
r_hex = hex(r)[-2:].replace('x', '0')
g_hex = hex(g)[-2:].replace('x', '0')
b_hex = hex(b)[-2:].replace('x', '0')
return '#' + r_hex + g_hex + b_hex
round_timer = time.time()
round_number = 'SF_Matrix'
matchups = collections.OrderedDict()
matchups['CRUSADERS'] = [('CRUSADERS', 'JAGUARES'),
('CRUSADERS', 'BRUMBIES'),
('CRUSADERS', 'HURRICANES')]
matchups['JAGUARES'] = [('JAGUARES', 'BRUMBIES'),
('JAGUARES', 'HURRICANES')]
matchups['BRUMBIES'] = [('BRUMBIES', 'HURRICANES')]
location = os.getcwd().replace('\\', '/')
output_file = location + '/Weekly Forecasts/Round_' + str(round_number) + '.xlsx'
output_fig = location + '/Weekly Forecasts/Round_' + str(round_number) + '.png'
n_games = 0
for day in matchups:
n_games += len(matchups[day])
colours = {}
team_formats = {}
colour_df = pd.DataFrame.from_csv(location + '/colours.csv')
teams = list(colour_df.index)
for team in teams:
primary = rgb2hex(int(colour_df.loc[team, 'R1']), int(colour_df.loc[team, 'G1']), int(colour_df.loc[team, 'B1']))
secondary = rgb2hex(int(colour_df.loc[team, 'R2']), int(colour_df.loc[team, 'G2']), int(colour_df.loc[team, 'B2']))
colours[team] = (primary, secondary)
for read_data in range(1):
week_book = xlsxwriter.Workbook(output_file)
header_format = week_book.add_format({'align': 'center', 'bold': True, 'bottom': True})
index_format = week_book.add_format({'align': 'right', 'bold': True})
score_format = week_book.add_format({'num_format': '#0', 'align': 'right'})
percent_format = week_book.add_format({'num_format': '#0%', 'align': 'right'})
for team in teams:
team_formats[team] = week_book.add_format({'align': 'center', 'bold': True, 'border': True,
'bg_color': colours[team][0], 'font_color': colours[team][1]})
for game_time in matchups:
if read_data:
data_book = xlrd.open_workbook(output_file)
data_sheet = data_book.sheet_by_name(game_time)
sheet = week_book.add_worksheet(game_time)
sheet.write_string(1, 0, 'Chance of Winning', index_format)
sheet.write_string(2, 0, 'Expected Score', index_format)
for i in range(1, 20):
sheet.write_string(2+i, 0, str(5*i) + 'th Percentile Score', index_format)
sheet.write_string(22, 0, 'Chance of Bonus Point Win', index_format)
#sheet.write_string(23, 0, 'Chance of 4-Try Bonus Point with Draw', index_format)
#sheet.write_string(24, 0, 'Chance of 4-Try Bonus Point with Loss', index_format)
sheet.write_string(23, 0, 'Chance of Losing Bonus Point', index_format)
sheet.freeze_panes(0, 1)
games = matchups[game_time]
for i in range(len(games)):
home = games[i][0]
away = games[i][1]
homecol = 3 * i + 1
awaycol = 3 * i + 2
sheet.write_string(0, homecol, home, team_formats[home])
sheet.write_string(0, awaycol, away, team_formats[away])
sheet.write_string(0, awaycol + 1, ' ')
if read_data:
sheet.write_number(1, homecol, data_sheet.cell(1, homecol).value, percent_format)
sheet.write_number(1, awaycol, data_sheet.cell(1, awaycol).value, percent_format)
for rownum in range(2, 22):
sheet.write_number(rownum, homecol, data_sheet.cell(rownum, homecol).value, score_format)
sheet.write_number(rownum, awaycol, data_sheet.cell(rownum, awaycol).value, score_format)
for rownum in range(22, 26):
sheet.write_number(rownum, homecol, data_sheet.cell(rownum, homecol).value, percent_format)
sheet.write_number(rownum, awaycol, data_sheet.cell(rownum, awaycol).value, percent_format)
else:
results = matchup.matchup(home, away)
probwin = results['ProbWin']
sheet.write_number(1, homecol, probwin[home], percent_format)
sheet.write_number(1, awaycol, probwin[away], percent_format)
home_dist = results['Scores'][home]
away_dist = results['Scores'][away]
home_bp = results['Bonus Points'][home]
away_bp = results['Bonus Points'][away]
sheet.write_number(2, homecol, home_dist['mean'], score_format)
sheet.write_number(2, awaycol, away_dist['mean'], score_format)
                # use a separate loop variable here: reusing `i` would clobber the
                # outer game index that is still needed below
                for p in range(1, 20):
                    #print(type(home_dist))
                    #print(home_dist[str(5*p)+'%'])
                    sheet.write_number(2+p, homecol, home_dist[str(5*p)+'%'], score_format)
                    sheet.write_number(2+p, awaycol, away_dist[str(5*p)+'%'], score_format)
sheet.write_number(22, homecol, home_bp['4-Try Bonus Point with Win'], percent_format)
#sheet.write_number(23, homecol, home_bp['Try-Scoring Bonus Point with Draw'], percent_format)
#sheet.write_number(24, homecol, home_bp['Try-Scoring Bonus Point with Loss'], percent_format)
sheet.write_number(23, homecol, home_bp['Losing Bonus Point'], percent_format)
sheet.write_number(22, awaycol, away_bp['4-Try Bonus Point with Win'], percent_format)
#sheet.write_number(23, awaycol, away_bp['Try-Scoring Bonus Point with Draw'], percent_format)
#sheet.write_number(24, awaycol, away_bp['Try-Scoring Bonus Point with Loss'], percent_format)
sheet.write_number(23, awaycol, away_bp['Losing Bonus Point'], percent_format)
if i != len(games) - 1:
sheet.write_string(0, 3 * i + 3, ' ')
week_book.close()
print('Round ' + str(round_number) + ' predictions calculated in ' + str(round((time.time() - round_timer) / 60, 2)) + ' minutes') | mit |
ryfeus/lambda-packs | Pandas_numpy/source/numpy/doc/creation.py | 12 | 5501 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to NumPy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details on its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists,
and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic NumPy Array Creation
==============================
NumPy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64. ::
 >>> np.zeros((2, 3))
 array([[ 0.,  0.,  0.],
        [ 0.,  0.,  0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of; they are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: h5py
FITS: Astropy
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
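As a minimal sketch (assuming an HDF5 file ``data.h5`` containing a dataset
named ``dset``; both names are only illustrative): ::
 >>> import h5py                                  # doctest: +SKIP
 >>> f = h5py.File('data.h5', 'r')                # doctest: +SKIP
 >>> arr = np.asarray(f['dset'])                  # doctest: +SKIP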
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
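For example, a small comma-separated file can also be read directly with numpy's
own text readers (the file name is only illustrative): ::
 >>> arr = np.loadtxt('data.csv', delimiter=',')      # doctest: +SKIP
 >>> arr = np.genfromtxt('data.csv', delimiter=',')   # doctest: +SKIP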
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
read the data, one can wrap that library with a variety of techniques though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
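A minimal sketch of the simple case (the file name and dtype are only
illustrative): ::
 >>> a = np.arange(4, dtype=np.int32)
 >>> a.tofile('data.bin')                         # doctest: +SKIP
 >>> np.fromfile('data.bin', dtype=np.int32)      # doctest: +SKIP
 array([0, 1, 2, 3], dtype=int32)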
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common are the
many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
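For instance, drawing a small random array and building a diagonal matrix: ::
 >>> r = np.random.randint(0, 10, size=(2, 3))    # doctest: +SKIP
 >>> np.diag([1, 2, 3])
 array([[1, 0, 0],
        [0, 2, 0],
        [0, 0, 3]])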
"""
from __future__ import division, absolute_import, print_function
| mit |
r888888888/models | cognitive_mapping_and_planning/tfcode/cmp_utils.py | 14 | 6936 | # Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for setting up the CMP graph.
"""
import os, numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.slim import arg_scope
import logging
from src import utils
import src.file_utils as fu
from tfcode import tf_utils
resnet_v2 = tf_utils.resnet_v2
custom_residual_block = tf_utils.custom_residual_block
def value_iteration_network(
fr, num_iters, val_neurons, action_neurons, kernel_size, share_wts=False,
name='vin', wt_decay=0.0001, activation_fn=None, shape_aware=False):
"""
Constructs a Value Iteration Network, convolutions and max pooling across
channels.
Input:
fr: NxWxHxC
val_neurons: Number of channels for maintaining the value.
action_neurons: Computes action_neurons * val_neurons at each iteration to
max pool over.
Output:
value image: NxHxWx(val_neurons)
"""
init_var = np.sqrt(2.0/(kernel_size**2)/(val_neurons*action_neurons))
vals = []
with tf.variable_scope(name) as varscope:
if shape_aware == False:
fr_shape = tf.unstack(tf.shape(fr))
val_shape = tf.stack(fr_shape[:-1] + [val_neurons])
val = tf.zeros(val_shape, name='val_init')
else:
val = tf.expand_dims(tf.zeros_like(fr[:,:,:,0]), dim=-1) * \
tf.constant(0., dtype=tf.float32, shape=[1,1,1,val_neurons])
val_shape = tf.shape(val)
vals.append(val)
for i in range(num_iters):
if share_wts:
# The first Value Iteration maybe special, so it can have its own
# paramterss.
scope = 'conv'
if i == 0: scope = 'conv_0'
if i > 1: varscope.reuse_variables()
else:
scope = 'conv_{:d}'.format(i)
val = slim.conv2d(tf.concat([val, fr], 3, name='concat_{:d}'.format(i)),
num_outputs=action_neurons*val_neurons,
kernel_size=kernel_size, stride=1, activation_fn=activation_fn,
scope=scope, normalizer_fn=None,
weights_regularizer=slim.l2_regularizer(wt_decay),
weights_initializer=tf.random_normal_initializer(stddev=init_var),
biases_initializer=tf.zeros_initializer())
val = tf.reshape(val, [-1, action_neurons*val_neurons, 1, 1],
name='re_{:d}'.format(i))
val = slim.max_pool2d(val, kernel_size=[action_neurons,1],
stride=[action_neurons,1], padding='VALID',
scope='val_{:d}'.format(i))
val = tf.reshape(val, val_shape, name='unre_{:d}'.format(i))
vals.append(val)
return val, vals
def rotate_preds(loc_on_map, relative_theta, map_size, preds,
output_valid_mask):
with tf.name_scope('rotate'):
flow_op = tf_utils.get_flow(loc_on_map, relative_theta, map_size=map_size)
if type(preds) != list:
rotated_preds, valid_mask_warps = tf_utils.dense_resample(preds, flow_op,
output_valid_mask)
else:
rotated_preds = [] ;valid_mask_warps = []
for pred in preds:
rotated_pred, valid_mask_warp = tf_utils.dense_resample(pred, flow_op,
output_valid_mask)
rotated_preds.append(rotated_pred)
valid_mask_warps.append(valid_mask_warp)
return rotated_preds, valid_mask_warps
def get_visual_frustum(map_size, shape_like, expand_dims=[0,0]):
with tf.name_scope('visual_frustum'):
l = np.tril(np.ones(map_size)) ;l = l + l[:,::-1]
l = (l == 2).astype(np.float32)
for e in expand_dims:
l = np.expand_dims(l, axis=e)
confs_probs = tf.constant(l, dtype=tf.float32)
confs_probs = tf.ones_like(shape_like, dtype=tf.float32) * confs_probs
return confs_probs
def deconv(x, is_training, wt_decay, neurons, strides, layers_per_block,
kernel_size, conv_fn, name, offset=0):
"""Generates a up sampling network with residual connections.
"""
batch_norm_param = {'center': True, 'scale': True,
'activation_fn': tf.nn.relu,
'is_training': is_training}
outs = []
for i, (neuron, stride) in enumerate(zip(neurons, strides)):
for s in range(layers_per_block):
scope = '{:s}_{:d}_{:d}'.format(name, i+1+offset,s+1)
x = custom_residual_block(x, neuron, kernel_size, stride, scope,
is_training, wt_decay, use_residual=True,
residual_stride_conv=True, conv_fn=conv_fn,
batch_norm_param=batch_norm_param)
stride = 1
outs.append((x,True))
return x, outs
def fr_v2(x, output_neurons, inside_neurons, is_training, name='fr',
wt_decay=0.0001, stride=1, updates_collections=tf.GraphKeys.UPDATE_OPS):
"""Performs fusion of information between the map and the reward map.
Inputs
x: NxHxWxC1
Outputs
fr map: NxHxWx(output_neurons)
"""
if type(stride) != list:
stride = [stride]
with slim.arg_scope(resnet_v2.resnet_utils.resnet_arg_scope(
is_training=is_training, weight_decay=wt_decay)):
with slim.arg_scope([slim.batch_norm], updates_collections=updates_collections) as arg_sc:
# Change the updates_collections for the conv normalizer_params to None
for i in range(len(arg_sc.keys())):
if 'convolution' in arg_sc.keys()[i]:
arg_sc.values()[i]['normalizer_params']['updates_collections'] = updates_collections
with slim.arg_scope(arg_sc):
bottleneck = resnet_v2.bottleneck
blocks = []
for i, s in enumerate(stride):
b = resnet_v2.resnet_utils.Block(
'block{:d}'.format(i + 1), bottleneck, [{
'depth': output_neurons,
'depth_bottleneck': inside_neurons,
'stride': stride[i]
}])
blocks.append(b)
x, outs = resnet_v2.resnet_v2(x, blocks, num_classes=None, global_pool=False,
output_stride=None, include_root_block=False,
reuse=False, scope=name)
return x, outs
| apache-2.0 |
sunsistemo/mozzacella-automato-salad | salad.py | 1 | 4742 | from time import sleep
from optparse import OptionParser
import sys
import math
from math import log
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import random
from gmpy2 import digits
from rule30 import random_backup
STATE = None
def gen_rule(r, k, ruleNum):
    """Build the CA transition table: map every radius-r neighbourhood string
    (written in base k) to the new cell value encoded by ruleNum."""
    rule = {}
    hood = 2 * r + 1
    bruleNum = digits(ruleNum, k).zfill(k ** hood)[::-1]
    for s in range(k ** hood):
        rule[digits(s, k).zfill(hood)] = int(bruleNum[s])
    return rule
def step(rule, r):
    """Advance the global STATE by one CA generation, with wrap-around boundaries."""
    global STATE
oldState = STATE
newState = ["0"] * len(oldState)
for n in range(r):
newState[n] = str(rule[oldState[-r+n:] + oldState[:r+n+1]])
for i in range(r, len(oldState) - r):
newState[i] = str(rule[oldState[i-r:i+r+1]])
for m in range(1, r + 1):
newState[-m] = str(rule[oldState[-r-m:] + oldState[:-m+r+1]])
STATE = "".join(newState)
def random_state(n, k):
k = "".join(map(str, range(k)))
return "".join([random.choice(k) for _ in range(n)])
def make_colormap(k):
return {str(i): "\033[3%dm%d\033[0m" % (1 + i, i) for i in range(k)}
def CA_print(r=1, k=2, rule_number=-1, size=150):
global STATE
rule = gen_rule(r, k, rule_number)
# print(rule)
STATE = random_state(size, k)
colormap = make_colormap(k)
try:
while True:
pstate = "".join([colormap[c] for c in STATE])
print(pstate)
sleep(.1)
step(rule, r)
except KeyboardInterrupt:
print("Rule number: %d" % rule_number)
def randnit(rule, r):
global STATE
step(rule, r)
return int(STATE[0])
def randint(a, b, rule, r, num_bits=None):
"""a and b are ints such that a < b."""
if num_bits is None:
interval = b - a
is_power_of_two = sum(int(i) for i in bin(interval)[2:]) == 1
if not is_power_of_two:
print("So long sucker!")
random_backup()
sys.exit()
num_bits = len(bin(interval)[2:]) - 1
bits = [0] * num_bits
for i in range(num_bits):
        bits[i] = randnit(rule, r)  # randnit advances the CA one step and returns its first cell
return a + int("".join(map(str, bits)), 2)
# Future randint function for any interval (not power of two) and multiple colors
def randintk(a, b, rule, k = 2, r = None, num_bits=None):
"""a and b are ints such that a < b."""
if num_bits is None:
interval = b - a
num_bits = math.ceil(math.log(interval,2))
bits = [0] * num_bits
if r is None:
r = (len(list(rule.keys())[0]) - 1)//2
rand = interval
while rand >= interval:
for i in range(num_bits):
bits[i] = randnit(rule, r)
rand = int("".join(map(str, bits)), 2)
return a + rand
def basekint(ls, base):
return sum([i * base ** (len(ls) - n - 1) for n, i in enumerate(ls)])
def bytestream(r, k, rule_number):
a, b = 0, 2 ** 8
num_nits = math.ceil(log(b) / log(k))
bytes_per_nyte = (k ** num_nits) // b
rule = gen_rule(r, k, rule_number)
write = sys.stdout.buffer.write
while True:
try:
randnyte = float("inf")
tries = 0
            while randnyte >= b * bytes_per_nyte:  # rejection sampling: discard values that would bias the modulo
randnyte = basekint([randnit(rule, r) for _ in range(num_nits)], k)
tries += 1
if tries > 11:
raise StableError
randbyte = randnyte % b
a = bytearray([randbyte])
write(a)
except (BrokenPipeError, IOError):
sys.stderr.close()
sys.exit(1)
def main():
global STATE
parser = OptionParser()
parser.set_defaults(rule_number='30', num_neighbour='1', num_colors='2')
parser.add_option('-r', '--rule', dest='rule_number',
help='Rule number to generate random number')
parser.add_option('-n', '--neighbour', dest='num_neighbour',
help='Radius of neighbours')
parser.add_option('-c', '--color', dest='num_colors',
help='Number of colors')
parser.add_option("-B", "--bytestream", action="store_true",
help='Option to output random bytes')
(options, args) = parser.parse_args()
rule_number = int(options.rule_number)
r = int(options.num_neighbour)
k = int(options.num_colors)
n = 261
STATE = random_state(n, k)
if options.bytestream:
return bytestream(r, k, rule_number)
if rule_number < 0 or rule_number >= k**(k**(2*r + 1)):
print("No proper rule number given for this CA setting, generating random rule...")
sleep(3)
rule_number = random.randint(0, k**(k**(2*r + 1)))
CA_print(r, k, rule_number)
class StableError(Exception):
pass
if __name__ == "__main__":
main()
| gpl-3.0 |
xuewei4d/scikit-learn | examples/compose/plot_compare_reduction.py | 17 | 4632 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
=================================================================
Selecting dimensionality reduction with Pipeline and GridSearchCV
=================================================================
This example constructs a pipeline that does dimensionality
reduction followed by prediction with a support vector
classifier. It demonstrates the use of ``GridSearchCV`` and
``Pipeline`` to optimize over different classes of estimators in a
single CV run -- unsupervised ``PCA`` and ``NMF`` dimensionality
reductions are compared to univariate feature selection during
the grid search.
Additionally, ``Pipeline`` can be instantiated with the ``memory``
argument to memoize the transformers within the pipeline, avoiding to fit
again the same transformers over and over.
Note that the use of ``memory`` to enable caching becomes interesting when the
fitting of a transformer is costly.
"""
# %%
# Illustration of ``Pipeline`` and ``GridSearchCV``
# #############################################################################
# This section illustrates the use of a ``Pipeline`` with ``GridSearchCV``
# Authors: Robert McGibbon, Joel Nothman, Guillaume Lemaitre
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.decomposition import PCA, NMF
from sklearn.feature_selection import SelectKBest, chi2
print(__doc__)
pipe = Pipeline([
# the reduce_dim stage is populated by the param_grid
('reduce_dim', 'passthrough'),
('classify', LinearSVC(dual=False, max_iter=10000))
])
N_FEATURES_OPTIONS = [2, 4, 8]
C_OPTIONS = [1, 10, 100, 1000]
param_grid = [
{
'reduce_dim': [PCA(iterated_power=7), NMF()],
'reduce_dim__n_components': N_FEATURES_OPTIONS,
'classify__C': C_OPTIONS
},
{
'reduce_dim': [SelectKBest(chi2)],
'reduce_dim__k': N_FEATURES_OPTIONS,
'classify__C': C_OPTIONS
},
]
reducer_labels = ['PCA', 'NMF', 'KBest(chi2)']
grid = GridSearchCV(pipe, n_jobs=1, param_grid=param_grid)
X, y = load_digits(return_X_y=True)
grid.fit(X, y)
mean_scores = np.array(grid.cv_results_['mean_test_score'])
# scores are in the order of param_grid iteration, which is alphabetical
mean_scores = mean_scores.reshape(len(C_OPTIONS), -1, len(N_FEATURES_OPTIONS))
# select score for best C
mean_scores = mean_scores.max(axis=0)
bar_offsets = (np.arange(len(N_FEATURES_OPTIONS)) *
(len(reducer_labels) + 1) + .5)
plt.figure()
COLORS = 'bgrcmyk'
for i, (label, reducer_scores) in enumerate(zip(reducer_labels, mean_scores)):
plt.bar(bar_offsets + i, reducer_scores, label=label, color=COLORS[i])
plt.title("Comparing feature reduction techniques")
plt.xlabel('Reduced number of features')
plt.xticks(bar_offsets + len(reducer_labels) / 2, N_FEATURES_OPTIONS)
plt.ylabel('Digit classification accuracy')
plt.ylim((0, 1))
plt.legend(loc='upper left')
plt.show()
# %%
# Caching transformers within a ``Pipeline``
###############################################################################
# It is sometimes worthwhile storing the state of a specific transformer
# since it could be used again. Using a pipeline in ``GridSearchCV`` triggers
# such situations. Therefore, we use the argument ``memory`` to enable caching.
#
# .. warning::
# Note that this example is, however, only an illustration since for this
# specific case fitting PCA is not necessarily slower than loading the
# cache. Hence, use the ``memory`` constructor parameter when the fitting
# of a transformer is costly.
from joblib import Memory
from shutil import rmtree
# Create a temporary folder to store the transformers of the pipeline
location = 'cachedir'
memory = Memory(location=location, verbose=10)
cached_pipe = Pipeline([('reduce_dim', PCA()),
('classify', LinearSVC(dual=False, max_iter=10000))],
memory=memory)
# This time, a cached pipeline will be used within the grid search
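# Illustrative sketch (assumption, not spelled out in the original example):
# re-running the search with the cached pipeline reuses ``param_grid``, ``X``
# and ``y`` from above; repeated fits of the same ``PCA`` configuration are
# then served from the cache.
grid_cached = GridSearchCV(cached_pipe, n_jobs=1, param_grid=param_grid)
grid_cached.fit(X, y)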
# Delete the temporary cache before exiting
memory.clear(warn=False)
rmtree(location)
# %%
# The ``PCA`` fitting is only computed at the evaluation of the first
# configuration of the ``C`` parameter of the ``LinearSVC`` classifier. The
# other configurations of ``C`` will trigger the loading of the cached ``PCA``
# estimator data, leading to save processing time. Therefore, the use of
# caching the pipeline using ``memory`` is highly beneficial when fitting
# a transformer is costly.
| bsd-3-clause |
dsullivan7/scikit-learn | sklearn/feature_selection/__init__.py | 244 | 1088 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
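# Illustrative sketch (not part of the original module): a typical use of the
# univariate filters exported above, assuming a feature matrix X and labels y:
#
#   from sklearn.feature_selection import SelectKBest, chi2
#   X_new = SelectKBest(chi2, k=10).fit_transform(X, y)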
| bsd-3-clause |
dvspirito/pymeasure | pymeasure/experiment/experiment.py | 1 | 9657 | #
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2017 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
log = logging.getLogger()
log.addHandler(logging.NullHandler())
try:
from IPython import display
except ImportError:
log.warning("IPython could not be imported")
from .results import unique_filename
from .config import get_config, set_mpl_rcparams
from pymeasure.log import setup_logging, console_log
from pymeasure.experiment import Results, Worker
from .parameters import Measurable
import time, signal
import numpy as np
import tempfile
import gc
# pandas, pylab and seaborn are used by the plotting helpers below
# (clear_plot, pcolor, update_pcolor)
import pandas as pd
import pylab as pl
import seaborn as sns
def get_array(start, stop, step):
"""Returns a numpy array from start to stop"""
step = np.sign(stop - start) * abs(step)
return np.arange(start, stop + step, step)
def get_array_steps(start, stop, numsteps):
"""Returns a numpy array from start to stop in numsteps"""
return get_array(start, stop, (abs(stop - start) / numsteps))
def get_array_zero(maxval, step):
"""Returns a numpy array from 0 to maxval to -maxval to 0"""
return np.concatenate((np.arange(0, maxval, step), np.arange(maxval, -maxval, -step),
np.arange(-maxval, 0, step)))
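# Illustrative sketch (not part of the original module): the three helpers
# above build monotonic, fixed-count and zero-centred sweeps, e.g.
#
#   get_array(0, 1, 0.25)     # -> array([0.  , 0.25, 0.5 , 0.75, 1.  ])
#   get_array_steps(0, 1, 4)  # -> the same five points, given a step count
#   get_array_zero(2, 1)      # -> array([ 0,  1,  2,  1,  0, -1, -2, -1])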
def create_filename(title):
"""
Create a new filename according to the style defined in the config file.
If no config is specified, create a temporary file.
"""
config = get_config()
if 'Filename' in config._sections.keys():
filename = unique_filename(suffix='_%s' % title, **config._sections['Filename'])
else:
filename = tempfile.mktemp()
return filename
class Experiment(object):
""" Class which starts logging and creates/runs the results and worker processes.
.. code-block:: python
procedure = Procedure()
experiment = Experiment(title, procedure)
experiment.start()
experiment.plot_live('x', 'y', style='.-')
for a multi-subplot graph:
import pylab as pl
ax1 = pl.subplot(121)
experiment.plot('x','y',ax=ax1)
ax2 = pl.subplot(122)
experiment.plot('x','z',ax=ax2)
experiment.plot_live()
:var value: The value of the parameter
:param title: The experiment title
:param procedure: The procedure object
:param analyse: Post-analysis function, which takes a pandas dataframe as input and
returns it with added (analysed) columns. The analysed results are accessible via
experiment.data, as opposed to experiment.results.data for the 'raw' data.
:param _data_timeout: Time limit for how long live plotting should wait for datapoints.
"""
def __init__(self, title, procedure, analyse=(lambda x: x)):
self.title = title
self.procedure = procedure
self.measlist = []
self.port = 5888
self.plots = []
self.figs = []
self._data = []
self.analyse = analyse
self._data_timeout = 10
config = get_config()
set_mpl_rcparams(config)
if 'Logging' in config._sections.keys():
self.scribe = setup_logging(log, **config._sections['Logging'])
else:
self.scribe = console_log(log)
self.scribe.start()
self.filename = create_filename(self.title)
log.info("Using data file: %s" % self.filename)
self.results = Results(self.procedure, self.filename)
log.info("Set up Results")
self.worker = Worker(self.results, self.scribe.queue, logging.DEBUG)
log.info("Create worker")
def start(self):
"""Start the worker"""
log.info("Starting worker...")
self.worker.start()
@property
def data(self):
"""Data property which returns analysed data, if an analyse function
is defined, otherwise returns the raw data."""
self._data = self.analyse(self.results.data.copy())
return self._data
def wait_for_data(self):
"""Wait for the data attribute to fill with datapoints."""
t = time.time()
while self.data.empty:
time.sleep(.1)
if (time.time() - t) > self._data_timeout:
log.warning('Timeout, no data received for liveplot')
return False
return True
def plot_live(self, *args, **kwargs):
"""Live plotting loop for jupyter notebook, which automatically updates
(an) in-line matplotlib graph(s). Will create a new plot as specified by input
arguments, or will update (an) existing plot(s)."""
if self.wait_for_data():
if not (self.plots):
self.plot(*args, **kwargs)
while not self.worker.should_stop():
self.update_plot()
display.clear_output(wait=True)
if self.worker.is_alive():
self.worker.terminate()
self.scribe.stop()
def plot(self, *args, **kwargs):
"""Plot the results from the experiment.data pandas dataframe. Store the
plots in a plots list attribute."""
if self.wait_for_data():
kwargs['title'] = self.title
ax = self.data.plot(*args, **kwargs)
self.plots.append({'type': 'plot', 'args': args, 'kwargs': kwargs, 'ax': ax})
if ax.get_figure() not in self.figs:
self.figs.append(ax.get_figure())
self._user_interrupt = False
def clear_plot(self):
"""Clear the figures and plot lists."""
for fig in self.figs:
fig.clf()
pl.close()
self.figs = []
self.plots = []
gc.collect()
def update_plot(self):
"""Update the plots in the plots list with new data from the experiment.data
pandas dataframe."""
try:
tasks = []
self.data
for plot in self.plots:
ax = plot['ax']
if plot['type'] == 'plot':
x, y = plot['args'][0], plot['args'][1]
if type(y) == str:
y = [y]
for yname, line in zip(y, ax.lines):
self.update_line(ax, line, x, yname)
if plot['type'] == 'pcolor':
x, y, z = plot['x'], plot['y'], plot['z']
update_pcolor(ax, x, y, z)
display.clear_output(wait=True)
display.display(*self.figs)
time.sleep(0.1)
except KeyboardInterrupt:
display.clear_output(wait=True)
display.display(*self.figs)
self._user_interrupt = True
def pcolor(self, xname, yname, zname, *args, **kwargs):
"""Plot the results from the experiment.data pandas dataframe in a pcolor graph.
Store the plots in a plots list attribute."""
title = self.title
x, y, z = self._data[xname], self._data[yname], self._data[zname]
shape = (len(y.unique()), len(x.unique()))
diff = shape[0] * shape[1] - len(z)
Z = np.concatenate((z.values, np.zeros(diff))).reshape(shape)
df = pd.DataFrame(Z, index=y.unique(), columns=x.unique())
ax = sns.heatmap(df)
pl.title(title)
pl.xlabel(xname)
pl.ylabel(yname)
ax.invert_yaxis()
pl.plt.show()
self.plots.append(
{'type': 'pcolor', 'x': xname, 'y': yname, 'z': zname, 'args': args, 'kwargs': kwargs,
'ax': ax})
if ax.get_figure() not in self.figs:
self.figs.append(ax.get_figure())
def update_pcolor(self, ax, xname, yname, zname):
"""Update a pcolor graph with new data."""
x, y, z = self._data[xname], self._data[yname], self._data[zname]
shape = (len(y.unique()), len(x.unique()))
diff = shape[0] * shape[1] - len(z)
Z = np.concatenate((z.values, np.zeros(diff))).reshape(shape)
df = pd.DataFrame(Z, index=y.unique(), columns=x.unique())
cbar_ax = ax.get_figure().axes[1]
sns.heatmap(df, ax=ax, cbar_ax=cbar_ax)
ax.set_xlabel(xname)
ax.set_ylabel(yname)
ax.invert_yaxis()
def update_line(self, ax, hl, xname, yname):
"""Update a line in a matplotlib graph with new data."""
del hl._xorig, hl._yorig
hl.set_xdata(self._data[xname])
hl.set_ydata(self._data[yname])
ax.relim()
ax.autoscale()
gc.collect()
def __del__(self):
self.scribe.stop()
if self.worker.is_alive():
self.worker.recorder_queue.put(None)
self.worker.monitor_queue.put(None)
self.worker.stop()
| mit |
MohammedWasim/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
buqing2009/MissionPlanner | Lib/site-packages/numpy/linalg/linalg.py | 53 | 61098 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError']
import sys
from numpy.core import array, asarray, zeros, empty, transpose, \
intc, single, double, csingle, cdouble, inexact, complexfloating, \
newaxis, ravel, all, Inf, dot, add, multiply, identity, sqrt, \
maximum, flatnonzero, diagonal, arange, fastCopyAndTranspose, sum, \
isfinite, size, finfo, absolute, log, exp
from numpy.lib import triu
from numpy.linalg import lapack_lite
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError, 'Singular matrix'
numpy.linalg.linalg.LinAlgError: Singular matrix
"""
pass
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError, '%d-dimensional array given. Array must be \
two-dimensional' % len(a.shape)
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError, 'Array must be square'
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError, "Array must not contain infs or NaNs"
def _assertNonEmpty(*arrays):
for a in arrays:
if size(a) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a,wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = range(0, an)
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : array_like, shape (M, M)
Coefficient matrix.
b : array_like, shape (M,) or (M, N)
Ordinate or "dependent variable" values.
Returns
-------
x : ndarray, shape (M,) or (M, N) depending on b
Solution to the system a x = b
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
`solve` is a wrapper for the LAPACK routines `dgesv`_ and
`zgesv`_, the former being used if `a` is real-valued, the latter if
it is complex-valued. The solution to the system of linear equations
is computed using an LU decomposition [1]_ with partial pivoting and
row interchanges.
.. _dgesv: http://www.netlib.org/lapack/double/dgesv.f
.. _zgesv: http://www.netlib.org/lapack/complex16/zgesv.f
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> (np.dot(a, x) == b).all()
True
"""
a, _ = _makearray(a)
b, wrap = _makearray(b)
one_eq = len(b.shape) == 1
if one_eq:
b = b[:, newaxis]
_assertRank2(a, b)
_assertSquareness(a)
n_eq = a.shape[0]
n_rhs = b.shape[1]
if n_eq != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
# lapack_routine = _findLapackRoutine('gesv', t)
if isComplexType(t):
lapack_routine = lapack_lite.zgesv
else:
lapack_routine = lapack_lite.dgesv
a, b = _fastCopyAndTranspose(t, a, b)
a, b = _to_native_byte_order(a, b)
pivots = zeros(n_eq, fortran_int)
results = lapack_routine(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0)
if results['info'] > 0:
raise LinAlgError, 'Singular matrix'
if one_eq:
return wrap(b.ravel().astype(result_t))
else:
return wrap(b.transpose().astype(result_t))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[:ind] + a.shape[ind:]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError, "Invalid ind argument."
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : array_like, shape (M, M)
Matrix to be inverted.
Returns
-------
ainv : ndarray or matrix, shape (M, M)
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is singular or not square.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = LA.inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = LA.inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
"""
a, wrap = _makearray(a)
return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : array_like, shape (M, M)
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : ndarray, or matrix object if `a` is, shape (M, M)
Lower-triangular Cholesky factor of a.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
m = a.shape[0]
n = a.shape[1]
if isComplexType(t):
lapack_routine = lapack_lite.zpotrf
else:
lapack_routine = lapack_lite.dpotrf
results = lapack_routine(_L, n, a, m, 0)
if results['info'] > 0:
raise LinAlgError, 'Matrix is not positive definite - \
Cholesky decomposition cannot be computed'
s = triu(a, k=0).transpose()
if (s.dtype != result_t):
s = s.astype(result_t)
return wrap(s)
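# Illustrative sketch (not part of the original module): the two-step solve
# described in the cholesky() Notes above, written with this module's own
# cholesky() and solve().  A dedicated triangular solver would normally be
# used for the two substitution steps; solve() is only used here to keep the
# sketch self-contained.
def _cholesky_solve_sketch(A, b):
    L = cholesky(A)                       # A = L L.H
    y = solve(L, b)                       # forward step:  L y = b
    return solve(transpose(L).conj(), y)  # backward step: L.H x = y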
# QR decomposition
def qr(a, mode='full'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like
Matrix to be factored, of shape (M, N).
mode : {'full', 'r', 'economic'}, optional
Specifies the values to be returned. 'full' is the default.
Economic mode is slightly faster than 'r' mode if only `r` is needed.
Returns
-------
q : ndarray of float or complex, optional
The orthonormal matrix, of shape (M, K). Only returned if
``mode='full'``.
r : ndarray of float or complex, optional
The upper-triangular matrix, of shape (K, N) with K = min(M, N).
Only returned when ``mode='full'`` or ``mode='r'``.
a2 : ndarray of float or complex, optional
Array of shape (M, N), only returned when ``mode='economic'``.
The diagonal and the upper triangle of `a2` contains `r`, while
the rest of the matrix is undefined.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved, so if `a` is of type `matrix`,
all the return values will be matrices too.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
a, wrap = _makearray(a)
_assertRank2(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# economic mode. Isn't actually economic.
if mode[0] == 'e':
if t != result_t :
a = a.astype(result_t)
return a.T
# generate r
r = _fastCopyAndTranspose(result_t, a[:,:mn])
for i in range(mn):
r[i,:i].fill(0.0)
# 'r'-mode, that is, calculate only r
if mode[0] == 'r':
return r
# from here on: build orthonormal matrix q from a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mn, mn, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mn, mn, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
q = _fastCopyAndTranspose(result_t, a[:mn,:])
return wrap(q), wrap(r)
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : array_like, shape (M, M)
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
This is a simple interface to the LAPACK routines dgeev and zgeev
that sets those routines' flags to return only the eigenvalues of
general real and complex arrays, respectively.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
dummy = zeros((1,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeev
w = zeros((n,), t)
rwork = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, w,
dummy, 1, dummy, 1, work, -1, rwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, w,
dummy, 1, dummy, 1, work, lwork, rwork, 0)
else:
lapack_routine = lapack_lite.dgeev
wr = zeros((n,), t)
wi = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, wr, wi,
dummy, 1, dummy, 1, work, -1, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, wr, wi,
dummy, 1, dummy, 1, work, lwork, 0)
if all(wi == 0.):
w = wr
result_t = _realType(result_t)
else:
w = wr+1j*wi
result_t = _complexType(result_t)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
return w.astype(result_t)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : array_like, shape (M, M)
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, not necessarily ordered, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
This is a simple interface to the LAPACK routines dsyevd and zheevd
that sets those routines' flags to return only the eigenvalues of
real symmetric and complex Hermitian arrays, respectively.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288+0.j, 5.82842712+0.j])
"""
UPLO = asbytes(UPLO)
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
liwork = 5*n+3
iwork = zeros((liwork,), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zheevd
w = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
lrwork = 1
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
rwork, -1, iwork, liwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
lrwork = int(rwork[0])
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
rwork, lrwork, iwork, liwork, 0)
else:
lapack_routine = lapack_lite.dsyevd
w = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
iwork, liwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
iwork, liwork, 0)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
return w.astype(result_t)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : array_like, shape (M, M)
A square array of real or complex elements.
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered, nor are they
necessarily real for real arrays (though for real arrays
complex-valued eigenvalues should occur in conjugate pairs).
v : ndarray, shape (M, M)
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
eigvals : eigenvalues of a non-symmetric array.
Notes
-----
This is a simple interface to the LAPACK routines dgeev and zgeev
which compute the eigenvalues and eigenvectors of, respectively,
general real- and complex-valued square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[i,:], v[i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
_assertFinite(a)
a, t, result_t = _convertarray(a) # convert to double or cdouble type
a = _to_native_byte_order(a)
real_t = _linalgRealType(t)
n = a.shape[0]
dummy = zeros((1,), t)
if isComplexType(t):
# Complex routines take different arguments
lapack_routine = lapack_lite.zgeev
w = zeros((n,), t)
v = zeros((n, n), t)
lwork = 1
work = zeros((lwork,), t)
rwork = zeros((2*n,), real_t)
results = lapack_routine(_N, _V, n, a, n, w,
dummy, 1, v, n, work, -1, rwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, w,
dummy, 1, v, n, work, lwork, rwork, 0)
else:
lapack_routine = lapack_lite.dgeev
wr = zeros((n,), t)
wi = zeros((n,), t)
vr = zeros((n, n), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, wr, wi,
dummy, 1, vr, n, work, -1, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, wr, wi,
dummy, 1, vr, n, work, lwork, 0)
if all(wi == 0.0):
w = wr
v = vr
result_t = _realType(result_t)
else:
w = wr+1j*wi
v = array(vr, w.dtype)
ind = flatnonzero(wi != 0.0) # indices of complex e-vals
for i in range(len(ind)//2):
v[ind[2*i]] = vr[ind[2*i]] + 1j*vr[ind[2*i+1]]
v[ind[2*i+1]] = vr[ind[2*i]] - 1j*vr[ind[2*i+1]]
result_t = _complexType(result_t)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
vt = v.transpose().astype(result_t)
return w.astype(result_t), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : array_like, shape (M, M)
A complex Hermitian or real symmetric matrix.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, not necessarily ordered.
v : ndarray, or matrix object if `a` is, shape (M, M)
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
This is a simple interface to the LAPACK routines dsyevd and zheevd,
which compute the eigenvalues and eigenvectors of real symmetric and
complex Hermitian arrays, respectively.
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = asbytes(UPLO)
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
liwork = 5*n+3
iwork = zeros((liwork,), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zheevd
w = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
lrwork = 1
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
rwork, -1, iwork, liwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
lrwork = int(rwork[0])
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
rwork, lrwork, iwork, liwork, 0)
else:
lapack_routine = lapack_lite.dsyevd
w = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
iwork, liwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
iwork, liwork, 0)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
at = a.transpose().astype(result_t)
return w.astype(_realType(result_t)), wrap(at)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : ndarray
Unitary matrix. The shape of `u` is (`M`, `M`) or (`M`, `K`)
depending on value of ``full_matrices``.
s : ndarray
The singular values, sorted so that ``s[i] >= s[i+1]``. `s` is
a 1-d array of length min(`M`, `N`).
v : ndarray
Unitary matrix of shape (`N`, `N`) or (`K`, `N`), depending on
``full_matrices``.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertNonEmpty(a)
m, n = a.shape
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
s = zeros((min(n, m),), real_t)
if compute_uv:
if full_matrices:
nu = m
nvt = n
option = _A
else:
nu = min(n, m)
nvt = min(n, m)
option = _S
u = zeros((nu, m), t)
vt = zeros((n, nvt), t)
else:
option = _N
nu = 1
nvt = 1
u = empty((1, 1), t)
vt = empty((1, 1), t)
iwork = zeros((8*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgesdd
rwork = zeros((5*min(m, n)*min(m, n) + 5*min(m, n),), real_t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgesdd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError, 'SVD did not converge'
s = s.astype(_realType(result_t))
if compute_uv:
u = u.transpose().astype(result_t)
vt = vt.transpose().astype(result_t)
return wrap(u), s, wrap(vt)
else:
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : array_like, shape (M, N)
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x,compute_uv=False)
return s[0]/s[-1]
else:
return norm(x,p)*norm(inv(x),p)
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the
array that are greater than `tol`.
Parameters
----------
M : array_like
array of <=2 dimensions
tol : {None, float}
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * eps``.
Notes
-----
Golub and van Loan [1]_ define "numerical rank deficiency" as using
tol=eps*S[0] (where S[0] is the maximum singular value and thus the
2-norm of the matrix). This is one definition of rank deficiency,
and the one we use here. When floating point roundoff is the main
concern, then "numerical rank deficiency" is a reasonable choice. In
some cases you may prefer other definitions. The most useful measure
of the tolerance depends on the operations you intend to use on your
matrix. For example, if your data come from uncertain measurements
with uncertainties greater than floating point epsilon, choosing a
tolerance near that uncertainty may be preferable. The tolerance
may be absolute if the uncertainties are absolute rather than
relative.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*.
Baltimore: Johns Hopkins University Press, 1996.
Examples
--------
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * finfo(S.dtype).eps
return sum(S > tol)
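# Illustrative sketch (not part of the original module): supplying an absolute
# tolerance as discussed in the Notes, e.g. for data whose entries carry an
# uncertainty of roughly 1e-3:
#
#   rank = matrix_rank(M, tol=1e-3)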
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : ndarray, shape (N, M)
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNonEmpty(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.;
res = dot(transpose(vt), multiply(s[:, newaxis],transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : array_like, shape (M, M)
Input array.
Returns
-------
sign : float or complex
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : float
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to `sign * np.exp(logdet)`.
Notes
-----
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
.. versionadded:: 2.0.0.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
See Also
--------
det
"""
a = asarray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
if isComplexType(t):
lapack_routine = lapack_lite.zgetrf
else:
lapack_routine = lapack_lite.dgetrf
pivots = zeros((n,), fortran_int)
results = lapack_routine(n, n, a, n, pivots, 0)
info = results['info']
if (info < 0):
raise TypeError, "Illegal input to Fortran routine"
elif (info > 0):
return (t(0.0), _realType(t)(-Inf))
sign = 1. - 2. * (add.reduce(pivots != arange(1, n + 1)) % 2)
d = diagonal(a)
absd = absolute(d)
sign *= multiply.reduce(d / absd)
log(absd, absd)
logdet = add.reduce(absd, axis=-1)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : array_like, shape (M, M)
Input array.
Returns
-------
det : ndarray
Determinant of `a`.
Notes
-----
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
See Also
--------
    slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
"""
sign, logdet = slogdet(a)
return sign * exp(logdet)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : array_like, shape (M, N)
"Coefficient" matrix.
b : array_like, shape (M,) or (M, K)
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : ndarray, shape (N,) or (N, K)
Least-squares solution. The shape of `x` depends on the shape of
`b`.
residues : ndarray, shape (), (1,), or (K,)
Sums of residues; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or > M, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : ndarray, shape (min(M,N),)
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0],:n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError, 'SVD did not converge in Linear Least Squares'
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
def norm(x, ord=None):
"""
Matrix or vector norm.
This function is able to return one of seven different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like, shape (M,) or (M, N)
Input array.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
Returns
-------
n : float
Norm of the matrix or vector.
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
"""
x = asarray(x)
if ord is None: # check the default case first and handle it immediately
return sqrt(add.reduce((x.conj() * x).ravel().real))
nd = x.ndim
if nd == 1:
if ord == Inf:
return abs(x).max()
elif ord == -Inf:
return abs(x).min()
elif ord == 0:
return (x != 0).sum() # Zero norm
elif ord == 1:
return abs(x).sum() # special case for speedup
elif ord == 2:
return sqrt(((x.conj()*x).real).sum()) # special case for speedup
else:
try:
ord + 1
except TypeError:
raise ValueError, "Invalid norm order for vectors."
return ((abs(x)**ord).sum())**(1.0/ord)
elif nd == 2:
if ord == 2:
return svd(x, compute_uv=0).max()
elif ord == -2:
return svd(x, compute_uv=0).min()
elif ord == 1:
return abs(x).sum(axis=0).max()
elif ord == Inf:
return abs(x).sum(axis=1).max()
elif ord == -1:
return abs(x).sum(axis=0).min()
elif ord == -Inf:
return abs(x).sum(axis=1).min()
elif ord in ['fro','f']:
return sqrt(add.reduce((x.conj() * x).real.ravel()))
else:
raise ValueError, "Invalid norm order for matrices."
else:
raise ValueError, "Improper number of dimensions to norm."
| gpl-3.0 |
marcocaccin/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
clemkoa/scikit-learn | doc/datasets/conftest.py | 7 | 2125 | from os.path import exists
from os.path import join
import numpy as np
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import check_skip_network
from sklearn.datasets import get_data_home
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def setup_labeled_faces():
data_home = get_data_home()
if not exists(join(data_home, 'lfw_home')):
raise SkipTest("Skipping dataset loading doctests")
def setup_mldata():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_mldata():
uninstall_mldata_mock()
def setup_rcv1():
check_skip_network()
# skip the test in rcv1.rst if the dataset is not already loaded
rcv1_dir = join(get_data_home(), "RCV1")
if not exists(rcv1_dir):
raise SkipTest("Download RCV1 dataset to run this test.")
def setup_twenty_newsgroups():
data_home = get_data_home()
if not exists(join(data_home, '20news_home')):
raise SkipTest("Skipping dataset loading doctests")
def setup_working_with_text_data():
check_skip_network()
def pytest_runtest_setup(item):
fname = item.fspath.strpath
if fname.endswith('datasets/labeled_faces.rst'):
setup_labeled_faces()
elif fname.endswith('datasets/mldata.rst'):
setup_mldata()
elif fname.endswith('datasets/rcv1.rst'):
setup_rcv1()
elif fname.endswith('datasets/twenty_newsgroups.rst'):
setup_twenty_newsgroups()
elif fname.endswith('datasets/working_with_text_data.rst'):
setup_working_with_text_data()
def pytest_runtest_teardown(item):
fname = item.fspath.strpath
if fname.endswith('datasets/mldata.rst'):
teardown_mldata()
| bsd-3-clause |
chetan51/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/delaunay/interpolate.py | 73 | 7068 | import numpy as np
from matplotlib._delaunay import compute_planes, linear_interpolate_grid, nn_interpolate_grid
from matplotlib._delaunay import nn_interpolate_unstructured
__all__ = ['LinearInterpolator', 'NNInterpolator']
def slice2gridspec(key):
"""Convert a 2-tuple of slices to start,stop,steps for x and y.
    key -- (slice(ystart,ystop,ystep), slice(xstart, xstop, xstep))
For now, the only accepted step values are imaginary integers (interpreted
in the same way numpy.mgrid, etc. do).
"""
if ((len(key) != 2) or
(not isinstance(key[0], slice)) or
(not isinstance(key[1], slice))):
raise ValueError("only 2-D slices, please")
x0 = key[1].start
x1 = key[1].stop
xstep = key[1].step
if not isinstance(xstep, complex) or int(xstep.real) != xstep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
xstep = int(xstep.imag)
y0 = key[0].start
y1 = key[0].stop
ystep = key[0].step
if not isinstance(ystep, complex) or int(ystep.real) != ystep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
ystep = int(ystep.imag)
return x0, x1, xstep, y0, y1, ystep
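# A minimal sketch of the slice convention, using only the function above:
#
#   >>> slice2gridspec((slice(0, 1, 5j), slice(-2, 2, 9j)))
#   (-2, 2, 9, 0, 1, 5)
#
# i.e. the x specification comes from key[1] and the y specification from
# key[0], exactly as with numpy.mgrid.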
class LinearInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
using the planes defined by the three function values at each corner of
the triangles.
LinearInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
At the moment, the only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Attributes:
planes -- (ntriangles, 3) array of floats specifying the plane for each
triangle.
Linear Interpolation
--------------------
Given the Delauany triangulation (or indeed *any* complete triangulation) we
can interpolate values inside the convex hull by locating the enclosing
triangle of the interpolation point and returning the value at that point of
the plane defined by the three node values.
f = planes[tri,0]*x + planes[tri,1]*y + planes[tri,2]
The interpolated function is C0 continuous across the convex hull of the
input points. It is C1 continuous across the convex hull except for the
nodes and the edges of the triangulation.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
self.planes = compute_planes(triangulation.x, triangulation.y, self.z,
triangulation.triangle_nodes)
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = linear_interpolate_grid(x0, x1, xstep, y0, y1, ystep, self.default_value,
self.planes, self.triangulation.x, self.triangulation.y,
self.triangulation.triangle_nodes, self.triangulation.triangle_neighbors)
return grid
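# A minimal usage sketch, assuming a Triangulation instance ``tri`` built
# from scattered points and values ``z`` at its nodes (names are
# placeholders, not defined in this module):
#
#   interp = LinearInterpolator(tri, z, default_value=np.nan)
#   grid = interp[0:1:100j, 0:1:100j]   # (100, 100) array, mgrid-style
#
# Points falling outside the convex hull receive ``default_value``.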
class NNInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
the natural neighbors method.
NNInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
At the moment, the only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Natural Neighbors Interpolation
-------------------------------
One feature of the Delaunay triangulation is that for each triangle, its
circumcircle contains no other point (although in degenerate cases, like
squares, other points may be *on* the circumcircle). One can also construct
what is called the Voronoi diagram from a Delaunay triangulation by
connecting the circumcenters of the triangles to those of their neighbors to
    form a tessellation of irregular polygons covering the plane and containing
only one node from the triangulation. Each point in one node's Voronoi
polygon is closer to that node than any other node.
To compute the Natural Neighbors interpolant, we consider adding the
interpolation point to the triangulation. We define the natural neighbors of
this point as the set of nodes participating in Delaunay triangles whose
circumcircles contain the point. To restore the Delaunay-ness of the
triangulation, one would only have to alter those triangles and Voronoi
    polygons. The new Voronoi diagram would have a polygon around the inserted
point. This polygon would "steal" area from the original Voronoi polygons.
For each node i in the natural neighbors set, we compute the area stolen
from its original Voronoi polygon, stolen[i]. We define the natural
neighbors coordinates
phi[i] = stolen[i] / sum(stolen,axis=0)
We then use these phi[i] to weight the corresponding function values from
the input data z to compute the interpolated value.
The interpolated surface is C1-continuous except at the nodes themselves
across the convex hull of the input points. One can find the set of points
that a given node will affect by computing the union of the areas covered by
the circumcircles of each Delaunay triangle that node participates in.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = nn_interpolate_grid(x0, x1, xstep, y0, y1, ystep, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return grid
def __call__(self, intx, inty):
intz = nn_interpolate_unstructured(intx, inty, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return intz
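# A minimal usage sketch, with the same placeholder ``tri`` and ``z`` as
# above: besides the mgrid-style slicing, NNInterpolator can also be
# called directly on unstructured query points via __call__.
#
#   interp = NNInterpolator(tri, z, default_value=np.nan)
#   zi = interp(xi, yi)   # xi, yi: arbitrary arrays of query coordinates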
| gpl-3.0 |
dirmeier/dataframe | tests/test_chaining.py | 1 | 3789 | # dataframe: a data-frame implementation using method piping
#
# Copyright (C) 2016 Simon Dirmeier
#
# This file is part of dataframe.
#
# dataframe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dataframe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dataframe. If not, see <http://www.gnu.org/licenses/>.
#
#
# @author = 'Simon Dirmeier'
# @email = '[email protected]'
import pytest
import unittest
import dataframe
import scipy.stats as sps
from dataframe import group, modify, subset, aggregate
from sklearn import datasets
from statistics import mean
import re
class Mean(dataframe.Callable):
def __call__(self, *args):
vals = args[0].values
return mean(vals)
class Zscore(dataframe.Callable):
def __call__(self, *args):
vals = args[0].values
return sps.zscore(vals).tolist()
class TestPipes(unittest.TestCase):
def setUp(self):
iris_data = datasets.load_iris()
        features = [re.sub(r"\s|cm|\(|\)", "", x) for x in
iris_data.feature_names]
data = {features[i]: iris_data.data[:, i] for i in
range(len(iris_data.data[1, :]))}
data["target"] = iris_data.target
self.__frame = dataframe.DataFrame(**data)
def test_group_piped(self):
tab = self.__frame >> group("target")
assert len(tab.groups) == 3
def test_group(self):
tab = group(self.__frame, "target")
assert len(tab.groups) == 3
def test_group_double(self):
tab = group(self.__frame, "target") >> group("petalwidth")
assert isinstance(tab, dataframe.GroupedDataFrame)
def test_aggregate(self):
tab = aggregate(self.__frame, Mean, "mean", "petalwidth")
assert len(tab["mean"]) == 1
def test_aggregate_piped(self):
tab = self.__frame >> aggregate(Mean, "mean", "petalwidth")
assert len(tab["mean"]) == 1
def test_group_aggregate(self):
tab = self.__frame >> \
group("target") >> \
aggregate(Mean, "mean", "petalwidth")
assert len(tab["mean"]) == 3
def test_modify_piped(self):
tab = self.__frame >> modify(Zscore, "z", "petalwidth")
assert tab.nrow == self.__frame.nrow
def test_modify(self):
tab = modify(self.__frame, Zscore, "z", "petalwidth")
assert tab.nrow == self.__frame.nrow
def test_subset_piped(self):
tab = self.__frame >> subset("petalwidth")
assert tab.ncol == 1
def test_subset(self):
tab = subset(self.__frame , "petalwidth")
assert tab.ncol == 1
def test_modify_subset(self):
tab = modify(self.__frame, Zscore, "z", "target") >> \
subset("z")
assert tab.ncol == 1
def test_aggregate_subset(self):
tab = aggregate(self.__frame, Mean, "m", "target") >> \
subset("m")
assert tab.nrow == 1
    def test_random_piping(self):
tab = self.__frame >> \
group("target") >> \
modify(Zscore, "z", "petalwidth") >> \
subset("z") >> \
aggregate(Mean, "m", "z")
assert tab.ncol == 2 and tab.nrow == 3
| gpl-3.0 |
sknepneklab/SAMoS | analysis/plot_analysis_polar/plot_profiles_oneJ.py | 1 | 7689 | # ################################################################
#
# Active Particles on Curved Spaces (APCS)
#
# Author: Silke Henkes
#
# ICSMB, Department of Physics
# University of Aberdeen
#
# (c) 2013, 2014
#
# This program cannot be used, copied, or modified without
# explicit permission of the author.
#
# ################################################################
#! /usr/bin/python
import sys, os, glob
import cPickle as pickle
import numpy as np
import scipy as sp
from scipy.io import savemat
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
#from matplotlib import rc
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
# setting global parameters
#matplotlib.rcParams['text.usetex'] = 'true'
matplotlib.rcParams['lines.linewidth'] = 2
matplotlib.rcParams['axes.linewidth'] = 2
matplotlib.rcParams['xtick.major.size'] = 8
matplotlib.rcParams['ytick.major.size'] = 8
matplotlib.rcParams['font.size']=20
matplotlib.rcParams['legend.fontsize']=14
cdict = {'red': [(0.0, 0.75, 0.75),
(0.3, 1.0, 1.0),
(0.5, 0.4, 0.0),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.0, 0.0),
(0.25, 0.0, 0.5),
(0.5, 1.0, 1.0),
(0.75, 0.5, 0.0),
(1.0, 0.0, 0.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(0.7, 1.0, 1.0),
(1.0, 0.25, 0.25)]}
# This is the structured data file hierarchy. Replace as appropriate (do not go the Yaouen way and fully automatize ...)
basefolder= '/home/silke/Documents/CurrentProjects/Rastko/analysis/data/SilkeJ1/'
outfolder= '/home/silke/Documents/CurrentProjects/Rastko/analysis/'
JList=['10', '1', '0.1', '0.01']
#vList=['0.005','0.01','0.02','0.05','0.1','0.2','0.5','1']
vList=['0.005','0.05','0.1','0.2','0.5','1']
#JList=['10']
#vList=['1']
nbin=180
rval=28.2094791
testmap=LinearSegmentedColormap('test',cdict,N=len(vList))
testmap2=LinearSegmentedColormap('test',cdict,N=len(JList))
nstep=10000000
nsave=10000
nsnap=int(nstep/nsave)
skip=int(nsnap/2)
dt=0.001
# Profiles
# Set column to plot
usecolumn=1
profList=[r'$\theta$',r'$\rho$',r'$\sqrt{\langle v^2 \rangle}/v_0$','energy','pressure',r'$\Sigma_{\theta \theta}$',r'$\Sigma_{\theta \phi}$',r'$\Sigma_{\phi \theta}$',r'$\Sigma_{\phi \phi}$',r'$\alpha$',r'$\alpha_v$']
profName=['theta','rho','vrms','energy','pressure','stt','stp','spt','spp','alpha','alpha_v']
plt.figure(figsize=(10,7),linewidth=2.0)
for i in range(0,len(vList)):
profiles=np.zeros((11,180))
for j in range(len(JList)):
print vList[i],JList[j]
ax=plt.gca()
outfile=basefolder+'profilesV_v0' + vList[i] + '_j' + JList[j] + '.dat'
outfile2=basefolder + 'axisV_v0' + vList[i] + '_j' + JList[j] + '.dat'
# header='theta rho vel energy pressure alpha alpha_v'
profiles+=sp.loadtxt(outfile, unpack=True)[:,:]
profiles/=len(JList)
isdata=[index for index,value in enumerate(profiles[1,:]) if (value >0)]
if (usecolumn==9)and(i==1):
plt.plot(profiles[0,isdata],0.45*profiles[0,isdata],'--',color=(0,0,0),label='slope 0.45')
plt.ylim(-0.5,0.5)
if usecolumn==2:
plt.plot(profiles[0,isdata],profiles[usecolumn,isdata]/float(vList[i]),color=testmap(i), linestyle='solid',label=vList[i])
else:
plt.plot(profiles[0,isdata],profiles[usecolumn,isdata],color=testmap(i), linestyle='solid',label=vList[i])
#if usecolumn<=8:
#plt.ylim(0,1.05*profiles[usecolumn,nbin/2])
plt.xlim(-np.pi/2,np.pi/2)
plt.xlim(-1.5,1.5)
plt.ylim(0,4.5)
plt.xlabel(profList[0])
plt.ylabel(profList[usecolumn])
plt.legend(loc=2,ncol=1)
#plt.title('Velocity ' + r'$v_0=$' + vList[i])
#filename=outfolder + 'pics/profile_' + profName[usecolumn] + '_allJ.pdf'
#plt.savefig(filename)
#for i in range(len(vList)):
#plt.figure(figsize=(10,7),linewidth=2.0)
#profiles=np.zeros((11,180))
#for j in range(len(JList)):
#print vList[i],JList[j]
#ax=plt.gca()
#outfile=outfolder+'data/profilesV_v0' + vList[i] + '_j' + JList[j] + '.dat'
#outfile2=outfolder + 'data/axisV_v0' + vList[i] + '_j' + JList[j] + '.dat'
## header='theta rho vel energy pressure alpha alpha_v'
#profiles+=sp.loadtxt(outfile, unpack=True)[:,:]
#profiles/=len(JList)
#isdata=[index for index,value in enumerate(profiles[1,:]) if (value >0)]
#plt.plot(profiles[0,isdata],profiles[4,isdata],color='k', linestyle='solid',label=r'$Tr(\Sigma)$')
#plt.plot(profiles[0,isdata],profiles[5,isdata],color='r', linestyle='solid',label=r'$\Sigma_{\theta \theta}$')
#plt.plot(profiles[0,isdata],profiles[6,isdata],color='g', linestyle='solid',label=r'$\Sigma_{\theta \phi}$')
#plt.plot(profiles[0,isdata],profiles[8,isdata],color='b', linestyle='solid',label=r'$\Sigma_{\phi \phi}$')
##plt.xlim(-np.pi/2,np.pi/2)
#plt.xlabel(profList[0])
#plt.ylabel('stresses')
#plt.legend(loc=2,ncol=2)
#plt.title('Velocity ' + r'$v_0=$' + vList[i])
#filename=outfolder + 'pics/stress_profiles_v' + vList[i] +'.pdf'
#plt.savefig(filename)
## Axis in 3d
#for i in range(len(vList)):
#fig=plt.figure(figsize=(10,7),linewidth=2.0)
#ax = fig.add_subplot(111, projection='3d')
#for j in range(len(JList)):
#print vList[i],JList[j]
#ax=plt.gca()
#outfile=outfolder+'data/profilesV_v0' + vList[i] + '_j' + JList[j] + '.dat'
#outfile2=outfolder + 'data/axisV_v0' + vList[i] + '_j' + JList[j] + '.dat'
#axis=sp.loadtxt(outfile2, unpack=True)[:,:]
#ax.scatter(axis[0,:], axis[1,:], axis[2,:], zdir='z',color=testmap2(j), linestyle='solid',label=JList[j])
#plt.xlabel('x')
#plt.ylabel('y')
##plt.xlim(-1,1)
##plt.ylim(-1,1)
##plt.zlim(-1,1)
##ax.zlabel('z')
#ax.legend()
#plt.legend()
#plt.title('Velocity ' + r'$v_0=$' + vList[i])
#filename=outfolder + 'pics/axisV_' + profName[usecolumn] + '_v' + vList[i] +'.pdf'
#plt.savefig(filename)
## Order parameter n = |\frac{1}{N R v_0} \sum r \times v|
#fig=plt.figure(figsize=(10,7),linewidth=2.0)
#ax=plt.gca()
#orderpar=np.zeros((len(vList),len(JList)))
#order=np.zeros((len(vList),))
#dorder=np.zeros((len(vList),))
#vval=np.zeros((len(vList),))
#for i in range(len(vList)):
#for j in range(len(JList)):
#print vList[i],JList[j]
#outfile=outfolder+'data/profilesV_v0' + vList[i] + '_j' + JList[j] + '.dat'
#outfile2=outfolder + 'data/axisV_v0' + vList[i] + '_j' + JList[j] + '.dat'
#axis=sp.loadtxt(outfile2, unpack=True)[:,:]
#orderpar0=np.sqrt(axis[3,:]**2+axis[4,:]**2+axis[5,:]**2)
#orderpar[i,j]=np.mean(orderpar0)
#order[i]=np.mean(orderpar[i,:])
#dorder[i]=np.std(orderpar[i,:])
#vval[i]=np.log10(float(vList[i]))
#plt.errorbar(vval,order,yerr=dorder,color=testmap2(j), linestyle='solid',marker='o',markersize=10,label='J=1')
#del vList
#del JList
#JList=['0.1']
#vList=['0.05','0.5','5.0']
#orderpar=np.zeros((len(vList),))
#dorder=np.zeros((len(vList),))
#vval=np.zeros((len(vList),))
#for j in range(len(JList)):
#for i in range(len(vList)):
#print vList[i],JList[j]
#outfile=outfolder+'data_Rastko/profilesV_v0' + vList[i] + '_j' + JList[j] + '.dat'
#outfile2=outfolder + 'data_Rastko/axisV_v0' + vList[i] + '_j' + JList[j] + '.dat'
#axis=sp.loadtxt(outfile2, unpack=True)[:,:]
#orderpar0=np.sqrt(axis[3,:]**2+axis[4,:]**2+axis[5,:]**2)
#orderpar[i]=np.mean(orderpar0)
#dorder[i]=np.std(orderpar0)
#vval[i]=np.log10(float(vList[i]))
#plt.errorbar(vval,orderpar,yerr=dorder,color=testmap2(j), linestyle='solid',marker='s',markersize=10,label='J='+JList[j])
#plt.xlabel(r'$\log_{10}v_0$')
#plt.ylabel('n')
#plt.ylim(0,1)
#plt.legend(loc=2,ncol=1)
#plt.title('Order parameter')
plt.show()
| gpl-3.0 |
lzz5235/Code-Segment | AISProject/ais_static.py | 1 | 12483 | import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from mpl_toolkits.basemap import Basemap
import ais_parse as parse
import pandas as pd
from pandas import datetime
from sklearn.cluster import DBSCAN,MeanShift,AffinityPropagation
from sklearn.metrics import euclidean_distances
import json
from collections import OrderedDict
def formatShipLine(one_MMSI):
x_list = []
y_list = []
date_list = []
speed_list = []
for shipdata in one_MMSI:
shipMMSI = shipdata['MMSI']
x = shipdata['DynamicInfo'][0]['Longitude'] # jingdu
y = shipdata['DynamicInfo'][0]['Latitude'] # weidu
datetime = shipdata['DynamicInfo'][0]['LastTime']
speed = shipdata['DynamicInfo'][0]['Speed'] #Speed
x_list.append(x)
y_list.append(y)
date_list.append(datetime)
speed_list.append(speed)
date = np.array(date_list)
x = np.array(x_list)
rows = x.shape
for i in range(rows[0]):
if x[i] <=0:
x[i] = 360 + x[i]
y = np.array(y_list)
table = pd.DataFrame(columns=['Date','X','Y','Speed'])
table['Date'] = date
table['X'] = x
table['Y'] = y
table['Speed'] = np.array(speed_list)
table.set_index('Date',drop=False,append=False,inplace=True,verify_integrity=False)
table = table.sort_index()
# table.to_csv('look.csv')
return shipMMSI,table
def compare(lat0,lon0,lat1,lon1):
delta0 = abs(abs(float(lat0)) - abs(float(lat1)))
delta1 = abs(abs(float(lon0)) - abs(float(lon1)))
if delta0 > 0.0001 and delta1 > 0.0001:
return True
return False
def compute_date(date0,date1):
d1 = datetime.strptime(date0,'%Y-%m-%d %H:%M:%S')
d2 = datetime.strptime(date1,'%Y-%m-%d %H:%M:%S')
hours = ((d2-d1).days * 24 *3600 + (d2-d1).seconds)/3600
return hours
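# A minimal sketch of compute_date, which returns whole hours between two
# timestamps (integer division under Python 2):
#
#   compute_date('2016-01-01 00:00:00', '2016-01-02 06:30:00')  # -> 30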
def find_stop_place(mmsi,data):
rows,cols = data.shape
# print data
i = 0
while i < rows:
if float(data.iloc[i]['Speed']) <=0.05 and float(data.iloc[i]['Speed']) >=0:
j = i
while i < rows and j < rows:
if float(data.iloc[j]['Speed']) > 1 and compare(data.iloc[j-1]['X'],data.iloc[j-1]['Y'],data.iloc[j][
'X'],data.iloc[j]['Y']):
break
j += 1
mean_lon = np.mean(data.iloc[i:j,1].values)
if mean_lon >=180:
mean_lon -= 360
mean_lat = np.mean(data.iloc[i:j,2].values)
# MMSI,Jindu,Weidu,Start_time,End_time,Point nums,Hours
string = str(mmsi) + ',' + str(mean_lon) + ',' + str(mean_lat)+ ',' + data.iloc[i]['Date'] + ',' \
+ data.iloc[j-1]['Date'] + ',' + str(int(j-i)) + ',' +\
str(compute_date(str(data.iloc[i]['Date']),str(data.iloc[j-1]['Date'])))
print string
with open('./ShipLineTest/ais_stop.txt', 'a') as f: # ST
f.write(string + '\n')
i = j
i += 1
def scatter(fileName):
colums = np.array([u'MMSI',u'Longtitude',u'Latitude',u'Start_time',u'End_time',u'PointNum',u'Hours'])
table = pd.read_csv(fileName,sep=',',header=None,names=colums)
x = table['Longtitude'].values
y = table['Latitude'].values
PointNum = table['PointNum'].values
rows = x.shape
for i in range(rows[0]):
if x[i] <= 0:
x[i] = 360 + x[i]
map = Basemap(projection='cyl', llcrnrlat=-90, urcrnrlat=90, llcrnrlon=0, urcrnrlon=360, resolution='c')
map.drawcoastlines()
map.drawparallels(np.arange(-90, 90, 30), labels=[1, 0, 0, 0])
map.drawmeridians(np.arange(map.lonmin, map.lonmax + 30, 60), labels=[0, 0, 0, 1])
map.drawmapboundary(fill_color='black')
x, y = map(x, y)
map.scatter(x, y, 25, cmap=plt.cm.jet, marker='o', edgecolors='none', zorder=10)
map.drawcoastlines(linewidth=0.5, color='y')
map.drawcountries(color='y')
map.drawstates(color='y')
map.fillcontinents(color='grey', lake_color='black')
plt.title('China Ship Map')
plt.show()
def DBSCAN_scatter(fileName):
colums = np.array([u'MMSI', u'Longtitude', u'Latitude', u'Start_time', u'End_time', u'PointNum', u'Hours'])
table = pd.read_csv(fileName, sep=',', header=None, names=colums)
x = table['Longtitude'].values.reshape(-1,1)
y = table['Latitude'].values.reshape(-1,1)
PointNum = table['PointNum'].values
rows = x.shape
for i in range(rows[0]):
if x[i] <= 0:
x[i] = 360 + x[i]
data = np.hstack((x,y))
model = DBSCAN(eps=0.09,min_samples=3)
model.fit(data)
y_hat = model.labels_
core_indices = np.zeros_like(y_hat,dtype=bool)
core_indices[model.core_sample_indices_] = True
y_unique = np.unique(y_hat)
print y_hat.size
print model.labels_,y_unique.size
print model.core_sample_indices_ # Belongs to which class
map = Basemap(projection='mill', lon_0=180, width=12000000, height=9000000)
map.drawcoastlines()
map.drawparallels(np.arange(-90, 90, 30), labels=[1, 0, 0, 0])
map.drawmeridians(np.arange(map.lonmin, map.lonmax + 30, 60), labels=[0, 0, 0, 1])
map.drawmapboundary(fill_color='white')
map.drawcoastlines(linewidth=0.5, color='y')
map.drawcountries(color='y')
map.drawstates(color='y')
map.fillcontinents(color='coral', lake_color='blue')
data[:,0], data[:,1] = map(data[:,0], data[:,1])
clrs = plt.cm.Spectral(np.linspace(0,0.8,y_unique.size))
for k,clr in zip(y_unique,clrs):
cur = (y_hat == k)
if k == -1:
# map.scatter(data[cur,0],data[cur,1],s=20,c='k') # nosie
continue
map.scatter(data[cur,0],data[cur,1],s=20,c=clr,edgecolors='k')
map.scatter(data[cur & core_indices][:,0],data[cur & core_indices][:,1],60, c=clr, marker='*',
edgecolors='none', zorder=10)
map.drawcoastlines(linewidth=0.5, color='y')
map.drawcountries(color='y')
map.drawstates(color='y')
# map.fillcontinents(color='grey', lake_color='black')
plt.title('DBSCAN Ship Stop')
plt.show()
def MeanShift_scatter(fileName):
colums = np.array([u'MMSI', u'Longtitude', u'Latitude', u'Start_time', u'End_time', u'PointNum', u'Hours'])
table = pd.read_csv(fileName, sep=',', header=None, names=colums)
x = table['Longtitude'].values.reshape(-1,1)
y = table['Latitude'].values.reshape(-1,1)
PointNum = table['PointNum'].values
rows = x.shape
for i in range(rows[0]):
if x[i] <= 0:
x[i] = 360 + x[i]
data = np.hstack((x,y))
model = MeanShift(bin_seeding=True,bandwidth=4,n_jobs=-2)
ms = model.fit(data)
centers = ms.cluster_centers_
y_hat = model.labels_
y_unique = np.unique(y_hat)
print model.labels_,y_unique.size
# map = Basemap(projection='cyl', llcrnrlat=-90, urcrnrlat=90, llcrnrlon=0, urcrnrlon=360, resolution='c')
# map.drawcoastlines()
# map.drawparallels(np.arange(-90, 90, 30), labels=[1, 0, 0, 0])
# map.drawmeridians(np.arange(map.lonmin, map.lonmax + 30, 60), labels=[0, 0, 0, 1])
# map.drawmapboundary(fill_color='black')
# data[:,0], data[:,1] = map(data[:,0], data[:,1])
clrs = plt.cm.Spectral(np.linspace(0,255,y_unique.size))
for k,clr in zip(y_unique,clrs):
cur = (y_hat == k)
plt.scatter(data[cur,0],data[cur,1],s=20,c = 'r',edgecolors='none')
plt.scatter(centers[:,0],centers[:,1],s=60,c=clrs,marker='*',edgecolors='k')
# map.drawcoastlines(linewidth=0.5, color='y')
# map.drawcountries(color='y')
# map.drawstates(color='y')
# map.fillcontinents(color='grey', lake_color='black')
plt.title('MeanShift Ship Stop')
plt.show()
def AP_scatter(fileName):
colums = np.array([u'MMSI', u'Longtitude', u'Latitude', u'Start_time', u'End_time', u'PointNum', u'Hours'])
table = pd.read_csv(fileName, sep=',', header=None, names=colums)
x = table['Longtitude'].values.reshape(-1,1)
y = table['Latitude'].values.reshape(-1,1)
PointNum = table['PointNum'].values
rows = x.shape
for i in range(rows[0]):
if x[i] <= 0:
x[i] = 360 + x[i]
data = np.hstack((x,y))
m = euclidean_distances(data,squared=True)
preferences = -np.median(m)
model = AffinityPropagation(affinity='euclidean',preference=preferences)
ms = model.fit(data)
centers = ms.cluster_centers_
y_hat = model.labels_
y_unique = np.unique(y_hat)
print model.labels_,y_unique.size
map = Basemap(projection='cyl', llcrnrlat=-90, urcrnrlat=90, llcrnrlon=0, urcrnrlon=360, resolution='c')
map.drawcoastlines()
map.drawparallels(np.arange(-90, 90, 30), labels=[1, 0, 0, 0])
map.drawmeridians(np.arange(map.lonmin, map.lonmax + 30, 60), labels=[0, 0, 0, 1])
map.drawmapboundary(fill_color='black')
data[:,0], data[:,1] = map(data[:,0], data[:,1])
clrs = plt.cm.Spectral(np.linspace(0,255,y_unique.size))
for k,clr in zip(y_unique,clrs):
cur = (y_hat == k)
map.scatter(data[cur,0],data[cur,1], 10, cmap=clr, marker='o', edgecolors='none', zorder=10)
map.scatter(centers[:,0],centers[:,1],180,cmap=plt.cm.jet,marker='*')
map.drawcoastlines(linewidth=0.5, color='y')
map.drawcountries(color='y')
map.drawstates(color='y')
# map.fillcontinents(color='grey', lake_color='black')
plt.title('AP Ship Stop')
plt.show()
def Classification_By_Speed(PathName,Output):
data_paths = [os.path.join(PathName, s) for s in os.listdir(PathName)]
speed_distribution = {}
for file in data_paths:
one_MMSI = []
parse.get_data_DY(file,one_MMSI)
sp = one_MMSI[0]['DynamicInfo'][0]['Speed']
if sp <=0:
continue
if speed_distribution.has_key(int(sp)):
speed_distribution[int(sp)] += 1
else:
speed_distribution[int(sp)] = 1
print speed_distribution
with open(Output,'w') as f:
json.dump(speed_distribution,f)
def plot_hist(filePath,X_label,Y_label,title):
speed_bins = []
speed_count = []
with open(filePath,'r') as f:
speed_distribution = f.readlines()[0]
speed_distribution = json.loads(speed_distribution)
d = OrderedDict(sorted(speed_distribution.items(),key=lambda x:int(x[0])))
# keys = d.keys()
# sorted(keys,key=lambda x:int(x[0]))
# d = [[key,d[key]] for key in keys]
# sorted(speed_distribution.items(),lambda x,y:cmp(int(x[0]),int(y[0])))
# print speed_distribution
for key,value in d.items():
if int(key) <= 0:
continue
speed_bins.append(int(key))
speed_count.append(value)
print key,value
speed_bins = np.array(speed_bins).flatten()
speed_count = np.array(speed_count).flatten()
# plt.hist(speed_count,bins=16,normed=1)
print speed_bins,len(speed_bins)
print speed_count,len(speed_count)
plt.plot(speed_bins,speed_count,'r-')
plt.bar(speed_bins,speed_count,color='b')
plt.xlabel(X_label)
plt.ylabel(Y_label)
plt.title(title)
plt.show()
if __name__ == "__main__":
all_MMSI = []
one_MMSI = []
center_x = 139.59
center_y = 35.26
theta = 5
center_lux = center_x-theta
center_luy = center_y+theta
center_rdx = center_x+theta
center_rdy = center_y-theta
##########################################
# data_paths = [os.path.join("./InterstTarget/", s) for s in os.listdir('./InterstTarget/')]
# for file in data_paths:
# one_MMSI = []
# parse.get_data_DY(file,one_MMSI)
# mmsi,data = formatShipLine(one_MMSI) # one Shipline
# find_stop_place(mmsi,data)
# scatter('./ShipLineTest/ais_stop.txt')
# DBSCAN_scatter('./ShipLineTest/ais_stop.txt')
#########################################
# one_MMSI = []
# parse.get_data_DY('./days/366576000',one_MMSI)
# mmsi,data = formatShipLine(one_MMSI) # one Shipline
# find_stop_place(mmsi,data)
#########################################
# AP_scatter('./ShipLineTest/ais_stop.txt')
# Classification_By_Speed('./ShipGARGO/','./ShipLineTest/cls_gargo_speed.txt')
# plot_hist('./ShipLineTest/cls_gargo_speed.txt',"Speed","Speed Count","The Speed Counter of ShipGARGO")
# Classification_By_Speed('./ShipTANKER/','./ShipLineTest/cls_tanker_speed.txt')
plot_hist('./ShipLineTest/cls_tanker_speed.txt',"Speed","Speed Count","The Speed Counter of Ship TANKER") | mit |
cpcloud/seaborn | seaborn/distributions.py | 1 | 33129 | """Plottng functions for visualizing distributions."""
from __future__ import division
import colorsys
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
try:
import statsmodels.api as sm
_has_statsmodels = True
except ImportError:
_has_statsmodels = False
from .external.six.moves import range
from .utils import set_hls_values, desaturate, percentiles, iqr, _kde_support
from .palettes import color_palette, husl_palette, blend_palette
from .axisgrid import JointGrid
def _box_reshape(vals, groupby, names, order):
"""Reshape the box/violinplot input options and find plot labels."""
# Set up default label outputs
xlabel, ylabel = None, None
# If order is provided, make sure it was used correctly
if order is not None:
# Assure that order is the same length as names, if provided
if names is not None:
if len(order) != len(names):
raise ValueError("`order` must have same length as `names`")
# Assure that order is only used with the right inputs
is_pd = isinstance(vals, pd.Series) or isinstance(vals, pd.DataFrame)
if not is_pd:
raise ValueError("`vals` must be a Pandas object to use `order`.")
# Handle case where data is a wide DataFrame
if isinstance(vals, pd.DataFrame):
if order is not None:
vals = vals[order]
if names is None:
names = vals.columns.tolist()
if vals.columns.name is not None:
xlabel = vals.columns.name
vals = vals.values.T
# Handle case where data is a long Series and there is a grouping object
elif isinstance(vals, pd.Series) and groupby is not None:
groups = pd.groupby(vals, groupby).groups
order = sorted(groups) if order is None else order
if hasattr(groupby, "name"):
if groupby.name is not None:
xlabel = groupby.name
if vals.name is not None:
ylabel = vals.name
vals = [vals.reindex(groups[name]) for name in order]
if names is None:
names = order
else:
# Handle case where the input data is an array or there was no groupby
if hasattr(vals, 'shape'):
if len(vals.shape) == 1:
if np.isscalar(vals[0]):
vals = [vals]
else:
vals = list(vals)
elif len(vals.shape) == 2:
nr, nc = vals.shape
if nr == 1:
vals = [vals]
elif nc == 1:
vals = [vals.ravel()]
else:
vals = [vals[:, i] for i in range(nc)]
else:
error = "Input `vals` can have no more than 2 dimensions"
raise ValueError(error)
# This should catch things like flat lists
elif np.isscalar(vals[0]):
vals = [vals]
# By default, just use the plot positions as names
if names is None:
names = list(range(1, len(vals) + 1))
elif hasattr(names, "name"):
if names.name is not None:
xlabel = names.name
# Now convert vals to a common representation
# The plotting functions will work with a list of arrays
# The list allows each array to possibly be of a different length
vals = [np.asarray(a, np.float) for a in vals]
return vals, xlabel, ylabel, names
def _box_colors(vals, color):
"""Find colors to use for boxplots or violinplots."""
if color is None:
colors = husl_palette(len(vals), l=.7)
else:
try:
color = mpl.colors.colorConverter.to_rgb(color)
colors = [color for _ in vals]
except ValueError:
colors = color_palette(color, len(vals))
# Desaturate a bit because these are patches
colors = [mpl.colors.colorConverter.to_rgb(c) for c in colors]
colors = [desaturate(c, .7) for c in colors]
# Determine the gray color for the lines
light_vals = [colorsys.rgb_to_hls(*c)[1] for c in colors]
l = min(light_vals) * .6
gray = (l, l, l)
return colors, gray
def boxplot(vals, groupby=None, names=None, join_rm=False, order=None,
color=None, alpha=None, fliersize=3, linewidth=1.5, widths=.8,
label=None, ax=None, **kwargs):
"""Wrapper for matplotlib boxplot with better aesthetics and functionality.
Parameters
----------
vals : DataFrame, Series, 2D array, list of vectors, or vector.
Data for plot. DataFrames and 2D arrays are assumed to be "wide" with
each column mapping to a box. Lists of data are assumed to have one
element per box. Can also provide one long Series in conjunction with
a grouping element as the `groupy` parameter to reshape the data into
several boxes. Otherwise 1D data will produce a single box.
groupby : grouping object
If `vals` is a Series, this is used to group into boxes by calling
pd.groupby(vals, groupby).
names : list of strings, optional
Names to plot on x axis; otherwise plots numbers. This will override
names inferred from Pandas inputs.
order : list of strings, optional
If vals is a Pandas object with name information, you can control the
order of the boxes by providing the box names in your preferred order.
join_rm : boolean, optional
If True, positions in the input arrays are treated as repeated
measures and are joined with a line plot.
color : mpl color, sequence of colors, or seaborn palette name
Inner box color.
alpha : float
        Transparency of the inner box color.
fliersize : float, optional
Markersize for the fliers.
linewidth : float, optional
Width for the box outlines and whiskers.
ax : matplotlib axis, optional
Existing axis to plot into, otherwise grab current axis.
kwargs : additional keyword arguments to boxplot
Returns
-------
ax : matplotlib axis
Axis where boxplot is plotted.
"""
if ax is None:
ax = plt.gca()
# Reshape and find labels for the plot
vals, xlabel, ylabel, names = _box_reshape(vals, groupby, names, order)
# Draw the boxplot using matplotlib
boxes = ax.boxplot(vals, patch_artist=True, widths=widths, **kwargs)
# Find plot colors
colors, gray = _box_colors(vals, color)
# Set the new aesthetics
for i, box in enumerate(boxes["boxes"]):
box.set_color(colors[i])
if alpha is not None:
box.set_alpha(alpha)
box.set_edgecolor(gray)
box.set_linewidth(linewidth)
for i, whisk in enumerate(boxes["whiskers"]):
whisk.set_color(gray)
whisk.set_linewidth(linewidth)
whisk.set_linestyle("-")
for i, cap in enumerate(boxes["caps"]):
cap.set_color(gray)
cap.set_linewidth(linewidth)
for i, med in enumerate(boxes["medians"]):
med.set_color(gray)
med.set_linewidth(linewidth)
for i, fly in enumerate(boxes["fliers"]):
fly.set_color(gray)
fly.set_marker("d")
fly.set_markeredgecolor(gray)
fly.set_markersize(fliersize)
# This is a hack to get labels to work
# It's unclear whether this is actually broken in matplotlib or just not
# implemented, either way it's annoying.
if label is not None:
pos = kwargs.get("positions", [1])[0]
med = np.median(vals[0])
color = colors[0]
ax.add_patch(plt.Rectangle([pos, med], 0, 0, color=color, label=label))
# Is this a vertical plot?
vertical = kwargs.get("vert", True)
# Draw the joined repeated measures
if join_rm:
x, y = np.arange(1, len(vals) + 1), vals
if not vertical:
x, y = y, x
ax.plot(x, y, color=gray, alpha=2. / 3)
# Label the axes and ticks
if vertical:
ax.set_xticklabels(list(names))
else:
ax.set_yticklabels(list(names))
xlabel, ylabel = ylabel, xlabel
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
# Turn off the grid parallel to the boxes
if vertical:
ax.xaxis.grid(False)
else:
ax.yaxis.grid(False)
return ax
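# A minimal usage sketch (names are placeholders): a wide DataFrame maps
# each column to one box, and join_rm=True connects repeated measures
# across boxes with a line plot.
#
#   df = pd.DataFrame(np.random.randn(30, 3), columns=["a", "b", "c"])
#   ax = boxplot(df, join_rm=True, color="Set2")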
def violinplot(vals, groupby=None, inner="box", color=None, positions=None,
names=None, order=None, bw="scott", widths=.8, alpha=None,
join_rm=False, gridsize=100, cut=3, inner_kws=None,
ax=None, **kwargs):
"""Create a violin plot (a combination of boxplot and kernel density plot).
Parameters
----------
vals : DataFrame, Series, 2D array, or list of vectors.
Data for plot. DataFrames and 2D arrays are assumed to be "wide" with
each column mapping to a box. Lists of data are assumed to have one
element per box. Can also provide one long Series in conjunction with
        a grouping element as the `groupby` parameter to reshape the data into
        several violins. Otherwise 1D data will produce a single violin.
groupby : grouping object
If `vals` is a Series, this is used to group into boxes by calling
pd.groupby(vals, groupby).
inner : {'box' | 'stick' | 'points'}
Plot quartiles or individual sample values inside violin.
color : mpl color, sequence of colors, or seaborn palette name
Inner violin colors
positions : number or sequence of numbers
Position of first violin or positions of each violin.
names : list of strings, optional
Names to plot on x axis; otherwise plots numbers. This will override
names inferred from Pandas inputs.
order : list of strings, optional
If vals is a Pandas object with name information, you can control the
order of the plot by providing the violin names in your preferred
order.
bw : {'scott' | 'silverman' | scalar}
Name of reference method to determine kernel size, or size as a
scalar.
widths : float
Width of each violin at maximum density.
alpha : float, optional
        Transparency of violin fill.
join_rm : boolean, optional
If True, positions in the input arrays are treated as repeated
measures and are joined with a line plot.
gridsize : int
Number of discrete gridpoints to evaluate the density on.
cut : scalar
Draw the estimate to cut * bw from the extreme data points.
inner_kws : dict, optional
        Keyword arguments for the inner plot.
ax : matplotlib axis, optional
Axis to plot on, otherwise grab current axis.
kwargs : additional parameters to fill_betweenx
Returns
-------
ax : matplotlib axis
Axis with violin plot.
"""
if ax is None:
ax = plt.gca()
# Reshape and find labels for the plot
vals, xlabel, ylabel, names = _box_reshape(vals, groupby, names, order)
# Sort out the plot colors
colors, gray = _box_colors(vals, color)
# Initialize the kwarg dict for the inner plot
if inner_kws is None:
inner_kws = {}
inner_kws.setdefault("alpha", .6 if inner == "points" else 1)
inner_kws["alpha"] *= 1 if alpha is None else alpha
inner_kws.setdefault("color", gray)
inner_kws.setdefault("marker", "." if inner == "points" else "")
lw = inner_kws.pop("lw", 1.5 if inner == "box" else .8)
inner_kws.setdefault("linewidth", lw)
# Find where the violins are going
if positions is None:
positions = np.arange(1, len(vals) + 1)
elif not hasattr(positions, "__iter__"):
positions = np.arange(positions, len(vals) + positions)
# Set the default linewidth if not provided in kwargs
try:
lw = kwargs[({"lw", "linewidth"} & set(kwargs)).pop()]
except KeyError:
lw = 1.5
# Iterate over the variables
for i, a in enumerate(vals):
# Fit the KDE
x = positions[i]
kde = stats.gaussian_kde(a, bw)
if isinstance(bw, str):
bw_name = "scotts" if bw == "scott" else bw
_bw = getattr(kde, "%s_factor" % bw_name)() * a.std(ddof=1)
else:
_bw = bw
y = _kde_support(a, _bw, gridsize, cut, (-np.inf, np.inf))
dens = kde.evaluate(y)
scl = 1 / (dens.max() / (widths / 2))
dens *= scl
# Draw the violin
ax.fill_betweenx(y, x - dens, x + dens, alpha=alpha, color=colors[i])
if inner == "box":
for quant in percentiles(a, [25, 75]):
q_x = kde.evaluate(quant) * scl
q_x = [x - q_x, x + q_x]
ax.plot(q_x, [quant, quant], linestyle=":", **inner_kws)
med = np.median(a)
m_x = kde.evaluate(med) * scl
m_x = [x - m_x, x + m_x]
ax.plot(m_x, [med, med], linestyle="--", **inner_kws)
elif inner == "stick":
x_vals = kde.evaluate(a) * scl
x_vals = [x - x_vals, x + x_vals]
ax.plot(x_vals, [a, a], linestyle="-", **inner_kws)
elif inner == "points":
x_vals = [x for _ in a]
ax.plot(x_vals, a, mew=0, linestyle="", **inner_kws)
for side in [-1, 1]:
ax.plot((side * dens) + x, y, c=gray, lw=lw)
# Draw the repeated measure bridges
if join_rm:
ax.plot(range(1, len(vals) + 1), vals,
color=inner_kws["color"], alpha=2. / 3)
# Add in semantic labels
ax.set_xticks(positions)
if names is not None:
if len(vals) != len(names):
raise ValueError("Length of names list must match nuber of bins")
ax.set_xticklabels(list(names))
ax.set_xlim(positions[0] - .5, positions[-1] + .5)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.xaxis.grid(False)
return ax
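# Hedged usage sketch (not part of the original source): a minimal call
# pattern for violinplot above, using synthetic data. Assumes numpy is
# importable at call time; wrapped in a private helper so importing this
# module has no side effects.
def _example_violinplot_usage():
    import numpy as np
    rs = np.random.RandomState(0)
    data = [rs.normal(loc, 1, 100) for loc in range(3)]
    # Three violins labelled a, b, c with boxplot-style interiors
    return violinplot(data, inner="box", names=["a", "b", "c"])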
def _freedman_diaconis_bins(a):
"""Calculate number of hist bins using Freedman-Diaconis rule."""
# From http://stats.stackexchange.com/questions/798/
a = np.asarray(a)
h = 2 * iqr(a) / (len(a) ** (1 / 3))
return np.ceil((a.max() - a.min()) / h)
def distplot(a, bins=None, hist=True, kde=True, rug=False, fit=None,
hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None,
color=None, vertical=False, norm_hist=False, axlabel=None,
label=None, ax=None):
"""Flexibly plot a distribution of observations.
Parameters
----------
a : (squeezable to) 1d array
Observed data.
bins : argument for matplotlib hist(), or None, optional
Specification of hist bins, or None to use Freedman-Diaconis rule.
hist : bool, optional
Whether to plot a (normed) histogram.
kde : bool, optional
Whether to plot a gaussian kernel density estimate.
rug : bool, optional
Whether to draw a rugplot on the support axis.
fit : random variable object, optional
        An object with a `fit` method, returning a tuple that can be passed to
        a `pdf` method as positional arguments following a grid of values to
        evaluate the pdf on.
{hist, kde, rug, fit}_kws : dictionaries, optional
Keyword arguments for underlying plotting functions.
color : matplotlib color, optional
Color to plot everything but the fitted curve in.
vertical : bool, optional
        If True, observed values are on y-axis.
    norm_hist : bool, optional
If True, the histogram height shows a density rather than a count.
This is implied if a KDE or fitted density is plotted.
axlabel : string, False, or None, optional
        Name for the support axis label. If None, will try to get it
        from a.name. If False, do not set a label.
label : string, optional
        Legend label for the relevant component of the plot
ax : matplotlib axis, optional
if provided, plot on this axis
Returns
-------
ax : matplotlib axis
"""
if ax is None:
ax = plt.gca()
# Intelligently label the support axis
label_ax = bool(axlabel)
if axlabel is None and hasattr(a, "name"):
axlabel = a.name
if axlabel is not None:
label_ax = True
# Make a a 1-d array
a = np.asarray(a).squeeze()
# Decide if the hist is normed
norm_hist = norm_hist or kde or (fit is not None)
# Handle dictionary defaults
if hist_kws is None:
hist_kws = dict()
if kde_kws is None:
kde_kws = dict()
if rug_kws is None:
rug_kws = dict()
if fit_kws is None:
fit_kws = dict()
# Get the color from the current color cycle
if color is None:
if vertical:
line, = ax.plot(0, a.mean())
else:
line, = ax.plot(a.mean(), 0)
color = line.get_color()
line.remove()
# Plug the label into the right kwarg dictionary
if label is not None:
if hist:
hist_kws["label"] = label
elif kde:
kde_kws["label"] = label
elif rug:
rug_kws["label"] = label
elif fit:
fit_kws["label"] = label
if hist:
if bins is None:
bins = _freedman_diaconis_bins(a)
hist_kws.setdefault("alpha", 0.4)
hist_kws.setdefault("normed", norm_hist)
orientation = "horizontal" if vertical else "vertical"
hist_color = hist_kws.pop("color", color)
ax.hist(a, bins, orientation=orientation,
color=hist_color, **hist_kws)
if hist_color != color:
hist_kws["color"] = hist_color
if kde:
kde_color = kde_kws.pop("color", color)
kdeplot(a, vertical=vertical, ax=ax, color=kde_color, **kde_kws)
if kde_color != color:
kde_kws["color"] = kde_color
if rug:
rug_color = rug_kws.pop("color", color)
axis = "y" if vertical else "x"
rugplot(a, axis=axis, ax=ax, color=rug_color, **rug_kws)
if rug_color != color:
rug_kws["color"] = rug_color
if fit is not None:
fit_color = fit_kws.pop("color", "#282828")
gridsize = fit_kws.pop("gridsize", 200)
cut = fit_kws.pop("cut", 3)
clip = fit_kws.pop("clip", (-np.inf, np.inf))
bw = stats.gaussian_kde(a).scotts_factor() * a.std(ddof=1)
x = _kde_support(a, bw, gridsize, cut, clip)
params = fit.fit(a)
pdf = lambda x: fit.pdf(x, *params)
y = pdf(x)
if vertical:
x, y = y, x
ax.plot(x, y, color=fit_color, **fit_kws)
if fit_color != "#282828":
fit_kws["color"] = fit_color
if label_ax:
if vertical:
ax.set_ylabel(axlabel)
else:
ax.set_xlabel(axlabel)
return ax
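# Hedged usage sketch (not part of the original source): distplot drawing a
# histogram, KDE curve, and rug for one random sample. Assumes numpy is
# importable at call time; defined as a helper so nothing runs on import.
def _example_distplot_usage():
    import numpy as np
    a = np.random.RandomState(1).randn(200)
    return distplot(a, bins=20, hist=True, kde=True, rug=True,
                    axlabel="sampled value")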
def _univariate_kdeplot(data, shade, vertical, kernel, bw, gridsize, cut,
clip, legend, ax, cumulative=False, **kwargs):
"""Plot a univariate kernel density estimate on one of the axes."""
# Sort out the clipping
if clip is None:
clip = (-np.inf, np.inf)
# Calculate the KDE
if _has_statsmodels:
# Prefer using statsmodels for kernel flexibility
x, y = _statsmodels_univariate_kde(data, kernel, bw,
gridsize, cut, clip,
cumulative=cumulative)
else:
# Fall back to scipy if missing statsmodels
if kernel != "gau":
kernel = "gau"
msg = "Kernel other than `gau` requires statsmodels."
warnings.warn(msg, UserWarning)
if cumulative:
raise ImportError("Cumulative distributions are currently"
"only implemented in statsmodels."
"Please install statsmodels.")
x, y = _scipy_univariate_kde(data, bw, gridsize, cut, clip)
# Make sure the density is nonnegative
y = np.amax(np.c_[np.zeros_like(y), y], axis=1)
# Flip the data if the plot should be on the y axis
if vertical:
x, y = y, x
# Check if a label was specified in the call
label = kwargs.pop("label", None)
# Otherwise check if the data object has a name
if label is None and hasattr(data, "name"):
label = data.name
# Decide if we're going to add a legend
legend = label is not None and legend
label = "_nolegend_" if label is None else label
# Use the active color cycle to find the plot color
line, = ax.plot(x, y, **kwargs)
color = line.get_color()
line.remove()
kwargs.pop("color", None)
# Draw the KDE plot and, optionally, shade
ax.plot(x, y, color=color, label=label, **kwargs)
alpha = kwargs.get("alpha", 0.25)
if shade:
if vertical:
ax.fill_betweenx(y, 1e-12, x, color=color, alpha=alpha)
else:
ax.fill_between(x, 1e-12, y, color=color, alpha=alpha)
# Draw the legend here
if legend:
ax.legend(loc="best")
return ax
def _statsmodels_univariate_kde(data, kernel, bw, gridsize, cut, clip,
cumulative=False):
"""Compute a univariate kernel density estimate using statsmodels."""
fft = kernel == "gau"
kde = sm.nonparametric.KDEUnivariate(data)
kde.fit(kernel, bw, fft, gridsize=gridsize, cut=cut, clip=clip)
if cumulative:
grid, y = kde.support, kde.cdf
else:
grid, y = kde.support, kde.density
return grid, y
def _scipy_univariate_kde(data, bw, gridsize, cut, clip):
"""Compute a univariate kernel density estimate using scipy."""
kde = stats.gaussian_kde(data, bw_method=bw)
if isinstance(bw, str):
bw = "scotts" if bw == "scott" else bw
bw = getattr(kde, "%s_factor" % bw)()
grid = _kde_support(data, bw, gridsize, cut, clip)
y = kde(grid)
return grid, y
def _bivariate_kdeplot(x, y, filled, kernel, bw, gridsize, cut, clip, axlabel,
ax, **kwargs):
"""Plot a joint KDE estimate as a bivariate contour plot."""
# Determine the clipping
if clip is None:
clip = [(-np.inf, np.inf), (-np.inf, np.inf)]
elif np.ndim(clip) == 1:
clip = [clip, clip]
# Calculate the KDE
if _has_statsmodels:
xx, yy, z = _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip)
else:
xx, yy, z = _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip)
# Plot the contours
n_levels = kwargs.pop("n_levels", 10)
cmap = kwargs.get("cmap", "BuGn" if filled else "BuGn_d")
if isinstance(cmap, str):
if cmap.endswith("_d"):
pal = ["#333333"]
pal.extend(color_palette(cmap.replace("_d", "_r"), 2))
cmap = blend_palette(pal, as_cmap=True)
kwargs["cmap"] = cmap
contour_func = ax.contourf if filled else ax.contour
contour_func(xx, yy, z, n_levels, **kwargs)
kwargs["n_levels"] = n_levels
# Label the axes
if hasattr(x, "name") and axlabel:
ax.set_xlabel(x.name)
if hasattr(y, "name") and axlabel:
ax.set_ylabel(y.name)
return ax
def _statsmodels_bivariate_kde(x, y, bw, gridsize, cut, clip):
"""Compute a bivariate kde using statsmodels."""
if isinstance(bw, str):
bw_func = getattr(sm.nonparametric.bandwidths, "bw_" + bw)
x_bw = bw_func(x)
y_bw = bw_func(y)
bw = [x_bw, y_bw]
elif np.isscalar(bw):
bw = [bw, bw]
kde = sm.nonparametric.KDEMultivariate([x, y], "cc", bw)
x_support = _kde_support(x, kde.bw[0], gridsize, cut, clip[0])
y_support = _kde_support(y, kde.bw[1], gridsize, cut, clip[1])
xx, yy = np.meshgrid(x_support, y_support)
z = kde.pdf([xx.ravel(), yy.ravel()]).reshape(xx.shape)
return xx, yy, z
def _scipy_bivariate_kde(x, y, bw, gridsize, cut, clip):
"""Compute a bivariate kde using scipy."""
data = np.c_[x, y]
kde = stats.gaussian_kde(data.T)
data_std = data.std(axis=0, ddof=1)
if isinstance(bw, str):
bw = "scotts" if bw == "scott" else bw
bw_x = getattr(kde, "%s_factor" % bw)() * data_std[0]
bw_y = getattr(kde, "%s_factor" % bw)() * data_std[1]
x_support = _kde_support(data[:, 0], bw_x, gridsize, cut, clip[0])
y_support = _kde_support(data[:, 1], bw_y, gridsize, cut, clip[1])
xx, yy = np.meshgrid(x_support, y_support)
z = kde([xx.ravel(), yy.ravel()]).reshape(xx.shape)
return xx, yy, z
def kdeplot(data, data2=None, shade=False, vertical=False, kernel="gau",
bw="scott", gridsize=100, cut=3, clip=None, legend=True, ax=None,
cumulative=False, **kwargs):
"""Fit and plot a univariate or bivarate kernel density estimate.
Parameters
----------
data : 1d or 2d array-like
Input data. If two-dimensional, assumed to be shaped (n_unit x n_var),
and a bivariate contour plot will be drawn.
data2: 1d array-like
Second input data. If provided `data` must be one-dimensional, and
a bivariate plot is produced.
shade : bool, optional
If true, shade in the area under the KDE curve (or draw with filled
contours when data is bivariate).
vertical : bool
If True, density is on x-axis.
kernel : {'gau' | 'cos' | 'biw' | 'epa' | 'tri' | 'triw' }, optional
Code for shape of kernel to fit with. Bivariate KDE can only use
gaussian kernel.
bw : {'scott' | 'silverman' | scalar | pair of scalars }, optional
Name of reference method to determine kernel size, scalar factor,
or scalar for each dimension of the bivariate plot.
gridsize : int, optional
Number of discrete points in the evaluation grid.
cut : scalar, optional
Draw the estimate to cut * bw from the extreme data points.
clip : pair of scalars, or pair of pair of scalars, optional
Lower and upper bounds for datapoints used to fit KDE. Can provide
a pair of (low, high) bounds for bivariate plots.
    legend : bool, optional
If True, add a legend or label the axes when possible.
ax : matplotlib axis, optional
Axis to plot on, otherwise uses current axis.
cumulative : bool
        If True, draw the cumulative distribution estimated by the kde.
kwargs : other keyword arguments for plot()
Returns
-------
ax : matplotlib axis
Axis with plot.
"""
if ax is None:
ax = plt.gca()
data = data.astype(np.float64)
if data2 is not None:
data2 = data2.astype(np.float64)
bivariate = False
if isinstance(data, np.ndarray) and np.ndim(data) > 1:
bivariate = True
x, y = data.T
elif isinstance(data, pd.DataFrame) and np.ndim(data) > 1:
bivariate = True
x = data.iloc[:, 0].values
y = data.iloc[:, 1].values
elif data2 is not None:
bivariate = True
x = data
y = data2
if bivariate and cumulative:
raise TypeError("Cumulative distribution plots are not"
"supported for bivariate distributions.")
if bivariate:
ax = _bivariate_kdeplot(x, y, shade, kernel, bw, gridsize,
cut, clip, legend, ax, **kwargs)
else:
ax = _univariate_kdeplot(data, shade, vertical, kernel, bw,
gridsize, cut, clip, legend, ax,
cumulative=cumulative, **kwargs)
return ax
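# Hedged usage sketch (not part of the original source): univariate and
# bivariate calls to kdeplot on random data. Assumes numpy is importable at
# call time; defined as a helper so nothing runs on import.
def _example_kdeplot_usage():
    import numpy as np
    x, y = np.random.RandomState(2).randn(2, 300)
    ax = kdeplot(x, shade=True)              # univariate, shaded density curve
    return kdeplot(x, y, shade=True, ax=ax)  # bivariate, filled contours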
def rugplot(a, height=None, axis="x", ax=None, **kwargs):
"""Plot datapoints in an array as sticks on an axis.
Parameters
----------
a : vector
1D array of datapoints.
height : scalar, optional
Height of ticks, if None draw at 5% of axis range.
axis : {'x' | 'y'}, optional
Axis to draw rugplot on.
ax : matplotlib axis
Axis to draw plot into; otherwise grabs current axis.
kwargs : other keyword arguments for plt.plot()
Returns
-------
ax : matplotlib axis
Axis with rugplot.
"""
if ax is None:
ax = plt.gca()
a = np.asarray(a)
vertical = kwargs.pop("vertical", None)
if vertical is not None:
axis = "y" if vertical else "x"
other_axis = dict(x="y", y="x")[axis]
min, max = getattr(ax, "get_%slim" % other_axis)()
if height is None:
range = max - min
height = range * .05
if axis == "x":
ax.plot([a, a], [min, min + height], **kwargs)
else:
ax.plot([min, min + height], [a, a], **kwargs)
return ax
def jointplot(x, y, data=None, kind="scatter", stat_func=stats.pearsonr,
color=None, size=6, ratio=5, space=.2,
dropna=True, xlim=None, ylim=None,
joint_kws=None, marginal_kws=None, annot_kws=None):
"""Draw a plot of two variables with bivariate and univariate graphs.
Parameters
----------
x, y : strings or vectors
Data or names of variables in `data`.
data : DataFrame, optional
DataFrame when `x` and `y` are variable names.
kind : { "scatter" | "reg" | "resid" | "kde" | "hex" }, optional
Kind of plot to draw.
stat_func : callable or None
Function used to calculate a statistic about the relationship and
annotate the plot. Should map `x` and `y` either to a single value
or to a (value, p) tuple. Set to ``None`` if you don't want to
annotate the plot.
color : matplotlib color, optional
Color used for the plot elements.
size : numeric, optional
Size of the figure (it will be square).
ratio : numeric, optional
Ratio of joint axes size to marginal axes height.
space : numeric, optional
Space between the joint and marginal axes
dropna : bool, optional
If True, remove observations that are missing from `x` and `y`.
{x, y}lim : two-tuples, optional
Axis limits to set before plotting.
{joint, marginal, annot}_kws : dicts
Additional keyword arguments for the plot components.
Returns
-------
grid : JointGrid
JointGrid object with the plot on it.
See Also
--------
JointGrid : The Grid class used for drawing this plot. Use it directly if
you need more flexibility.
"""
# Set up empty default kwarg dicts
if joint_kws is None:
joint_kws = {}
if marginal_kws is None:
marginal_kws = {}
if annot_kws is None:
annot_kws = {}
# Make a colormap based off the plot color
if color is None:
color = color_palette()[0]
color_rgb = mpl.colors.colorConverter.to_rgb(color)
colors = [set_hls_values(color_rgb, l=l) for l in np.linspace(1, 0, 12)]
cmap = blend_palette(colors, as_cmap=True)
# Initialize the JointGrid object
grid = JointGrid(x, y, data, dropna=dropna,
size=size, ratio=ratio, space=space,
xlim=xlim, ylim=ylim)
# Plot the data using the grid
if kind == "scatter":
joint_kws.setdefault("color", color)
grid.plot_joint(plt.scatter, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
elif kind.startswith("hex"):
x_bins = _freedman_diaconis_bins(grid.x)
y_bins = _freedman_diaconis_bins(grid.y)
gridsize = int(np.mean([x_bins, y_bins]))
joint_kws.setdefault("gridsize", gridsize)
joint_kws.setdefault("cmap", cmap)
grid.plot_joint(plt.hexbin, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
elif kind.startswith("kde"):
joint_kws.setdefault("shade", True)
joint_kws.setdefault("cmap", cmap)
grid.plot_joint(kdeplot, **joint_kws)
marginal_kws.setdefault("shade", True)
marginal_kws.setdefault("color", color)
grid.plot_marginals(kdeplot, **marginal_kws)
elif kind.startswith("reg"):
from .linearmodels import regplot
marginal_kws.setdefault("color", color)
grid.plot_marginals(distplot, **marginal_kws)
joint_kws.setdefault("color", color)
grid.plot_joint(regplot, **joint_kws)
elif kind.startswith("resid"):
from .linearmodels import residplot
joint_kws.setdefault("color", color)
grid.plot_joint(residplot, **joint_kws)
x, y = grid.ax_joint.collections[0].get_offsets().T
marginal_kws.setdefault("color", color)
marginal_kws.setdefault("kde", False)
distplot(x, ax=grid.ax_marg_x, **marginal_kws)
distplot(y, vertical=True, fit=stats.norm, ax=grid.ax_marg_y,
**marginal_kws)
stat_func = None
if stat_func is not None:
grid.annotate(stat_func, **annot_kws)
return grid
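# Hedged usage sketch (not part of the original source): a scatter-style
# joint plot of two correlated vectors. Assumes numpy is importable at call
# time; defined as a helper so nothing runs on import.
def _example_jointplot_usage():
    import numpy as np
    rs = np.random.RandomState(3)
    x = rs.randn(200)
    y = x + rs.randn(200) * .5
    return jointplot(x, y, kind="scatter")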
| bsd-3-clause |
pnedunuri/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
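# Hedged usage sketch (not part of the scikit-learn source): dropping
# near-constant boolean features. The .8 * (1 - .8) threshold is the usual
# Bernoulli-variance rule of thumb and is shown here for illustration only.
def _example_variance_threshold_usage():
    X = [[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 1, 1], [0, 1, 0], [0, 1, 1]]
    selector = VarianceThreshold(threshold=.8 * (1 - .8))
    # The first column is 1 in only one of six samples, so it is removed.
    return selector.fit_transform(X)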
| bsd-3-clause |
wangmiao1981/spark | python/pyspark/pandas/tests/test_series_datetime.py | 15 | 10917 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import unittest
import numpy as np
import pandas as pd
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
class SeriesDateTimeTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf1(self):
date1 = pd.Series(pd.date_range("2012-1-1 12:45:31", periods=3, freq="M"))
date2 = pd.Series(pd.date_range("2013-3-11 21:45:00", periods=3, freq="W"))
return pd.DataFrame(dict(start_date=date1, end_date=date2))
@property
def pd_start_date(self):
return self.pdf1["start_date"]
@property
def ks_start_date(self):
return ps.from_pandas(self.pd_start_date)
def check_func(self, func):
self.assert_eq(func(self.ks_start_date), func(self.pd_start_date))
def test_timestamp_subtraction(self):
pdf = self.pdf1
psdf = ps.from_pandas(pdf)
# Those fail in certain OSs presumably due to different
# timezone behaviours inherited from C library.
actual = (psdf["end_date"] - psdf["start_date"] - 1).to_pandas()
expected = (pdf["end_date"] - pdf["start_date"]) // np.timedelta64(1, "s") - 1
# self.assert_eq(actual, expected)
actual = (psdf["end_date"] - pd.Timestamp("2012-1-1 12:45:31") - 1).to_pandas()
expected = (pdf["end_date"] - pd.Timestamp("2012-1-1 12:45:31")) // np.timedelta64(
1, "s"
) - 1
# self.assert_eq(actual, expected)
actual = (pd.Timestamp("2013-3-11 21:45:00") - psdf["start_date"] - 1).to_pandas()
expected = (pd.Timestamp("2013-3-11 21:45:00") - pdf["start_date"]) // np.timedelta64(
1, "s"
) - 1
# self.assert_eq(actual, expected)
psdf = ps.DataFrame(
{"a": pd.date_range("2016-12-31", "2017-01-08", freq="D"), "b": pd.Series(range(9))}
)
expected_error_message = "datetime subtraction can only be applied to datetime series."
with self.assertRaisesRegex(TypeError, expected_error_message):
psdf["a"] - psdf["b"]
with self.assertRaisesRegex(TypeError, expected_error_message):
psdf["a"] - 1
with self.assertRaisesRegex(TypeError, expected_error_message):
1 - psdf["a"]
def test_arithmetic_op_exceptions(self):
psser = self.ks_start_date
py_datetime = self.pd_start_date.dt.to_pydatetime()
datetime_index = ps.Index(self.pd_start_date)
for other in [1, 0.1, psser, datetime_index, py_datetime]:
expected_err_msg = "Addition can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser + other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other + psser)
expected_err_msg = "Multiplication can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser * other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other * psser)
expected_err_msg = "True division can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser / other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other / psser)
expected_err_msg = "Floor division can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser // other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other // psser)
expected_err_msg = "Modulo can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser % other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other % psser)
expected_err_msg = "datetime subtraction can only be applied to datetime series."
for other in [1, 0.1]:
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser - other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other - psser)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser - other)
self.assertRaises(NotImplementedError, lambda: py_datetime - psser)
def test_date_subtraction(self):
pdf = self.pdf1
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf["end_date"].dt.date - psdf["start_date"].dt.date,
(pdf["end_date"].dt.date - pdf["start_date"].dt.date).dt.days,
)
self.assert_eq(
psdf["end_date"].dt.date - datetime.date(2012, 1, 1),
(pdf["end_date"].dt.date - datetime.date(2012, 1, 1)).dt.days,
)
self.assert_eq(
datetime.date(2013, 3, 11) - psdf["start_date"].dt.date,
(datetime.date(2013, 3, 11) - pdf["start_date"].dt.date).dt.days,
)
psdf = ps.DataFrame(
{"a": pd.date_range("2016-12-31", "2017-01-08", freq="D"), "b": pd.Series(range(9))}
)
expected_error_message = "date subtraction can only be applied to date series."
with self.assertRaisesRegex(TypeError, expected_error_message):
psdf["a"].dt.date - psdf["b"]
with self.assertRaisesRegex(TypeError, expected_error_message):
psdf["a"].dt.date - 1
with self.assertRaisesRegex(TypeError, expected_error_message):
1 - psdf["a"].dt.date
@unittest.skip(
"It fails in certain OSs presumably due to different "
"timezone behaviours inherited from C library."
)
def test_div(self):
pdf = self.pdf1
psdf = ps.from_pandas(pdf)
for u in "D", "s", "ms":
duration = np.timedelta64(1, u)
self.assert_eq(
(psdf["end_date"] - psdf["start_date"]) / duration,
(pdf["end_date"] - pdf["start_date"]) / duration,
)
@unittest.skip("It is currently failed probably for the same reason in 'test_subtraction'")
def test_date(self):
self.check_func(lambda x: x.dt.date)
def test_time(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.dt.time)
def test_timetz(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.dt.timetz)
def test_year(self):
self.check_func(lambda x: x.dt.year)
def test_month(self):
self.check_func(lambda x: x.dt.month)
def test_day(self):
self.check_func(lambda x: x.dt.day)
def test_hour(self):
self.check_func(lambda x: x.dt.hour)
def test_minute(self):
self.check_func(lambda x: x.dt.minute)
def test_second(self):
self.check_func(lambda x: x.dt.second)
def test_microsecond(self):
self.check_func(lambda x: x.dt.microsecond)
def test_nanosecond(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.dt.nanosecond)
def test_week(self):
self.check_func(lambda x: x.dt.week)
def test_weekofyear(self):
self.check_func(lambda x: x.dt.weekofyear)
def test_dayofweek(self):
self.check_func(lambda x: x.dt.dayofweek)
def test_weekday(self):
self.check_func(lambda x: x.dt.weekday)
def test_dayofyear(self):
self.check_func(lambda x: x.dt.dayofyear)
def test_quarter(self):
self.check_func(lambda x: x.dt.dayofyear)
def test_is_month_start(self):
self.check_func(lambda x: x.dt.is_month_start)
def test_is_month_end(self):
self.check_func(lambda x: x.dt.is_month_end)
def test_is_quarter_start(self):
self.check_func(lambda x: x.dt.is_quarter_start)
def test_is_quarter_end(self):
self.check_func(lambda x: x.dt.is_quarter_end)
def test_is_year_start(self):
self.check_func(lambda x: x.dt.is_year_start)
def test_is_year_end(self):
self.check_func(lambda x: x.dt.is_year_end)
def test_is_leap_year(self):
self.check_func(lambda x: x.dt.is_leap_year)
def test_daysinmonth(self):
self.check_func(lambda x: x.dt.daysinmonth)
def test_days_in_month(self):
self.check_func(lambda x: x.dt.days_in_month)
@unittest.expectedFailure
def test_tz_localize(self):
self.check_func(lambda x: x.dt.tz_localize("America/New_York"))
@unittest.expectedFailure
def test_tz_convert(self):
self.check_func(lambda x: x.dt.tz_convert("America/New_York"))
def test_normalize(self):
self.check_func(lambda x: x.dt.normalize())
def test_strftime(self):
self.check_func(lambda x: x.dt.strftime("%Y-%m-%d"))
def test_round(self):
self.check_func(lambda x: x.dt.round(freq="min"))
self.check_func(lambda x: x.dt.round(freq="H"))
def test_floor(self):
self.check_func(lambda x: x.dt.floor(freq="min"))
self.check_func(lambda x: x.dt.floor(freq="H"))
def test_ceil(self):
self.check_func(lambda x: x.dt.floor(freq="min"))
self.check_func(lambda x: x.dt.floor(freq="H"))
@unittest.skip("Unsupported locale setting")
def test_month_name(self):
self.check_func(lambda x: x.dt.month_name())
self.check_func(lambda x: x.dt.month_name(locale="en_US.UTF-8"))
@unittest.skip("Unsupported locale setting")
def test_day_name(self):
self.check_func(lambda x: x.dt.day_name())
self.check_func(lambda x: x.dt.day_name(locale="en_US.UTF-8"))
def test_unsupported_type(self):
self.assertRaisesRegex(
ValueError, "Cannot call DatetimeMethods on type LongType", lambda: ps.Series([0]).dt
)
if __name__ == "__main__":
from pyspark.pandas.tests.test_series_datetime import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
kdebrab/pandas | pandas/tests/sparse/frame/test_apply.py | 3 | 2616 | import pytest
import numpy as np
from pandas import SparseDataFrame, DataFrame, Series, bdate_range
from pandas.core import nanops
from pandas.util import testing as tm
@pytest.fixture
def dates():
return bdate_range('1/1/2011', periods=10)
@pytest.fixture
def empty():
return SparseDataFrame()
@pytest.fixture
def frame(dates):
data = {'A': [np.nan, np.nan, np.nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, np.nan, np.nan, np.nan, 3, 4, 5, 6],
'C': np.arange(10, dtype=np.float64),
'D': [0, 1, 2, 3, 4, 5, np.nan, np.nan, np.nan, np.nan]}
return SparseDataFrame(data, index=dates)
@pytest.fixture
def fill_frame(frame):
values = frame.values.copy()
values[np.isnan(values)] = 2
return SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
default_fill_value=2,
index=frame.index)
def test_apply(frame):
applied = frame.apply(np.sqrt)
assert isinstance(applied, SparseDataFrame)
tm.assert_almost_equal(applied.values, np.sqrt(frame.values))
# agg / broadcast
with tm.assert_produces_warning(FutureWarning):
broadcasted = frame.apply(np.sum, broadcast=True)
assert isinstance(broadcasted, SparseDataFrame)
with tm.assert_produces_warning(FutureWarning):
exp = frame.to_dense().apply(np.sum, broadcast=True)
tm.assert_frame_equal(broadcasted.to_dense(), exp)
applied = frame.apply(np.sum)
tm.assert_series_equal(applied,
frame.to_dense().apply(nanops.nansum))
def test_apply_fill(fill_frame):
applied = fill_frame.apply(np.sqrt)
assert applied['A'].fill_value == np.sqrt(2)
def test_apply_empty(empty):
assert empty.apply(np.sqrt) is empty
def test_apply_nonuq():
orig = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=['a', 'a', 'c'])
sparse = orig.to_sparse()
res = sparse.apply(lambda s: s[0], axis=1)
exp = orig.apply(lambda s: s[0], axis=1)
# dtype must be kept
assert res.dtype == np.int64
# ToDo: apply must return subclassed dtype
assert isinstance(res, Series)
tm.assert_series_equal(res.to_dense(), exp)
# df.T breaks
sparse = orig.T.to_sparse()
res = sparse.apply(lambda s: s[0], axis=0) # noqa
exp = orig.T.apply(lambda s: s[0], axis=0)
# TODO: no non-unique columns supported in sparse yet
# tm.assert_series_equal(res.to_dense(), exp)
def test_applymap(frame):
# just test that it works
result = frame.applymap(lambda x: x * 2)
assert isinstance(result, SparseDataFrame)
| bsd-3-clause |
noelevans/sandpit | stats/rugby_conversion_analysis.py | 1 | 1916 | from matplotlib import pyplot as plt
import numpy as np
from scipy.stats import beta
import seaborn as sns
def mean(a, b):
""" Statistical mean of distributon defined by alpha and beta values. """
return a / float(a + b)
def main():
""" Two beta distributions with the same mean but differing variance.
The distributions reflect kicking averages of a player who has been very
    consistent through his career versus another who has had a range of better
and worse years. On the x-axis, we see the expected score ratio over the
season - the y-axis indicating the likelihood of that average
The prime graphs (lighter colouring) show the change in belief for the
players' expected season average after 4 kicks (trials) where only 1 was
successful. The more consistent player's season expectation changes very
little relative to the more varied player.
Adapted from this explanation:
http://varianceexplained.org/statistics/beta_distribution_and_baseball/
"""
a_1 = 61
b_1 = 20
a_2 = 610
b_2 = 200
this_season_scores = 1
this_season_misses = 3
print(mean(a_1, b_1))
print(mean(a_2, b_2))
x = np.linspace(0, 1, 1000)
y_1 = beta.pdf(x, a_1, b_1)
y_2 = beta.pdf(x, a_2, b_2)
y_1_prime = beta.pdf(x, a_1 + this_season_scores, b_1 + this_season_misses)
y_2_prime = beta.pdf(x, a_2 + this_season_scores, b_2 + this_season_misses)
plt.plot(x, y_1, 'r', lw=2, alpha=0.8, label='a = 61, b = 20')
plt.plot(x, y_1_prime, 'r', lw=2, alpha=0.2, label='a = 62, b = 23')
plt.plot(x, y_2, 'b', lw=2, alpha=0.8, label='a = 610, b = 200')
plt.plot(x, y_2_prime, 'b', lw=2, alpha=0.2, label='a = 611, b = 203')
plt.xlim(0.3, 1.0)
plt.xlabel('p(scoring)')
plt.ylabel('probability density')
plt.legend(loc='upper left')
plt.show()
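# Hedged sketch (not part of the original script): the posterior means after
# this season's 1 success in 4 kicks, computed directly from the beta
# parameters used in main(); values are for illustration only.
def _example_posterior_means():
    priors = [(61, 20), (610, 200)]
    scores, misses = 1, 3
    # The consistent kicker (610, 200) barely moves; the varied one shifts more.
    return [mean(a + scores, b + misses) for a, b in priors]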
if __name__ == '__main__':
main()
| mit |
adamcandy/QGIS-Meshing | extras/shape/extractPoints.py | 3 | 6740 | """
##########################################################################
#
# QGIS-meshing plugins.
#
# Copyright (C) 2012-2013 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a
# full list of copyright holders.
#
# Dr Adam S. Candy, [email protected]
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
##########################################################################
Sets the read and the write file stream according to
the command line arguments given.
The first argument specifies the shape file whose
boundaries the user wants to extract.
The second argument specifies the boundary polygon.
The third argument specifies the file path to which the
new shape has to be written.
"""
from shapely.ops import cascaded_union
import shapefile
from shapely.geometry import *
import sys
import matplotlib.pyplot as pyplot
"""
Write the results to .shp.
"""
def writeShapeFile(points, filepath) :
#begin the instance pf writer class
w = shapefile.Writer()
#ensure shape and records the balance
w.autobalance = 1
i = 0
for l in points:
pList = []
pList.append(l)
if len(l)==1 :
w.point(pList[0][0],pList[0][1])
w.field("%d_FLD"%i,"C","40")
i+=1
elif len(l)==2 :
w.line(parts = pList)
w.field("%d_FLD"%i,"C","40")
i+=1
else :
w.poly(parts = pList)
w.field("%d_FLD"%i,"C","40")
i += 1
w.save(filepath)
print("Number of shapes Written %d" %i)
"""
Used if there is only one shape in the boundary shapefile.
"""
def getBoundaryPointsList(bounds):
shapes = bounds.shapes()
pointsList = (shapes[0].points)
polygon = Polygon(pointsList)
return (polygon,pointsList)
"""
Used when there is more than one shape in the boundary shapefile: works out which objects from the .shp file are overlapping and returns the exterior coords of the joined shape.
"""
def overlap (bounds, plot = False):
# Read shapefile and work out overlap. Optional plot.
shapes = bounds.shapes()
pointsList = []
for i in range(len(shapes)):
# Check datapoint is valid.
pointsList.append(shapes[i].points)
# Turn the points into polygons.
polygons = []
for j in range(len(pointsList)):
polygons.append(Polygon([pointsList[j][i] for i in range(len(pointsList[j]))]))
# Add overlapping shapes into a list so we know which to join together.
overlapping = []
for n in range(len(polygons) - 1):
if polygons[n].intersects(polygons[n+1]) == True:
# Two if statements to make sure the same polygon isn't being entered more than once.
if polygons[n] not in overlapping: overlapping.append(polygons[n])
if polygons[n + 1] not in overlapping: overlapping.append(polygons[n + 1])
# Create a new shape from the overlapping shapes.
join = cascaded_union(overlapping)
poly = [join]
# Take the coords. of the perimeter of this new shape.
coords = []
for i in range(len(join.exterior.coords)):
coords.append(list(join.exterior.coords[i]))
# Plot results if True. Store x-y coords of the perimeter in two lists to plot.
if plot == True:
x = []; y = []
for i in range(len(coords)):
x.append(coords[i][0]); y.append(coords[i][1])
# Plot results.
pyplot.plot(x, y)
pyplot.xlim(-4, 4)
pyplot.ylim(-4, 4)
pyplot.show()
return join, coords
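# Hedged sketch (not part of the original script): the core union step used in
# overlap() applied to two toy squares that intersect; shapely only.
def _example_union_of_overlapping_squares():
    a = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
    b = Polygon([(1, 1), (3, 1), (3, 3), (1, 3)])
    joined = cascaded_union([a, b])
    # Exterior coordinates of the merged outline, as used for .geo output
    return list(joined.exterior.coords)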
"""
Output the final results as a .geo file.
"""
def write_geo (coords, filename):
# Write new shape to .geo.
print coords
target = open("%s.geo" % filename, "w") # Creates .geo file to write to.
for i in range(len(coords)):
# Write point.
target.write('Point(%d) = {%.3f, %.3f, 0, %.3f};\n' %(i + 1, coords[i][0], coords[i][1], 1.0))
# Write the lines connecting the sequential points.
if (i + 1 > 1): target.write('Line(%d) = {%d, %d};\n' % (i, i, i + 1))
# Connect first and last points.
target.write('Line(%d) = {%d, %d};\n' % (i + 1, 1, i + 1))
target.close()
return False
assert len(sys.argv)==5, "Incorrect Number of Arguments passed"
readPath = sys.argv[1]
boundaryPath = sys.argv[2]
writePath = sys.argv[3]
areaThreshold = float(sys.argv[4])
#input stream for the given shape
sf = shapefile.Reader(readPath)
#shapes contained in the given file
shapes = sf.shapes();
#boundary = bounds.shapes[0]
boundary = shapefile.Reader(boundaryPath)
if (len(boundary.shapes()) > 1): boundaryPolygons, boundaryPointList1 = overlap(boundary); boundaryPointList = [boundaryPointList1]
else: boundaryPolygons, boundaryPointList = getBoundaryPointsList(boundary)
"""
Takes shape from shapefile and converts to a Shapely Polygon. Checks if this polygon lies within the boundary using a.intersect(b). If it does it will perform a.intersection(b) operation returning a Polygon/MultiPolygon which lies within the boundary and then plots result.
"""
shapeList = []
for shape in shapes:
x = []; y = []; shp = []
polygon = Polygon([shape.points[i] for i in range(len(shape.points))])
if (polygon.intersects(boundaryPolygons)):
intersection = boundaryPolygons.intersection(polygon)
if intersection.area >= areaThreshold:
if intersection.geom_type == 'Polygon':
for i in range(len(list(intersection.exterior.coords))):
x.append(intersection.exterior.coords[i][0]); y.append(intersection.exterior.coords[i][1])
pyplot.plot(x, y)
shp.append([intersection.exterior.coords[i][0], intersection.exterior.coords[i][1]])
if intersection.geom_type == 'MultiPolygon':
for j in range(len(intersection)):
for i in range(len(list(intersection[j].exterior.coords))):
x.append(intersection[j].exterior.coords[i][0]); y.append(intersection[j].exterior.coords[i][1])
pyplot.plot(x, y)
shp.append([intersection[j].exterior.coords[i][0], intersection[j].exterior.coords[i][1]])
shapeList.append(shp)
writeShapeFile(shapeList, writePath)
# Plot boundary.
x=[]
y=[]
for i in range(len(boundaryPointList)):
x.append(boundaryPointList[i][0]); y.append(boundaryPointList[i][1])
pyplot.plot(x,y)
pyplot.xlim(min(x)-1,max(x)+1)
pyplot.ylim(min(y)-1,max(y)+1)
pyplot.show()
| lgpl-2.1 |
rvraghav93/scikit-learn | examples/neighbors/plot_regression.py | 15 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause (C) INRIA
# #############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
# #############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
| bsd-3-clause |
cactusbin/nyt | matplotlib/lib/matplotlib/testing/compare.py | 1 | 13145 | #=======================================================================
""" A set of utilities for comparing results.
"""
#=======================================================================
from __future__ import division
import matplotlib
from matplotlib.compat import subprocess
from matplotlib.testing.noseclasses import ImageComparisonFailure
from matplotlib.testing import image_util
from matplotlib import _png
from matplotlib import _get_cachedir
from matplotlib import cbook
from distutils import version
import hashlib
import math
import os
import numpy as np
import shutil
import sys
from functools import reduce
#=======================================================================
__all__ = [
'compare_float',
'compare_images',
'comparable_formats',
]
#-----------------------------------------------------------------------
def make_test_filename(fname, purpose):
"""
Make a new filename by inserting `purpose` before the file's
extension.
"""
base, ext = os.path.splitext(fname)
return '%s-%s%s' % (base, purpose, ext)
def compare_float( expected, actual, relTol = None, absTol = None ):
"""Fail if the floating point values are not close enough, with
    the given message.
You can specify a relative tolerance, absolute tolerance, or both.
"""
if relTol is None and absTol is None:
exMsg = "You haven't specified a 'relTol' relative tolerance "
exMsg += "or a 'absTol' absolute tolerance function argument. "
exMsg += "You must specify one."
raise ValueError(exMsg)
msg = ""
if absTol is not None:
absDiff = abs( expected - actual )
if absTol < absDiff:
expectedStr = str( expected )
actualStr = str( actual )
absDiffStr = str( absDiff )
absTolStr = str( absTol )
msg += "\n"
msg += " Expected: " + expectedStr + "\n"
msg += " Actual: " + actualStr + "\n"
msg += " Abs Diff: " + absDiffStr + "\n"
msg += " Abs Tol: " + absTolStr + "\n"
if relTol is not None:
# The relative difference of the two values. If the expected value is
# zero, then return the absolute value of the difference.
relDiff = abs( expected - actual )
if expected:
relDiff = relDiff / abs( expected )
if relTol < relDiff:
# The relative difference is a ratio, so it's always unitless.
relDiffStr = str( relDiff )
relTolStr = str( relTol )
expectedStr = str( expected )
actualStr = str( actual )
msg += "\n"
msg += " Expected: " + expectedStr + "\n"
msg += " Actual: " + actualStr + "\n"
msg += " Rel Diff: " + relDiffStr + "\n"
msg += " Rel Tol: " + relTolStr + "\n"
if msg:
return msg
else:
return None
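# Hedged usage sketch (not part of the original module): compare_float returns
# None when the values agree within tolerance and a diagnostic string otherwise.
def _example_compare_float_usage():
    ok = compare_float(1.0, 1.0005, relTol=1e-3)   # None: within tolerance
    bad = compare_float(1.0, 1.1, absTol=1e-2)     # multi-line mismatch report
    return ok, bad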
#-----------------------------------------------------------------------
# A dictionary that maps filename extensions to functions that map
# parameters old and new to a list that can be passed to Popen to
# convert files with that extension to png format.
def get_cache_dir():
cachedir = _get_cachedir()
if cachedir is None:
raise RuntimeError('Could not find a suitable configuration directory')
cache_dir = os.path.join(cachedir, 'test_cache')
if not os.path.exists(cache_dir):
try:
cbook.mkdirs(cache_dir)
except IOError:
return None
if not os.access(cache_dir, os.W_OK):
return None
return cache_dir
def get_file_hash(path, block_size=2**20):
md5 = hashlib.md5()
with open(path, 'rb') as fd:
while True:
data = fd.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
converter = { }
def make_external_conversion_command(cmd):
def convert(old, new):
cmdline = cmd(old, new)
pipe = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if not os.path.exists(new) or errcode:
msg = "Conversion command failed:\n%s\n" % ' '.join(cmdline)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
return convert
if matplotlib.checkdep_ghostscript() is not None:
if sys.platform == 'win32':
gs = 'gswin32c'
else:
gs = 'gs'
cmd = lambda old, new: \
[gs, '-q', '-sDEVICE=png16m', '-dNOPAUSE', '-dBATCH',
'-sOutputFile=' + new, old]
converter['pdf'] = make_external_conversion_command(cmd)
converter['eps'] = make_external_conversion_command(cmd)
if matplotlib.checkdep_inkscape() is not None:
cmd = lambda old, new: \
['inkscape', '-z', old, '--export-png', new]
converter['svg'] = make_external_conversion_command(cmd)
def comparable_formats():
'''Returns the list of file formats that compare_images can compare
on this system.'''
return ['png'] + converter.keys()
def convert(filename, cache):
'''
Convert the named file into a png file. Returns the name of the
created file.
If *cache* is True, the result of the conversion is cached in
`~/.matplotlib/test_cache/`. The caching is based on a hash of the
    exact contents of the input file. There is no limit on the size of
the cache, so it may need to be manually cleared periodically.
'''
base, extension = filename.rsplit('.', 1)
if extension not in converter:
raise ImageComparisonFailure("Don't know how to convert %s files to png" % extension)
newname = base + '_' + extension + '.png'
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
# Only convert the file if the destination doesn't already exist or
# is out of date.
if (not os.path.exists(newname) or
os.stat(newname).st_mtime < os.stat(filename).st_mtime):
if cache:
cache_dir = get_cache_dir()
else:
cache_dir = None
if cache_dir is not None:
hash = get_file_hash(filename)
new_ext = os.path.splitext(newname)[1]
cached_file = os.path.join(cache_dir, hash + new_ext)
if os.path.exists(cached_file):
shutil.copyfile(cached_file, newname)
return newname
converter[extension](filename, newname)
if cache_dir is not None:
shutil.copyfile(newname, cached_file)
return newname
verifiers = { }
def verify(filename):
"""
Verify the file through some sort of verification tool.
"""
if not os.path.exists(filename):
raise IOError("'%s' does not exist" % filename)
base, extension = filename.rsplit('.', 1)
verifier = verifiers.get(extension, None)
if verifier is not None:
cmd = verifier(filename)
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = pipe.communicate()
errcode = pipe.wait()
if errcode != 0:
msg = "File verification command failed:\n%s\n" % ' '.join(cmd)
if stdout:
msg += "Standard output:\n%s\n" % stdout
if stderr:
msg += "Standard error:\n%s\n" % stderr
raise IOError(msg)
# Turning this off, because it seems to cause multiprocessing issues
if matplotlib.checkdep_xmllint() and False:
verifiers['svg'] = lambda filename: [
'xmllint', '--valid', '--nowarning', '--noout', filename]
def crop_to_same(actual_path, actual_image, expected_path, expected_image):
# clip the images to the same size -- this is useful only when
# comparing eps to pdf
if actual_path[-7:-4] == 'eps' and expected_path[-7:-4] == 'pdf':
aw, ah = actual_image.shape
ew, eh = expected_image.shape
actual_image = actual_image[int(aw/2-ew/2):int(aw/2+ew/2),int(ah/2-eh/2):int(ah/2+eh/2)]
return actual_image, expected_image
def calculate_rms(expectedImage, actualImage):
# calculate the per-pixel errors, then compute the root mean square error
num_values = np.prod(expectedImage.shape)
abs_diff_image = abs(expectedImage - actualImage)
# On Numpy 1.6, we can use bincount with minlength, which is much faster than
# using histogram
expected_version = version.LooseVersion("1.6")
found_version = version.LooseVersion(np.__version__)
if found_version >= expected_version:
histogram = np.bincount(abs_diff_image.ravel(), minlength=256)
else:
histogram = np.histogram(abs_diff_image, bins=np.arange(257))[0]
sum_of_squares = np.sum(histogram * np.arange(len(histogram))**2)
rms = np.sqrt(float(sum_of_squares) / num_values)
return rms
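# Hedged sketch (not part of the original module): calculate_rms on two tiny
# images, shown only to illustrate the per-pixel RMS definition above.
def _example_calculate_rms():
    expected = np.zeros((4, 4), dtype=np.int16)
    actual = expected.copy()
    actual[0, 0] = 2                        # one pixel differs by 2
    return calculate_rms(expected, actual)  # sqrt(2**2 / 16) = 0.5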
def compare_images( expected, actual, tol, in_decorator=False ):
'''Compare two image files - not the greatest, but fast and good enough.
= EXAMPLE
# img1 = "./baseline/plot.png"
# img2 = "./output/plot.png"
#
# compare_images( img1, img2, 0.001 ):
= INPUT VARIABLES
- expected The filename of the expected image.
- actual The filename of the actual image.
- tol The tolerance (a color value difference, where 255 is the
maximal difference). The test fails if the average pixel
difference is greater than this value.
- in_decorator If called from image_comparison decorator, this should be
True. (default=False)
'''
verify(actual)
# Convert the image to png
extension = expected.split('.')[-1]
if not os.path.exists(expected):
raise IOError('Baseline image %r does not exist.' % expected)
if extension != 'png':
actual = convert(actual, False)
expected = convert(expected, True)
# open the image files and remove the alpha channel (if it exists)
expectedImage = _png.read_png_int( expected )
actualImage = _png.read_png_int( actual )
expectedImage = expectedImage[:, :, :3]
actualImage = actualImage[:, :, :3]
actualImage, expectedImage = crop_to_same(actual, actualImage, expected, expectedImage)
# convert to signed integers, so that the images can be subtracted without
# overflow
expectedImage = expectedImage.astype(np.int16)
actualImage = actualImage.astype(np.int16)
# compare the resulting image histogram functions
expected_version = version.LooseVersion("1.6")
found_version = version.LooseVersion(np.__version__)
# On Numpy 1.6, we can use bincount with minlength, which is much faster than
# using histogram
if found_version >= expected_version:
rms = 0
for i in xrange(0, 3):
h1p = expectedImage[:,:,i]
h2p = actualImage[:,:,i]
h1h = np.bincount(h1p.ravel(), minlength=256)
h2h = np.bincount(h2p.ravel(), minlength=256)
rms += np.sum(np.power((h1h-h2h), 2))
else:
rms = 0
ns = np.arange(257)
for i in xrange(0, 3):
h1p = expectedImage[:,:,i]
h2p = actualImage[:,:,i]
h1h = np.histogram(h1p, bins=ns)[0]
h2h = np.histogram(h2p, bins=ns)[0]
rms += np.sum(np.power((h1h-h2h), 2))
rms = calculate_rms(expectedImage, actualImage)
diff_image = make_test_filename(actual, 'failed-diff')
if rms <= tol:
if os.path.exists(diff_image):
os.unlink(diff_image)
return None
save_diff_image( expected, actual, diff_image )
if in_decorator:
results = dict(
rms = rms,
expected = str(expected),
actual = str(actual),
diff = str(diff_image),
)
return results
else:
# old-style call from mplTest directory
msg = " Error: Image files did not match.\n" \
" RMS Value: " + str( rms ) + "\n" \
" Expected:\n " + str( expected ) + "\n" \
" Actual:\n " + str( actual ) + "\n" \
" Difference:\n " + str( diff_image ) + "\n" \
" Tolerance: " + str( tol ) + "\n"
return msg
def save_diff_image( expected, actual, output ):
expectedImage = _png.read_png( expected )
actualImage = _png.read_png( actual )
actualImage, expectedImage = crop_to_same(actual, actualImage, expected, expectedImage)
expectedImage = np.array(expectedImage).astype(np.float)
actualImage = np.array(actualImage).astype(np.float)
assert expectedImage.ndim==actualImage.ndim
assert expectedImage.shape==actualImage.shape
absDiffImage = abs(expectedImage-actualImage)
# expand differences in luminance domain
absDiffImage *= 255 * 10
save_image_np = np.clip(absDiffImage, 0, 255).astype(np.uint8)
height, width, depth = save_image_np.shape
# The PDF renderer doesn't produce an alpha channel, but the
# matplotlib PNG writer requires one, so expand the array
if depth == 3:
with_alpha = np.empty((height, width, 4), dtype=np.uint8)
with_alpha[:,:,0:3] = save_image_np
save_image_np = with_alpha
# Hard-code the alpha channel to fully solid
save_image_np[:,:,3] = 255
_png.write_png(save_image_np.tostring(), width, height, output)
| unlicense |
Gleland/SpectralAnalysis | GTanalysis2.py | 4 | 17389 | ##############################################################################
# Created by Garrett Thompson
# Graphical User Interface for Data Analysis
# Created at Northern Arizona University
# for use in the Astrophysical Ice Laboratory
# Advisors: Jennifer Hanley, Will Grundy, Henry Roe
# [email protected]
##############################################################################
import os
import csv
import time
import warnings
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit as cf
from scipy.fftpack import fft, fftfreq, ifft
from scipy.signal import savgol_filter as sgf
from scipy.integrate import trapz
def main():
folder_to_save = choose_dir()
#choose files for analysis
raw_x,raw_y, raw_xbg,raw_ybg = choose_files(folder_to_save)
print("Plotting imported data...")
plotting_data_for_inspection(raw_x,raw_y,'Raw Data','Wavenumber (cm-1)','% Transmittance','rawspectrum.pdf',folder_to_save, False)
plotting_data_for_inspection(raw_xbg,raw_ybg,'Raw Background','Wavenumber (cm-1)','% Transmittance','rawbackground.pdf',folder_to_save, False)
#user chooses method after inspecting plots
user_method = str(raw_input('Press "s" for savitsky-golay filter, or "f" for fft filter\n:'))
choosing = True
while choosing:
if user_method.lower() == 's':
# savitsky-golay option was chosen
choosing = False
args_list = [folder_to_save, raw_y, raw_ybg, raw_x]
raw_x, norm_smooth = sgf_calc(args_list)
plot_data(raw_x,norm_smooth,folder_to_save)
elif user_method.lower() == 'f':
# fft option was chosen
choosing = False
frq_x,frq_xbg,fft_y,fft_ybg = fft_calculation(raw_x,raw_y,raw_xbg,raw_ybg,folder_to_save)
plot_figure, plot_axis = plotting_data_for_inspection(frq_x,np.log(abs(fft_ybg)),'FFT of raw bg','Cycles/Wavenumber (cm)','Log(Power/Frequency)','fft_background.pdf',folder_to_save, False)
filt_y = fft_y.copy()
filt_ybg = fft_ybg.copy()
raw_input('Zoom to liking, then press enter to start')
print 'Left to add, middle to remove nearest, and right to finish'
# global frq_cid
vert_lines=[]
frq_cid = plot_figure.canvas.mpl_connect('button_press_event',lambda event: freq_click(event, [frq_x,fft_ybg,plot_figure,plot_axis,vert_lines,filt_y,filt_ybg,folder_to_save,raw_x]))
plt.show()
plot_figure.canvas.mpl_disconnect(frq_cid)
# vert_lines, frq_x, filt_y, filt_ybg = args_dict["vert_lines"],args_dict["frq_x"],args_dict["filt_y"],args_dict["filt_ybg"]
def save_as_csv(folder_to_save,title, column1_title,column2_title,column1_data,column2_data):
os.chdir(folder_to_save)
with open(title,"w") as f:
writer = csv.writer(f)
writer.writerow([column1_title,column2_title])
writer.writerows(zip(column1_data,column2_data))
os.chdir('..')
def fft_calculation(raw_x,raw_y,raw_xbg,raw_ybg,folder_to_save):
""" calculates FFT of data for use in nipping unwanted frequencies"""
# finds FFT of ydata
fft_y = fft(raw_y)
fft_ybg = fft(raw_ybg)
# gets frequencies for FFT of data from array, and sample spacing
frq_x = fftfreq(len(fft_y),((max(raw_x)-min(raw_x))/len(fft_y)))
frq_xbg = fftfreq(len(fft_ybg),((max(raw_xbg)-min(raw_xbg))/len(fft_ybg)))
save_as_csv(folder_to_save,"FFT_Raw_bg_data.csv","frq_x","log(abs(fft_bg))",frq_x,np.log(abs(fft_ybg)))
return frq_x, frq_xbg, fft_y, fft_ybg
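# Hedged sketch (not part of the original program): the fft/fftfreq pairing
# used above, applied to a synthetic 3-cycles-per-unit sine; the returned
# dominant frequency should be close to 3.
def _example_fft_frequencies():
    x = np.linspace(0, 10, 512)
    y = np.sin(2 * np.pi * 3 * x)
    power = fft(y)
    frqs = fftfreq(len(power), (max(x) - min(x)) / len(power))
    half = len(power) // 2
    return frqs[np.argmax(np.abs(power[:half]))]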
def choose_dir():
"""
User chooses where all work will be saved and
time stamp is created for future reference
"""
# Where all work to follow will be saved
folder_to_save = raw_input('Type name of directory to save all data being created\n:')
# make and change to directory named by user
os.mkdir(folder_to_save)
os.chdir(folder_to_save)
# recording date and time that program is run, saving it to folder
with open("time_created.txt", "w") as text_file:
text_file.write("Time this program was run: {} \n".format(time.strftime("%Y-%m-%d %H:%M")))
os.chdir('..')
return folder_to_save
def plotting_data_for_inspection(xdata,ydata,plot_title,plot_xlabel,plot_ylabel,filename_for_saving,folder_to_save, block_boolean):
"""
Plots data for user to look at within program
    Parameters
----------
xdata,ydata: x and y data to be plotted
plot_xlabel,plot_ylabel: label x and y axes in plot
    filename_for_saving: string given for saving file for later reference
block_boolean: True or False, tells if program waits for figure to close
"""
plot_figure, plot_axis = plt.subplots()
plt.plot(xdata,ydata,color='blue')
plt.xlabel(plot_xlabel)
plt.ylabel(plot_ylabel)
plt.suptitle(plot_title)
plt.show(block=block_boolean)
os.chdir(folder_to_save)
plt.savefig(filename_for_saving)
os.chdir('..')
return plot_figure, plot_axis
def choose_files(folder_to_save):
"""
Lets user determine which files will be imported for analysis
and saves preferences for reference later on
"""
raw_import = str(raw_input('Enter a raw dataset for analysis\n:'))
print "\nGot it! Importing now... \n"
raw_x,raw_y = import_data(raw_import)
bg_import = str(raw_input('Enter a raw background for analysis\n:'))
print "\nGot it! Importing now... \n"
raw_xbg,raw_ybg = import_data(bg_import)
os.chdir(folder_to_save)
with open("data_files_used.txt", "w") as text_file:
text_file.write("Raw data file used: {} \n".format(raw_import))
text_file.write("Raw background data file used: {}".format(bg_import))
concentration = str(raw_input('Enter concentration of mixture\n:'))
# saving text file of concentration for later use in plotting
with open("concentration.txt","w") as f:
f.write(concentration)
temperature = str(raw_input('Enter temperature of mixture\n:'))
# saving text file of temperature for later use in plotting
with open("temperature.txt","w") as f:
f.write(temperature)
os.chdir('..')
return raw_x, raw_y,raw_xbg,raw_ybg
# assumes a csv file, as all data stored from ice lab is in CSV format
def import_data(filename):
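    """Load a two-column CSV file and return the first and second columns as arrays."""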
raw_data = np.loadtxt(open(filename,"rb"),delimiter=",")
xdat = raw_data[:,0]
ydat = raw_data[:,1]
return xdat,ydat
def freq_click(event, args_list):
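    """Mouse handler for selecting the frequency window to remove from the FFT spectrum."""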
# if button_click = left: add left line
# if button_click = middle: removes closest line
# if button_lick = right: finish
# add clicked data points to list
frq_x,fft_ybg,plot_figure,plot_axis,vert_lines, filt_y, filt_ybg,folder_to_save, raw_x = args_list
plt.xlim(plt.gca().get_xlim())
plt.ylim(plt.gca().get_ylim())
if event.button==1:
vert_lines.append(event.xdata)
plot_axis.plot(frq_x,np.log(np.abs(fft_ybg)),color='blue')
#plt.axvline(x=vert_lines[-1],color='black')
for val in vert_lines:
plt.axvline(x=val,color='black')
plt.xlabel('Cycles/Wavenumber')
plt.ylabel('Relative Intensity')
# draws points as they are added
plt.draw()
if event.button==2:
# middle click, remove closest vertical line
print ('pop!')
# gets x,y limits of graph,saves them before destroying figure
xlims = plt.gca().get_xlim()
ylims = plt.gca().get_ylim()
# clears axes, to get rid of old scatter points
plot_axis.cla()
# re-plots spectrum
plot_axis.plot(frq_x,np.log(np.abs(fft_ybg)),color='blue')
# sets axes limits to original values
plt.xlim(xlims)
plt.ylim(ylims)
plt.xlabel('Cycles/Wavenumber')
plt.ylabel('Relative Intensity')
# deletes point closest to mouse click
        xindx = np.abs(np.array(vert_lines)-event.xdata).argmin()
del vert_lines[xindx]
for line in vert_lines:
plt.axvline(x=line,color='black')
# draws the new set of vertical lines
plt.draw()
if event.button==3:
# right click, ends clicking awareness
# plot_figure.canvas.mpl_disconnect(frq_cid)
os.chdir(folder_to_save)
plt.savefig('FFT_filter.pdf')
with open("freq_window.csv", "w") as f:
writer = csv.writer(f)
writer.writerow(["Xposition of vert. line"])
writer.writerows(zip(vert_lines))
os.chdir('..')
# first window
args_dict ={"vert_lines":vert_lines,"frq_x":frq_x,"filt_y":filt_y,"filt_ybg":filt_ybg}
plt.close("all")
argslist = [vert_lines,frq_x,filt_y,filt_ybg]
filt_y,filt_ybg = window_filter(argslist)
fft_calc(filt_y, filt_ybg, raw_x,folder_to_save)
def fft_calc(filt_y, filt_ybg, raw_x,folder_to_save):
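    """Inverse-transform the filtered FFTs, normalize the sample by the background, and plot the result."""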
# dividing filtered y data from filtered bg data
norm_fft = ifft(filt_y)/ifft(filt_ybg)
save_as_csv(folder_to_save,"fft_data.csv","raw_x","fft_filt",raw_x,norm_fft.real)
plot_data(raw_x,norm_fft.real,folder_to_save)
def sgf_calc(args_list):
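    """Smooth the sample and background with a Savitzky-Golay filter and return the normalized spectrum."""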
folder_to_save, raw_y, raw_ybg, raw_x = args_list
# warning when using sgf option
warnings.filterwarnings(action="ignore", module="scipy",message="^internal gelsd")
window_param = int(raw_input('Input window box size (must be odd number)\n:'))
poly_param = int(raw_input('Input polynomial order for smoothing\n:'))
# saving parameters chosen for future inspection
os.chdir(folder_to_save)
with open("sgf_params.txt", "w") as sgf_file:
sgf_file.write("Window parameter used: {} \n".format(window_param))
sgf_file.write("Polynomial paramter used: {}".format(poly_param))
#global norm_smooth
    # delta is the spacing of the samples being filtered; use the wavenumber
    # step of the x axis rather than a difference of y values
    smoothed_y = sgf(raw_y,window_param,poly_param,delta=abs(raw_x[1]-raw_x[0]))
    smoothed_ybg =sgf(raw_ybg,window_param,poly_param,delta=abs(raw_x[1]-raw_x[0]))
# dividing filtered y data from filtered bg data
norm_smooth = smoothed_y / smoothed_ybg
rows = zip(raw_x,norm_smooth)
with open("sgf_data.csv", "w") as f:
writer = csv.writer(f)
writer.writerow(["window","polynomail order"])
writer.writerow([window_param,poly_param])
writer.writerow(["raw_x","sgf_filt"])
writer.writerows(rows)
os.chdir('..')
return raw_x,norm_smooth
# range of frequenices to cut out
def window_filter(args_list):
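    """Zero the FFT bins between the last two selected frequencies (and the mirrored negative band)."""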
vert_lines, frq_x, filt_y, filt_ybg = args_list
window_min, window_max= vert_lines[-2], vert_lines[-1]
for i in range(len(frq_x)):
if (frq_x[i] >= window_min and frq_x[i] <=window_max) or (frq_x[i]>-1*window_max and frq_x[i]<-1*window_min):
filt_y[i] = 0
filt_ybg[i] = 0
return filt_y,filt_ybg
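# Illustrative sketch (not executed): if the last two clicked lines sit at
# 0.02 and 0.05 cycles/cm, every bin with |frequency| between 0.02 and 0.05
# is zeroed in both spectra, so the band is removed symmetrically and the
# inverse FFT stays real-valued.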
def plot_data(x,y,folder_to_save):
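    """Plot the filtered spectrum and start the interactive continuum-fitting step."""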
    plot_figure,plot_axis = plotting_data_for_inspection(x,y,"Divided and Filtered Spectrum","Wavenumber cm-1","Relative Intensity","dv_filt_spectrum.pdf",folder_to_save, False)
    order = int(raw_input('Zoom to liking and then enter what order polynomial for continuum fit\n:'))
xcoords,ycoords = [],[]
# tells python to turn on awareness for button presses
global cid
cid = plot_figure.canvas.mpl_connect('button_press_event', lambda event: onclick(event, [xcoords,ycoords,plot_figure,plot_axis,order,folder_to_save,x,y]))
print 'Left to add, middle to remove nearest, and right to finish'
plt.show()
# for creating continuum fit to divide out
def onclick(event,argslist):
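    """Mouse handler for picking continuum points, fitting the continuum polynomial, and handing off to calc_coeffs."""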
xcoords,ycoords,plot_figure,plot_axis,order,folder_to_save,x,y = argslist
global pvals
if event.button==1:
# left click
plt.xlim(plt.gca().get_xlim())
plt.ylim(plt.gca().get_ylim())
#plt.cla()
try:
# only delete if curve_fit line already drawn
if len(plot_axis.lines) !=1: plot_axis.lines.remove(plot_axis.lines[-1])
        except UnboundLocalError:
            pass
# add clicked data points to list
xcoords.append(event.xdata)
ycoords.append(event.ydata)
plot_axis.scatter(xcoords,ycoords,color='black')
plt.xlabel('Wavenumber cm-1')
plt.ylabel('Relative Intensity')
plt.draw()
xvals = np.array(xcoords)
yvals = np.array(ycoords)
# fits values to polynomial, rankwarning is irrelevant
warnings.simplefilter('ignore', np.RankWarning)
p_fit = np.polyfit(xvals,yvals,order)
pvals = np.poly1d(p_fit)
plot_axis.plot(x,pvals(x),color='black')
plt.draw()
# plt.show(block=False)
if event.button==2:
# middle click, remove closest point to click
print ('pop!')
# gets x,y limits of graph,saves them before destroying figure
xlims = plt.gca().get_xlim()
ylims = plt.gca().get_ylim()
# clears axes, to get rid of old scatter points
plot_axis.cla()
# re-plots spectrum
plot_axis.plot(x,y)
# sets axes limits to original values
plt.xlim(xlims)
plt.ylim(ylims)
plt.xlabel('Wavenumber cm-1')
plt.ylabel('Relative Intensity')
# deletes point closest to mouse click
        indx = np.abs(np.array(xcoords)-event.xdata).argmin()
        # remove the same index from both lists so the x and y points stay paired
        del xcoords[indx]
        del ycoords[indx]
# draws the new set of scatter points, and colors them
plot_axis.scatter(xcoords,ycoords,color='black')
plt.draw()
xvals = np.array(xcoords)
yvals = np.array(ycoords)
# fits values to polynomial, rankwarning is ignored
warnings.simplefilter('ignore', np.RankWarning)
p_fit = np.polyfit(xvals,yvals,order)
pvals = np.poly1d(p_fit)
plot_axis.plot(x,pvals(x),color='black')
plt.draw()
if event.button==3:
# right click,ends clicking awareness
plot_figure.canvas.mpl_disconnect(cid)
os.chdir(folder_to_save)
plt.savefig('continuum_chosen.pdf')
# Saving polynomial eqn used in continuum divide for reference
with open("continuum_polynomial.txt", "w") as save_file:
save_file.write("%s *x^ %d " %(pvals[0],0))
for i in (xrange(len(pvals))):
save_file.write("+ %s *x^ %d " %(pvals[i+1],i+1))
os.chdir('..')
calc_coeffs(pvals,x,y,folder_to_save)
def calc_coeffs(pvals,x,y,folder_to_save):
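    """Flatten the spectrum with the continuum fit, convert to absorption coefficients, and integrate the two peak regions."""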
fit_y = pvals(x)
# flattens the continuum
new_continuum = y / fit_y
thickness = int(raw_input('\nEnter thickness of cell in cm\n:'))
# 2 cm thickness for our work in 2016
# remove runtime errors when taking negative log and dividing
err_settings = np.seterr(invalid='ignore')
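    # Beer-Lambert: absorbance = -ln(transmittance); dividing by the cell
    # path length (cm) gives the absorption coefficient alpha in cm^-1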
alpha_coeffs = -np.log(new_continuum) / thickness
plotting_data_for_inspection(x,alpha_coeffs,"Alpha Coefficients","Wavenumber cm-1","Absorption cm-1","alpha_coeffs.pdf",folder_to_save,False)
save_as_csv(folder_to_save,"alpha_coeffs.csv","x","alpha",x,alpha_coeffs)
# creating masks around each peak
x_mask1 = x[(x>10000) & (x<10500)]
x_mask2 = x[(x>11200) & (x<12000)]
y_mask1 = alpha_coeffs[(x>10000) & (x<10500)]
y_mask2 = alpha_coeffs[(x>11200) & (x<12000)]
# writing data for plotting later
save_as_csv(folder_to_save,"10000_peak.csv","x","y",x_mask1,y_mask1)
save_as_csv(folder_to_save,"11200_peak.csv","x","y",x_mask2,y_mask2)
# integrated area calcs
area10000=trapz(y_mask1,x_mask1)
area11200=trapz(y_mask2,x_mask2)
os.chdir(folder_to_save)
with open("10000area.txt","w") as f:
f.write(str(area10000))
with open("11200area.txt","w") as f:
f.write(str(area11200))
os.chdir('..')
    finish_prog = raw_input("Press 'y' when finished\n:")
    while finish_prog.lower() != "y":
        finish_prog = raw_input("Press 'y' when finished\n:")
plt.close('all')
print "Finished!"
quit() # end of program
if __name__ == '__main__':
main()
| mit |
qsnake/numpy | numpy/linalg/linalg.py | 1 | 61121 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains a high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError']
from numpy.core import array, asarray, zeros, empty, transpose, \
intc, single, double, csingle, cdouble, inexact, complexfloating, \
newaxis, ravel, all, Inf, dot, add, multiply, identity, sqrt, \
maximum, flatnonzero, diagonal, arange, fastCopyAndTranspose, sum, \
isfinite, size, finfo, absolute, log, exp
from numpy.lib import triu
from numpy.linalg import lapack_lite
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError, 'Singular matrix'
numpy.linalg.linalg.LinAlgError: Singular matrix
"""
pass
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError, '%d-dimensional array given. Array must be \
two-dimensional' % len(a.shape)
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError, 'Array must be square'
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError, "Array must not contain infs or NaNs"
def _assertNonEmpty(*arrays):
for a in arrays:
if size(a) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a,wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = range(0, an)
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : array_like, shape (M, M)
Coefficient matrix.
b : array_like, shape (M,) or (M, N)
Ordinate or "dependent variable" values.
Returns
-------
x : ndarray, shape (M,) or (M, N) depending on b
Solution to the system a x = b
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
`solve` is a wrapper for the LAPACK routines `dgesv`_ and
`zgesv`_, the former being used if `a` is real-valued, the latter if
it is complex-valued. The solution to the system of linear equations
is computed using an LU decomposition [1]_ with partial pivoting and
row interchanges.
.. _dgesv: http://www.netlib.org/lapack/double/dgesv.f
.. _zgesv: http://www.netlib.org/lapack/complex16/zgesv.f
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> (np.dot(a, x) == b).all()
True
"""
a, _ = _makearray(a)
b, wrap = _makearray(b)
one_eq = len(b.shape) == 1
if one_eq:
b = b[:, newaxis]
_assertRank2(a, b)
_assertSquareness(a)
n_eq = a.shape[0]
n_rhs = b.shape[1]
if n_eq != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
# lapack_routine = _findLapackRoutine('gesv', t)
if isComplexType(t):
lapack_routine = lapack_lite.zgesv
else:
lapack_routine = lapack_lite.dgesv
a, b = _fastCopyAndTranspose(t, a, b)
a, b = _to_native_byte_order(a, b)
pivots = zeros(n_eq, fortran_int)
results = lapack_routine(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0)
if results['info'] > 0:
raise LinAlgError, 'Singular matrix'
if one_eq:
return wrap(b.ravel().astype(result_t))
else:
return wrap(b.transpose().astype(result_t))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
        `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError, "Invalid ind argument."
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : array_like, shape (M, M)
Matrix to be inverted.
Returns
-------
ainv : ndarray or matrix, shape (M, M)
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is singular or not square.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = LA.inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = LA.inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
"""
a, wrap = _makearray(a)
return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : array_like, shape (M, M)
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : ndarray, or matrix object if `a` is, shape (M, M)
Lower-triangular Cholesky factor of a.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
m = a.shape[0]
n = a.shape[1]
if isComplexType(t):
lapack_routine = lapack_lite.zpotrf
else:
lapack_routine = lapack_lite.dpotrf
results = lapack_routine(_L, n, a, m, 0)
if results['info'] > 0:
raise LinAlgError, 'Matrix is not positive definite - \
Cholesky decomposition cannot be computed'
s = triu(a, k=0).transpose()
if (s.dtype != result_t):
s = s.astype(result_t)
return wrap(s)
# QR decompostion
def qr(a, mode='full'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like
Matrix to be factored, of shape (M, N).
mode : {'full', 'r', 'economic'}, optional
Specifies the values to be returned. 'full' is the default.
        Economic mode is slightly faster than 'r' mode if only `r` is needed.
Returns
-------
q : ndarray of float or complex, optional
The orthonormal matrix, of shape (M, K). Only returned if
``mode='full'``.
r : ndarray of float or complex, optional
The upper-triangular matrix, of shape (K, N) with K = min(M, N).
Only returned when ``mode='full'`` or ``mode='r'``.
a2 : ndarray of float or complex, optional
        Array of shape (M, N), only returned when ``mode='economic'``.
The diagonal and the upper triangle of `a2` contains `r`, while
the rest of the matrix is undefined.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved, so if `a` is of type `matrix`,
all the return values will be matrices too.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
    and you'll see that it should be y0 = 1, m = 0.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
      x = array([[m], [y0]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
a, wrap = _makearray(a)
_assertRank2(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
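    # LAPACK workspace query: calling the routine with lwork = -1 only writes
    # the optimal workspace size into work[0]; no factorization is performed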
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# economic mode. Isn't actually economic.
if mode[0] == 'e':
if t != result_t :
a = a.astype(result_t)
return a.T
# generate r
r = _fastCopyAndTranspose(result_t, a[:,:mn])
for i in range(mn):
r[i,:i].fill(0.0)
# 'r'-mode, that is, calculate only r
if mode[0] == 'r':
return r
# from here on: build orthonormal matrix q from a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mn, mn, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mn, mn, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
q = _fastCopyAndTranspose(result_t, a[:mn,:])
return wrap(q), wrap(r)
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : array_like, shape (M, M)
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
This is a simple interface to the LAPACK routines dgeev and zgeev
that sets those routines' flags to return only the eigenvalues of
general real and complex arrays, respectively.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
dummy = zeros((1,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeev
w = zeros((n,), t)
rwork = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, w,
dummy, 1, dummy, 1, work, -1, rwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, w,
dummy, 1, dummy, 1, work, lwork, rwork, 0)
else:
lapack_routine = lapack_lite.dgeev
wr = zeros((n,), t)
wi = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, wr, wi,
dummy, 1, dummy, 1, work, -1, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, wr, wi,
dummy, 1, dummy, 1, work, lwork, 0)
if all(wi == 0.):
w = wr
result_t = _realType(result_t)
else:
w = wr+1j*wi
result_t = _complexType(result_t)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
return w.astype(result_t)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : array_like, shape (M, M)
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, not necessarily ordered, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
This is a simple interface to the LAPACK routines dsyevd and zheevd
that sets those routines' flags to return only the eigenvalues of
real symmetric and complex Hermitian arrays, respectively.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288+0.j, 5.82842712+0.j])
"""
UPLO = asbytes(UPLO)
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
liwork = 5*n+3
iwork = zeros((liwork,), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zheevd
w = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
lrwork = 1
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
rwork, -1, iwork, liwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
lrwork = int(rwork[0])
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
rwork, lrwork, iwork, liwork, 0)
else:
lapack_routine = lapack_lite.dsyevd
w = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
iwork, liwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
iwork, liwork, 0)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
return w.astype(result_t)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : array_like, shape (M, M)
A square array of real or complex elements.
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered, nor are they
necessarily real for real arrays (though for real arrays
complex-valued eigenvalues should occur in conjugate pairs).
v : ndarray, shape (M, M)
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
eigvals : eigenvalues of a non-symmetric array.
Notes
-----
This is a simple interface to the LAPACK routines dgeev and zgeev
which compute the eigenvalues and eigenvectors of, respectively,
general real- and complex-valued square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
    `v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
_assertFinite(a)
a, t, result_t = _convertarray(a) # convert to double or cdouble type
a = _to_native_byte_order(a)
real_t = _linalgRealType(t)
n = a.shape[0]
dummy = zeros((1,), t)
if isComplexType(t):
# Complex routines take different arguments
lapack_routine = lapack_lite.zgeev
w = zeros((n,), t)
v = zeros((n, n), t)
lwork = 1
work = zeros((lwork,), t)
rwork = zeros((2*n,), real_t)
results = lapack_routine(_N, _V, n, a, n, w,
dummy, 1, v, n, work, -1, rwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, w,
dummy, 1, v, n, work, lwork, rwork, 0)
else:
lapack_routine = lapack_lite.dgeev
wr = zeros((n,), t)
wi = zeros((n,), t)
vr = zeros((n, n), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, wr, wi,
dummy, 1, vr, n, work, -1, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, wr, wi,
dummy, 1, vr, n, work, lwork, 0)
if all(wi == 0.0):
w = wr
v = vr
result_t = _realType(result_t)
else:
w = wr+1j*wi
v = array(vr, w.dtype)
ind = flatnonzero(wi != 0.0) # indices of complex e-vals
for i in range(len(ind)//2):
v[ind[2*i]] = vr[ind[2*i]] + 1j*vr[ind[2*i+1]]
v[ind[2*i+1]] = vr[ind[2*i]] - 1j*vr[ind[2*i+1]]
result_t = _complexType(result_t)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
vt = v.transpose().astype(result_t)
return w.astype(result_t), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : array_like, shape (M, M)
A complex Hermitian or real symmetric matrix.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, not necessarily ordered.
v : ndarray, or matrix object if `a` is, shape (M, M)
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
This is a simple interface to the LAPACK routines dsyevd and zheevd,
which compute the eigenvalues and eigenvectors of real symmetric and
complex Hermitian arrays, respectively.
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = asbytes(UPLO)
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
liwork = 5*n+3
iwork = zeros((liwork,), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zheevd
w = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
lrwork = 1
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
rwork, -1, iwork, liwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
lrwork = int(rwork[0])
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
rwork, lrwork, iwork, liwork, 0)
else:
lapack_routine = lapack_lite.dsyevd
w = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
iwork, liwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
iwork, liwork, 0)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
at = a.transpose().astype(result_t)
return w.astype(_realType(result_t)), wrap(at)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : ndarray
Unitary matrix. The shape of `u` is (`M`, `M`) or (`M`, `K`)
depending on value of ``full_matrices``.
s : ndarray
The singular values, sorted so that ``s[i] >= s[i+1]``. `s` is
a 1-d array of length min(`M`, `N`).
v : ndarray
Unitary matrix of shape (`N`, `N`) or (`K`, `N`), depending on
``full_matrices``.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
    ((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertNonEmpty(a)
m, n = a.shape
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
s = zeros((min(n, m),), real_t)
if compute_uv:
if full_matrices:
nu = m
nvt = n
option = _A
else:
nu = min(n, m)
nvt = min(n, m)
option = _S
u = zeros((nu, m), t)
vt = zeros((n, nvt), t)
else:
option = _N
nu = 1
nvt = 1
u = empty((1, 1), t)
vt = empty((1, 1), t)
iwork = zeros((8*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgesdd
rwork = zeros((5*min(m, n)*min(m, n) + 5*min(m, n),), real_t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgesdd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError, 'SVD did not converge'
s = s.astype(_realType(result_t))
if compute_uv:
u = u.transpose().astype(result_t)
vt = vt.transpose().astype(result_t)
return wrap(u), s, wrap(vt)
else:
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : array_like, shape (M, N)
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x,compute_uv=False)
return s[0]/s[-1]
else:
return norm(x,p)*norm(inv(x),p)
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the
array that are greater than `tol`.
Parameters
----------
M : array_like
array of <=2 dimensions
tol : {None, float}
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * eps``.
Notes
-----
Golub and van Loan [1]_ define "numerical rank deficiency" as using
tol=eps*S[0] (where S[0] is the maximum singular value and thus the
2-norm of the matrix). This is one definition of rank deficiency,
and the one we use here. When floating point roundoff is the main
concern, then "numerical rank deficiency" is a reasonable choice. In
some cases you may prefer other definitions. The most useful measure
of the tolerance depends on the operations you intend to use on your
matrix. For example, if your data come from uncertain measurements
with uncertainties greater than floating point epsilon, choosing a
tolerance near that uncertainty may be preferable. The tolerance
may be absolute if the uncertainties are absolute rather than
relative.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*.
Baltimore: Johns Hopkins University Press, 1996.
Examples
--------
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * finfo(S.dtype).eps
return sum(S > tol)
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : ndarray, shape (N, M)
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNonEmpty(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.;
res = dot(transpose(vt), multiply(s[:, newaxis],transpose(u)))
return wrap(res)
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
    If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : float or complex
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : float
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
.. versionadded:: 1.6.0.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
if isComplexType(t):
lapack_routine = lapack_lite.zgetrf
else:
lapack_routine = lapack_lite.dgetrf
pivots = zeros((n,), fortran_int)
results = lapack_routine(n, n, a, n, pivots, 0)
info = results['info']
if (info < 0):
raise TypeError, "Illegal input to Fortran routine"
elif (info > 0):
return (t(0.0), _realType(t)(-Inf))
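    # each row interchange recorded by getrf flips the determinant's sign;
    # an odd number of swaps gives -1, an even number leaves +1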
sign = 1. - 2. * (add.reduce(pivots != arange(1, n + 1)) % 2)
d = diagonal(a)
absd = absolute(d)
sign *= multiply.reduce(d / absd)
log(absd, absd)
logdet = add.reduce(absd, axis=-1)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : array_like, shape (M, M)
Input array.
Returns
-------
det : ndarray
Determinant of `a`.
Notes
-----
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
See Also
--------
    slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
"""
sign, logdet = slogdet(a)
return sign * exp(logdet)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : array_like, shape (M, N)
"Coefficient" matrix.
b : array_like, shape (M,) or (M, K)
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : ndarray, shape (N,) or (N, K)
Least-squares solution. The shape of `x` depends on the shape of
`b`.
residues : ndarray, shape (), (1,), or (K,)
Sums of residues; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or > M, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : ndarray, shape (min(M,N),)
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0],:n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError, 'SVD did not converge in Linear Least Squares'
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
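# --- Illustrative sketch (added for clarity; not part of the original ----
# module). An uncalled helper that reproduces the docstring example above
# and shows when the returned residuals array comes back empty. All names
# are local to this sketch and numpy is imported lazily inside the function.
def _lstsq_usage_sketch():
    import numpy as _np
    x = _np.array([0., 1., 2., 3.])
    y = _np.array([-1., 0.2, 0.9, 2.1])
    A = _np.vstack([x, _np.ones(len(x))]).T            # (4, 2) design matrix
    sol, resids, rank, sv = _np.linalg.lstsq(A, y)
    assert rank == 2 and resids.shape == (1,)          # overdetermined, full rank
    _, resids_sq, _, _ = _np.linalg.lstsq(_np.eye(2), _np.ones(2))
    assert resids_sq.size == 0                         # M == N: residuals are empty
    return sol                                         # approximately [1.0, -0.95]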
def norm(x, ord=None):
"""
Matrix or vector norm.
This function is able to return one of seven different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like, shape (M,) or (M, N)
Input array.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
Returns
-------
n : float
Norm of the matrix or vector.
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
"""
x = asarray(x)
if ord is None: # check the default case first and handle it immediately
return sqrt(add.reduce((x.conj() * x).ravel().real))
nd = x.ndim
if nd == 1:
if ord == Inf:
return abs(x).max()
elif ord == -Inf:
return abs(x).min()
elif ord == 0:
return (x != 0).sum() # Zero norm
elif ord == 1:
return abs(x).sum() # special case for speedup
elif ord == 2:
return sqrt(((x.conj()*x).real).sum()) # special case for speedup
else:
try:
ord + 1
except TypeError:
raise ValueError, "Invalid norm order for vectors."
return ((abs(x)**ord).sum())**(1.0/ord)
elif nd == 2:
if ord == 2:
return svd(x, compute_uv=0).max()
elif ord == -2:
return svd(x, compute_uv=0).min()
elif ord == 1:
return abs(x).sum(axis=0).max()
elif ord == Inf:
return abs(x).sum(axis=1).max()
elif ord == -1:
return abs(x).sum(axis=0).min()
elif ord == -Inf:
return abs(x).sum(axis=1).min()
elif ord in ['fro','f']:
return sqrt(add.reduce((x.conj() * x).real.ravel()))
else:
raise ValueError, "Invalid norm order for matrices."
else:
raise ValueError, "Improper number of dimensions to norm."
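# --- Illustrative sketch (added for clarity; not part of the original ----
# module). An uncalled helper cross-checking a few rows of the order table
# in the `norm` docstring against their explicit formulas; all names are
# local to this sketch.
def _norm_table_sketch():
    import numpy as _np
    v = _np.array([3., -4.])
    A = _np.array([[1., -2.], [3., 4.]])
    # vector norms
    assert _np.allclose(_np.linalg.norm(v, 1), _np.abs(v).sum())
    assert _np.allclose(_np.linalg.norm(v, _np.inf), _np.abs(v).max())
    assert _np.allclose(_np.linalg.norm(v, 3), (_np.abs(v) ** 3).sum() ** (1. / 3))
    # matrix norms
    assert _np.allclose(_np.linalg.norm(A, 1), _np.abs(A).sum(axis=0).max())
    assert _np.allclose(_np.linalg.norm(A, _np.inf), _np.abs(A).sum(axis=1).max())
    assert _np.allclose(_np.linalg.norm(A, 'fro'), _np.sqrt((A * A).sum()))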
| bsd-3-clause |
QuLogic/iris | docs/iris/example_code/Meteorology/lagged_ensemble.py | 12 | 5993 | """
Seasonal ensemble model plots
=============================
This example demonstrates the loading of a lagged ensemble dataset from the
GloSea4 model, which is then used to produce two types of plot:
* The first shows the "postage stamp" style image with an array of 14 images,
one for each ensemble member with a shared colorbar. (The missing image in
this example represents ensemble member number 6 which was a failed run)
* The second plot shows the data limited to a region of interest, in this case
a region defined for forecasting ENSO (El Nino-Southern Oscillation), which,
for the purposes of this example, has had the ensemble mean subtracted from
each ensemble member to give an anomaly surface temperature. In practice a
better approach would be to take the climatological mean, calibrated to the
model, from each ensemble member.
"""
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.plot as iplt
def realization_metadata(cube, field, fname):
"""
A function which modifies the cube's metadata to add a "realization"
(ensemble member) coordinate from the filename if one doesn't already exist
in the cube.
"""
# add an ensemble member coordinate if one doesn't already exist
if not cube.coords('realization'):
# the ensemble member is encoded in the filename as *_???.pp where ???
# is the ensemble member
realization_number = fname[-6:-3]
import iris.coords
realization_coord = iris.coords.AuxCoord(np.int32(realization_number),
'realization')
cube.add_aux_coord(realization_coord)
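# Illustrative helper (added for clarity; not part of the original example):
# shows how the slice used above maps a GloSea4 file name to its ensemble
# member number. The default file name here is just a made-up example.
def _realization_from_fname_sketch(fname='ensemble_010.pp'):
    # 'ensemble_010.pp'[-6:-3] -> '010' -> 10
    return np.int32(fname[-6:-3])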
def main():
# extract surface temperature cubes which have an ensemble member
# coordinate, adding appropriate lagged ensemble metadata
surface_temp = iris.load_cube(
iris.sample_data_path('GloSea4', 'ensemble_???.pp'),
iris.Constraint('surface_temperature', realization=lambda value: True),
callback=realization_metadata,
)
# -------------------------------------------------------------------------
# Plot #1: Ensemble postage stamps
# -------------------------------------------------------------------------
# for the purposes of this example, take the last time element of the cube
last_timestep = surface_temp[:, -1, :, :]
# Make 50 evenly spaced levels which span the dataset
contour_levels = np.linspace(np.min(last_timestep.data),
np.max(last_timestep.data),
50)
# Create a wider than normal figure to support our many plots
plt.figure(figsize=(12, 6), dpi=100)
# Also manually adjust the spacings which are used when creating subplots
plt.gcf().subplots_adjust(hspace=0.05, wspace=0.05, top=0.95, bottom=0.05,
left=0.075, right=0.925)
# iterate over all possible latitude longitude slices
for cube in last_timestep.slices(['latitude', 'longitude']):
# get the ensemble member number from the ensemble coordinate
ens_member = cube.coord('realization').points[0]
# plot the data in a 4x4 grid, with each plot's position in the grid
# being determined by ensemble member number the special case for the
# 13th ensemble member is to have the plot at the bottom right
if ens_member == 13:
plt.subplot(4, 4, 16)
else:
plt.subplot(4, 4, ens_member+1)
cf = iplt.contourf(cube, contour_levels)
# add coastlines
plt.gca().coastlines()
# make an axes to put the shared colorbar in
colorbar_axes = plt.gcf().add_axes([0.35, 0.1, 0.3, 0.05])
colorbar = plt.colorbar(cf, colorbar_axes, orientation='horizontal')
colorbar.set_label('%s' % last_timestep.units)
# limit the colorbar to 8 tick marks
import matplotlib.ticker
colorbar.locator = matplotlib.ticker.MaxNLocator(8)
colorbar.update_ticks()
# get the time for the entire plot
time_coord = last_timestep.coord('time')
time = time_coord.units.num2date(time_coord.bounds[0, 0])
# set a global title for the postage stamps with the date formated by
# "monthname year"
plt.suptitle('Surface temperature ensemble forecasts for %s' % (
time.strftime('%B %Y'), ))
iplt.show()
# -------------------------------------------------------------------------
# Plot #2: ENSO plumes
# -------------------------------------------------------------------------
# Nino 3.4 lies between: 170W and 120W, 5N and 5S, so define a constraint
# which matches this
nino_3_4_constraint = iris.Constraint(
longitude=lambda v: -170+360 <= v <= -120+360,
latitude=lambda v: -5 <= v <= 5)
nino_cube = surface_temp.extract(nino_3_4_constraint)
# Subsetting a circular longitude coordinate always results in a circular
# coordinate, so set the coordinate to be non-circular
nino_cube.coord('longitude').circular = False
# Calculate the horizontal mean for the nino region
mean = nino_cube.collapsed(['latitude', 'longitude'], iris.analysis.MEAN)
# Calculate the ensemble mean of the horizontal mean. To do this, remove
# the "forecast_period" and "forecast_reference_time" coordinates which
    # span both "realization" and "time".
mean.remove_coord("forecast_reference_time")
mean.remove_coord("forecast_period")
ensemble_mean = mean.collapsed('realization', iris.analysis.MEAN)
# take the ensemble mean from each ensemble member
mean -= ensemble_mean.data
plt.figure()
for ensemble_member in mean.slices(['time']):
# draw each ensemble member as a dashed line in black
iplt.plot(ensemble_member, '--k')
plt.title('Mean temperature anomaly for ENSO 3.4 region')
plt.xlabel('Time')
plt.ylabel('Temperature anomaly / K')
iplt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
lessc0de/neuroelectro_org | article_text_mining/deprecated/db_add_full_text.py | 2 | 12121 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 20 09:54:11 2012
@author: Shreejoy
"""
import re
from urllib2 import urlopen, URLError, HTTPError
import time
from HTMLParser import HTMLParseError
from matplotlib.pylab import *
from bs4 import BeautifulSoup
from neuroelectro.models import Article
from neuroelectro.models import DataTable, ArticleFullText
from db_functions.pubmed_functions import add_articles
def get_full_text_links():
NUMHITS = 50
firstInd = 4900
maxURLTries = 5
waitTime = 60
totalArticles = NUMHITS + firstInd + 1 # just set this later when it gets searched
searchLinkFull = 'http://jp.physoc.org/search?tmonth=&pubdate_year=&submit=yes&submit=yes&submit=Submit&andorexacttitle=and&format=standard&firstpage=&fmonth=&title=&tyear=&hits=' + str(NUMHITS) + '&titleabstract=&volume=&sortspec=relevance&andorexacttitleabs=and&author2=&tocsectionid=all&andorexactfulltext=and&author1=&fyear=&doi=&fulltext=membrane%20potential%20neuron&FIRSTINDEX=' + str(firstInd)
# 'http://jn.physiology.org/search?tmonth=Mar&pubdate_year=&submit=yes&submit=yes&submit=Submit&andorexacttitle=and&format=condensed&firstpage=&fmonth=Jan&title=&tyear=2012'
# searchLinkBase = 'http://jn.physiology.org/search?tmonth=Mar&pubdate_year=&submit=yes&submit=yes&submit=Submit&andorexacttitle=and&format=condensed&firstpage=&fmonth=Jan&title=&tyear=2012&hits=' + str(NUMHITS) + '&titleabstract=&flag=&journalcode=jn&volume=&sortspec=date&andorexacttitleabs=and&author2=&andorexactfulltext=and&author1=&fyear=1997&doi=&fulltext=%22input%20resistance%22%20AND%20neuron&FIRSTINDEX=' + str(firstInd)
fullTextLinks = []
pdfLinks = []
while firstInd + NUMHITS <= totalArticles:
print 'searching %d of %d articles' % (firstInd, totalArticles)
# searchLinkFull = 'http://jn.physiology.org/search?tmonth=Mar&pubdate_year=&submit=yes&submit=yes&submit=Submit&andorexacttitle=and&format=condensed&firstpage=&fmonth=Jan&title=&tyear=2012&hits=' + str(NUMHITS) + '&titleabstract=&flag=&journalcode=jn&volume=&sortspec=date&andorexacttitleabs=and&author2=&andorexactfulltext=and&author1=&fyear=1997&doi=&fulltext=%22input%20resistance%22%20AND%20neuron&FIRSTINDEX=' + str(firstInd)
# searchLinkFull = 'http://www.jneurosci.org/search?tmonth=Mar&pubdate_year=&submit=yes&submit=yes&submit=Submit&andorexacttitle=and&format=condensed&firstpage=&fmonth=Jan&title=&tyear=2012&hits=' + str(NUMHITS) + '&titleabstract=&volume=&sortspec=date&andorexacttitleabs=and&author2=&tocsectionid=all&andorexactfulltext=and&author1=&fyear=1997&doi=&fulltext=input%20resistance%20neuron&FIRSTINDEX=' + str(firstInd)
handle = urlopen(searchLinkFull) # open the url
data = handle.read() # read the data
soup = BeautifulSoup(data)
# print BeautifulSoup.prettify(soup)
headerString = soup.find_all('h1')[1].string
print headerString
numTries = 1
while (headerString == 'Currently Unavailable' or headerString == 'Your search criteria matched no articles') and numTries < maxURLTries:
print 'URL search failed %d times' % numTries
print 'now waiting %d secs before trying search again' % (waitTime*numTries)
time.sleep(waitTime*numTries)
handle = urlopen(searchLinkFull) # open the url
data = handle.read() # read the data
soup = BeautifulSoup(data)
headerString = soup.find_all('h1')[1].string
numTries += 1
if numTries == maxURLTries:
print 'URL search failed %d times' % maxURLTries
print 'skipping'
continue
# get all links to full text docs
if totalArticles == NUMHITS + firstInd + 1:
totalArticles = int(soup.find('span', {'class':'results-total'}).text)
for link in soup.find_all('a'):
# print link.get('rel')
if link.string == 'Full Text':
currLink = link.get('href')
fullTextLinks.append(currLink)
elif link.string == 'Full Text (PDF)':
currLink = link.get('href')
pdfLinks.append(currLink)
# print(link.get('href'))
firstInd += NUMHITS
print 'now waiting %d secs before next search' % waitTime
time.sleep(waitTime)
return (fullTextLinks, pdfLinks)
def get_full_text_from_link(fullTextLink):
# searchLinkFull = 'http://jn.physiology.org/search?tmonth=Mar&pubdate_year=&submit=yes&submit=yes&submit=Submit&andorexacttitle=and&format=condensed&firstpage=&fmonth=Jan&title=&tyear=2012&hits=' + str(NUMHITS) + '&titleabstract=&flag=&journalcode=jn&volume=&sortspec=date&andorexacttitleabs=and&author2=&andorexactfulltext=and&author1=&fyear=1997&doi=&fulltext=%22input%20resistance%22%20AND%20neuron&FIRSTINDEX=' + str(firstInd)
# fullTextLink = 'http://jn.physiology.org/content/107/6/1655.full'
(soup, fullTextLink) = soupify_plus(fullTextLink)
# isPdf = checkPdf(fullTextLink)
if soup is False:
return False
# print BeautifulSoup.prettify(soup)
# find pubmed link and pmid
try:
tempStr = re.search('access_num=[\d]+', str(soup('a',text=('PubMed citation'))[0])).group()
except (IndexError):
return False
pmid = int(re.search('\d+', tempStr).group())
if Article.objects.filter(pmid = pmid).count() == 0:
# add pmid abstract and stuff to db Article list
add_articles([pmid])
# get Article object
art = Article.objects.get(pmid = pmid)
art.full_text_link = fullTextLink
art.save()
# get article full text and save to object
fullTextTag = soup.find('div', {'class':'article fulltext-view'})
# fullTextTag = soup.find('div', {'itemprop':'articleBody'})
# # tag finder for j-neurophys
# tags = soup.find_all('div')
# for t in tags:
# if 'itemprop' in t.attrs and t['itemprop'] == 'articleBody':
# fullTextTag = t
# break
fullTextOb = ArticleFullText.objects.get_or_create(article = art)[0]
fullTextOb.full_text = str(fullTextTag)
fullTextOb.save()
# get article data table links
# just count number of tables in article
# tableList = []
numTables = 0
for link in soup.find_all('a'):
linkText = link.get('href')
if link.string == 'In this window' and linkText.find('T') != -1:
print(linkText)
numTables += 1
# tableList.append(linkText)
print 'adding %d tables' % (numTables)
for i in range(numTables):
tableLink = fullTextLink[:-5] + '/T' + str(i+1)
tableSoup = soupify(tableLink)
if tableSoup is False:
continue
tableTag = tableSoup.find('div', {'class':'table-expansion'})
if tableTag is None:
tableTag = soup.find('div', {'itemprop':'articleBody'})
dataTableOb = DataTable.objects.get_or_create(link = tableLink, article = art)[0]
table_html = str(tableTag)
dataTableOb.table_html = table_html
table_text = tableTag.get_text()
table_text = table_text[0:min(9999,len(table_text))]
dataTableOb.table_text = table_text
dataTableOb.save()
elinkBase = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi?dbfrom=pubmed&id=%d&cmd=prlinks&retmode=ref'
def get_full_text_from_pmid(pmids):
cnt = 0
for pmid in pmids:
print '%d of %d articles' % (cnt, len(pmids))
link = elinkBase % pmid
get_full_text_from_link(link)
cnt += 1
elinkBase = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi?dbfrom=pubmed&id=%d&cmd=prlinks&retmode=ref'
def get_pdf_from_pmid(pmids):
os.chdir('C:\Python27\Scripts\Biophys\pdf_dir')
cnt = 0
failedPmids = []
fullTextPmids = [int(a.pmid) for a in Article.objects.filter(articlefulltext__isnull = False)]
diffSet = set(pmids).difference(set(fullTextPmids))
for pmid in diffSet:
print '%d of %d articles' % (cnt, len(diffSet))
link = elinkBase % pmid
success = get_pdf_from_link(link, pmid)
if success == False:
failedPmids.append(pmid)
cnt += 1
os.chdir('C:\Python27\Scripts\Biophys')
return failedPmids
MAXURLTRIES = 2
MAXTITLELEN = 100
def get_pdf_from_link(link, pmid):
success = False
numTries = 0
waitTimeLong = 180
waitTimeShort = 2
a = Article.objects.get(pmid = pmid)
title = a.title
title = '%d_%s' % (pmid, title)
title = re.sub('\s', '_', title)
pattern = '[a-zA-Z0-9_]'
title = ''.join(re.findall(pattern, title))
fileName = title[0:min(MAXTITLELEN, len(title))]
fileName = fileName + '.pdf'
while numTries < MAXURLTRIES and success == False:
try:
handle = urlopen(link) # open the url
url = handle.geturl()
newUrl = re.sub('.long', '.full.pdf', url)
handle = urlopen(newUrl)
data = handle.read() # read the data
f = open(fileName, 'wb')
f.write(data)
f.close()
# soup = BeautifulSoup(data)
success = True
time.sleep(waitTimeShort)
except (URLError, HTTPError, HTMLParseError):
print link + ' failed %s times' % numTries
numTries += 1
print 'now waiting %d secs before trying search again' % (waitTimeLong*numTries)
time.sleep(waitTimeLong*numTries)
url = ''
return success
def get_full_text_from_link_all(fullTextLinkList):
cnt = 0
for link in fullTextLinkList:
print '%d of %d articles' % (cnt, len(fullTextLinkList))
get_full_text_from_link(link)
cnt += 1
def convert_links(linkList):
retLinkList = []
# linkBase = 'http://jn.physiology.org'
linkBase = 'http://www.jneurosci.org'
for currLink in linkList:
linkPart = re.search('[\d\w/]+.', currLink).group()[:-1]
newLink = linkBase + linkPart + '.full'
retLinkList.append(newLink)
return retLinkList
MAXURLTRIES = 2
def soupify_plus(link):
success = False
numTries = 0
waitTimeLong = 180
waitTimeShort = 2
while numTries < MAXURLTRIES and success == False:
try:
handle = urlopen(link) # open the url
url = handle.geturl()
data = handle.read() # read the data
soup = BeautifulSoup(data)
success = True
time.sleep(waitTimeShort)
except (URLError, HTTPError, HTMLParseError):
print link + ' failed %s times' % numTries
numTries += 1
print 'now waiting %d secs before trying search again' % (waitTimeLong*numTries)
time.sleep(waitTimeLong*numTries)
url = ''
if numTries == MAXURLTRIES:
soup = False
return (soup, url)
#MAXURLTRIES = 2
def soupify(link):
success = False
numTries = 0
waitTimeLong = 180
waitTimeShort = 2
while numTries < MAXURLTRIES and success == False:
try:
handle = urlopen(link) # open the url
data = handle.read() # read the data
soup = BeautifulSoup(data)
success = True
time.sleep(waitTimeShort)
except (URLError, HTTPError, HTMLParseError):
print link + ' failed %s times' % numTries
numTries += 1
print 'now waiting %d secs before trying search again' % (waitTimeLong*numTries)
time.sleep(waitTimeLong*numTries)
if numTries == MAXURLTRIES:
soup = False
return soup
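# --- Illustrative sketch (added for clarity; not part of the original ----
# script). The fetch helpers above all follow the same retry-with-backoff
# pattern; a generic, uncalled version of that pattern looks roughly like
# this. Names are local to this sketch.
def _fetch_with_retries(link, max_tries=MAXURLTRIES, wait=180):
    for attempt in range(1, max_tries + 1):
        try:
            return urlopen(link).read()
        except (URLError, HTTPError):
            print link + ' failed %d times' % attempt
            time.sleep(wait * attempt)
    return None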
def checkPdf(link):
    # strip the "+html" suffix that publishers append to framed PDF links
    newLink = re.sub(r'\+html', '', link)
    isPdf = False
    if re.search(r'\.pdf', newLink) is not None:
        isPdf = True
    return (isPdf, newLink) | gpl-2.0 |
maryklayne/Funcao | sympy/interactive/session.py | 13 | 16002 | """Tools for setting up interactive sessions. """
from __future__ import print_function, division
from sympy.external import import_module
from sympy.interactive.printing import init_printing
preexec_source = """\
from __future__ import division
from sympy import *
x, y, z, t = symbols('x y z t')
k, m, n = symbols('k m n', integer=True)
f, g, h = symbols('f g h', cls=Function)
init_printing()
"""
verbose_message = """\
These commands were executed:
%(source)s
Documentation can be found at http://www.sympy.org
"""
no_ipython = """\
Couldn't locate IPython. Having IPython installed is greatly recommended.
See http://ipython.scipy.org for more details. If you use Debian/Ubuntu,
just install the 'ipython' package and start isympy again.
"""
def _make_message(ipython=True, quiet=False, source=None):
"""Create a banner for an interactive session. """
from sympy import __version__ as sympy_version
from sympy.polys.domains import GROUND_TYPES
from sympy.utilities.misc import ARCH
from sympy import SYMPY_DEBUG
import sys
import os
python_version = "%d.%d.%d" % sys.version_info[:3]
if ipython:
shell_name = "IPython"
else:
shell_name = "Python"
info = ['ground types: %s' % GROUND_TYPES]
cache = os.getenv('SYMPY_USE_CACHE')
if cache is not None and cache.lower() == 'no':
info.append('cache: off')
if SYMPY_DEBUG:
info.append('debugging: on')
args = shell_name, sympy_version, python_version, ARCH, ', '.join(info)
message = "%s console for SymPy %s (Python %s-%s) (%s)\n" % args
if not quiet:
if source is None:
source = preexec_source
_source = ""
for line in source.split('\n')[:-1]:
if not line:
_source += '\n'
else:
_source += '>>> ' + line + '\n'
message += '\n' + verbose_message % {'source': _source}
return message
def int_to_Integer(s):
"""
Wrap integer literals with Integer.
This is based on the decistmt example from
http://docs.python.org/library/tokenize.html.
Only integer literals are converted. Float literals are left alone.
Example
=======
>>> from __future__ import division
>>> from sympy.interactive.session import int_to_Integer
>>> from sympy import Integer
>>> s = '1.2 + 1/2 - 0x12 + a1'
>>> int_to_Integer(s)
'1.2 +Integer (1 )/Integer (2 )-Integer (0x12 )+a1 '
>>> s = 'print (1/2)'
>>> int_to_Integer(s)
'print (Integer (1 )/Integer (2 ))'
>>> exec(s)
0.5
>>> exec(int_to_Integer(s))
1/2
"""
from tokenize import generate_tokens, untokenize, NUMBER, NAME, OP
from sympy.core.compatibility import StringIO
def _is_int(num):
"""
Returns true if string value num (with token NUMBER) represents an integer.
"""
# XXX: Is there something in the standard library that will do this?
if '.' in num or 'j' in num.lower() or 'e' in num.lower():
return False
return True
result = []
g = generate_tokens(StringIO(s).readline) # tokenize the string
for toknum, tokval, _, _, _ in g:
if toknum == NUMBER and _is_int(tokval): # replace NUMBER tokens
result.extend([
(NAME, 'Integer'),
(OP, '('),
(NUMBER, tokval),
(OP, ')')
])
else:
result.append((toknum, tokval))
return untokenize(result)
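# Illustrative sketch (added for clarity; not part of the original module):
# because the transformation above works on the token stream, only NUMBER
# tokens that look like integers are wrapped; float literals pass through
# untouched (the exact spacing of the output follows `untokenize`).
def _int_to_Integer_sketch():
    # wraps 1 and 2 with Integer(...) but leaves 1.5 alone
    return int_to_Integer('1/2 + 1.5')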
# XXX: Something like this might be used, but it only works on single line
# inputs. See
# http://mail.scipy.org/pipermail/ipython-user/2012-August/010846.html and
# https://github.com/ipython/ipython/issues/1491. So instead we are forced to
# just monkey-patch run_cell until IPython builds a better API.
#
# class IntTransformer(object):
# """
# IPython command line transformer that recognizes and replaces int
# literals.
#
# Based on
# https://bitbucket.org/birkenfeld/ipython-physics/src/71b2d850da00/physics.py.
#
# """
# priority = 99
# enabled = True
# def transform(self, line, continue_prompt):
# import re
# from tokenize import TokenError
# leading_space = re.compile(' *')
# spaces = re.match(leading_space, line).span()[1]
# try:
# return ' '*spaces + int_to_Integer(line)
# except TokenError:
# return line
#
# int_transformer = IntTransformer()
#
# def enable_automatic_int_sympification(app):
# """
# Allow IPython to automatically convert integer literals to Integer.
#
# This lets things like 1/2 be executed as (essentially) Rational(1, 2).
# """
# app.shell.prefilter_manager.register_transformer(int_transformer)
def enable_automatic_int_sympification(app):
"""
Allow IPython to automatically convert integer literals to Integer.
"""
hasshell = hasattr(app, 'shell')
import ast
if hasshell:
old_run_cell = app.shell.run_cell
else:
old_run_cell = app.run_cell
def my_run_cell(cell, *args, **kwargs):
try:
# Check the cell for syntax errors. This way, the syntax error
# will show the original input, not the transformed input. The
# downside here is that IPython magic like %timeit will not work
# with transformed input (but on the other hand, IPython magic
# that doesn't expect transformed input will continue to work).
ast.parse(cell)
except SyntaxError:
pass
else:
cell = int_to_Integer(cell)
old_run_cell(cell, *args, **kwargs)
if hasshell:
app.shell.run_cell = my_run_cell
else:
app.run_cell = my_run_cell
def enable_automatic_symbols(app):
"""Allow IPython to automatially create symbols (``isympy -a``). """
# XXX: This should perhaps use tokenize, like int_to_Integer() above.
# This would avoid re-executing the code, which can lead to subtle
# issues. For example:
#
# In [1]: a = 1
#
# In [2]: for i in range(10):
# ...: a += 1
# ...:
#
# In [3]: a
# Out[3]: 11
#
# In [4]: a = 1
#
# In [5]: for i in range(10):
# ...: a += 1
# ...: print b
# ...:
# b
# b
# b
# b
# b
# b
# b
# b
# b
# b
#
# In [6]: a
# Out[6]: 12
#
# Note how the for loop is executed again because `b` was not defined, but `a`
# was already incremented once, so the result is that it is incremented
# multiple times.
import re
re_nameerror = re.compile(
"name '(?P<symbol>[A-Za-z_][A-Za-z0-9_]*)' is not defined")
def _handler(self, etype, value, tb, tb_offset=None):
"""Handle :exc:`NameError` exception and allow injection of missing symbols. """
if etype is NameError and tb.tb_next and not tb.tb_next.tb_next:
match = re_nameerror.match(str(value))
if match is not None:
# XXX: Make sure Symbol is in scope. Otherwise you'll get infinite recursion.
self.run_cell("%(symbol)s = Symbol('%(symbol)s')" %
{'symbol': match.group("symbol")}, store_history=False)
try:
code = self.user_ns['In'][-1]
except (KeyError, IndexError):
pass
else:
self.run_cell(code, store_history=False)
return None
finally:
self.run_cell("del %s" % match.group("symbol"),
store_history=False)
stb = self.InteractiveTB.structured_traceback(
etype, value, tb, tb_offset=tb_offset)
self._showtraceback(etype, value, stb)
if hasattr(app, 'shell'):
app.shell.set_custom_exc((NameError,), _handler)
else:
# This was restructured in IPython 0.13
app.set_custom_exc((NameError,), _handler)
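# Note added for clarity (not part of the original module): the custom
# handler above only fires when the NameError message matches
# re_nameerror, e.g. "name 'alpha' is not defined" yields
# match.group("symbol") == 'alpha'; the handler then runs
# alpha = Symbol('alpha'), re-executes the failed cell, and finally
# deletes the temporary binding again.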
def init_ipython_session(argv=[], auto_symbols=False, auto_int_to_Integer=False):
"""Construct new IPython session. """
import IPython
if IPython.__version__ >= '0.11':
# use an app to parse the command line, and init config
# IPython 1.0 deprecates the frontend module, so we import directly
# from the terminal module to prevent a deprecation message from being
# shown.
if IPython.__version__ >= '1.0':
from IPython.terminal import ipapp
else:
from IPython.frontend.terminal import ipapp
app = ipapp.TerminalIPythonApp()
# don't draw IPython banner during initialization:
app.display_banner = False
app.initialize(argv)
if auto_symbols:
readline = import_module("readline")
if readline:
enable_automatic_symbols(app)
if auto_int_to_Integer:
enable_automatic_int_sympification(app)
return app.shell
else:
from IPython.Shell import make_IPython
return make_IPython(argv)
def init_python_session():
"""Construct new Python session. """
from code import InteractiveConsole
class SymPyConsole(InteractiveConsole):
"""An interactive console with readline support. """
def __init__(self):
InteractiveConsole.__init__(self)
try:
import readline
except ImportError:
pass
else:
import os
import atexit
readline.parse_and_bind('tab: complete')
if hasattr(readline, 'read_history_file'):
history = os.path.expanduser('~/.sympy-history')
try:
readline.read_history_file(history)
except IOError:
pass
atexit.register(readline.write_history_file, history)
return SymPyConsole()
def init_session(ipython=None, pretty_print=True, order=None,
use_unicode=None, use_latex=None, quiet=False, auto_symbols=False,
auto_int_to_Integer=False, argv=[]):
"""
Initialize an embedded IPython or Python session. The IPython session is
initiated with the --pylab option, without the numpy imports, so that
matplotlib plotting can be interactive.
Parameters
==========
pretty_print: boolean
If True, use pretty_print to stringify;
if False, use sstrrepr to stringify.
order: string or None
There are a few different settings for this parameter:
lex (default), which is lexographic order;
grlex, which is graded lexographic order;
grevlex, which is reversed graded lexographic order;
old, which is used for compatibility reasons and for long expressions;
None, which sets it to lex.
use_unicode: boolean or None
If True, use unicode characters;
if False, do not use unicode characters.
use_latex: boolean or None
If True, use latex rendering if IPython GUI's;
if False, do not use latex rendering.
quiet: boolean
If True, init_session will not print messages regarding its status;
if False, init_session will print messages regarding its status.
auto_symbols: boolean
If True, IPython will automatically create symbols for you.
If False, it will not.
The default is False.
auto_int_to_Integer: boolean
If True, IPython will automatically wrap int literals with Integer, so
that things like 1/2 give Rational(1, 2).
If False, it will not.
The default is False.
ipython: boolean or None
If True, printing will initialize for an IPython console;
if False, printing will initialize for a normal console;
The default is None, which automatically determines whether we are in
an ipython instance or not.
argv: list of arguments for IPython
See sympy.bin.isympy for options that can be used to initialize IPython.
See Also
========
sympy.interactive.printing.init_printing: for examples and the rest of the parameters.
Examples
========
>>> from sympy import init_session, Symbol, sin, sqrt
>>> sin(x) #doctest: +SKIP
NameError: name 'x' is not defined
>>> init_session() #doctest: +SKIP
>>> sin(x) #doctest: +SKIP
sin(x)
>>> sqrt(5) #doctest: +SKIP
___
\/ 5
>>> init_session(pretty_print=False) #doctest: +SKIP
>>> sqrt(5) #doctest: +SKIP
sqrt(5)
>>> y + x + y**2 + x**2 #doctest: +SKIP
x**2 + x + y**2 + y
>>> init_session(order='grlex') #doctest: +SKIP
>>> y + x + y**2 + x**2 #doctest: +SKIP
x**2 + y**2 + x + y
>>> init_session(order='grevlex') #doctest: +SKIP
>>> y * x**2 + x * y**2 #doctest: +SKIP
x**2*y + x*y**2
>>> init_session(order='old') #doctest: +SKIP
>>> x**2 + y**2 + x + y #doctest: +SKIP
x + y + x**2 + y**2
>>> theta = Symbol('theta') #doctest: +SKIP
>>> theta #doctest: +SKIP
theta
>>> init_session(use_unicode=True) #doctest: +SKIP
>>> theta # doctest: +SKIP
\u03b8
"""
import sys
in_ipython = False
if ipython is not False:
try:
import IPython
except ImportError:
if ipython is True:
raise RuntimeError("IPython is not available on this system")
ip = None
else:
if IPython.__version__ >= '0.11':
try:
ip = get_ipython()
except NameError:
ip = None
else:
ip = IPython.ipapi.get()
if ip:
ip = ip.IP
in_ipython = bool(ip)
if ipython is None:
ipython = in_ipython
if ipython is False:
ip = init_python_session()
mainloop = ip.interact
else:
if ip is None:
ip = init_ipython_session(argv=argv, auto_symbols=auto_symbols,
auto_int_to_Integer=auto_int_to_Integer)
if IPython.__version__ >= '0.11':
# runsource is gone, use run_cell instead, which doesn't
# take a symbol arg. The second arg is `store_history`,
# and False means don't add the line to IPython's history.
ip.runsource = lambda src, symbol='exec': ip.run_cell(src, False)
#Enable interactive plotting using pylab.
try:
ip.enable_pylab(import_all=False)
except Exception:
# Causes an import error if matplotlib is not installed.
# Causes other errors (depending on the backend) if there
# is no display, or if there is some problem in the
# backend, so we have a bare "except Exception" here
pass
if not in_ipython:
mainloop = ip.mainloop
readline = import_module("readline")
if auto_symbols and (not ipython or IPython.__version__ < '0.11' or not readline):
raise RuntimeError("automatic construction of symbols is possible only in IPython 0.11 or above with readline support")
if auto_int_to_Integer and (not ipython or IPython.__version__ < '0.11'):
raise RuntimeError("automatic int to Integer transformation is possible only in IPython 0.11 or above")
_preexec_source = preexec_source
ip.runsource(_preexec_source, symbol='exec')
init_printing(pretty_print=pretty_print, order=order,
use_unicode=use_unicode, use_latex=use_latex, ip=ip)
message = _make_message(ipython, quiet, _preexec_source)
if not in_ipython:
mainloop(message)
sys.exit('Exiting ...')
else:
ip.write(message)
import atexit
atexit.register(lambda ip: ip.write("Exiting ...\n"), ip)
| bsd-3-clause |
huji-nlp/tupa | setup.py | 1 | 2023 | #!/usr/bin/env python
import os
import sys
from subprocess import run
from setuptools import setup, find_packages
from setuptools.command.install import install as _install
from tupa.__version__ import VERSION
try:
this_file = __file__
except NameError:
this_file = sys.argv[0]
os.chdir(os.path.dirname(os.path.abspath(this_file)))
with open("requirements.txt") as f:
install_requires = f.read().splitlines()
with open('README.md', encoding='utf-8') as f:
long_description = f.read()
class install(_install):
# noinspection PyBroadException
def run(self):
# Install requirements
self.announce("Installing dependencies...")
run(["pip", "--no-cache-dir", "install"] + install_requires, check=True)
# Install actual package
_install.run(self)
setup(name="TUPA",
version=VERSION,
description="Transition-based UCCA Parser",
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.6",
"Topic :: Text Processing :: Linguistic",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
author="Daniel Hershcovich",
author_email="[email protected]",
url="https://github.com/huji-nlp/tupa",
install_requires=install_requires,
extras_require={"server": open(os.path.join("server", "requirements.txt")).read().splitlines(),
"viz": ["scipy", "pillow", "matplotlib"],
"bert": open("requirements.bert.txt").read().splitlines()},
packages=find_packages(),
cmdclass={"install": install},
entry_points={"console_scripts": ["tupa = tupa.__main__:main"]},
)
| gpl-3.0 |
yassersouri/pitch-annotator | utils/gen_groundTruth.py | 1 | 1608 | import scipy.io
import glob
import json
import cv2
import numpy
import skimage.morphology
from matplotlib import pylab as plt
json_file_name = '/Users/yasser/sci-repo/pitchdataset/groundTruth/train/3.json'
img_file_name = '/Users/yasser/sci-repo/pitchdataset/images/train/3.jpg'
def gen_one_ground_truth(json_file_name, img_file_name):
img = cv2.imread(img_file_name)
shape = img.shape[0:2]
lines = 0
with open(json_file_name, 'r') as f:
lines = json.loads(f.read())
# allocate memory
edge = numpy.zeros(shape, dtype=numpy.uint8)
segs_pre = numpy.ones(shape, dtype=numpy.uint8) * 255
segs = numpy.zeros(shape, dtype=numpy.uint8)
# drawlines
for line in lines:
cv2.line(segs_pre, (int(line['x1']), int(line['y1'])), (int(line['x2']), int(line['y2'])), (0, 0, 0), thickness=1)
cv2.line(edge, (int(line['x1']), int(line['y1'])), (int(line['x2']), int(line['y2'])), (255, 255, 255), thickness=1)
# find segments
segs = skimage.morphology.label(segs_pre, 4, 0)
# label pixels with value of -1 (edge pixels)
for i in range(segs.shape[0]):
for j in range(segs.shape[1]):
if segs[i, j] == -1:
if i-1 > 0:
if segs[i-1, j] != -1:
segs[i, j] = segs[i-1, j]
elif j-1 > 0:
if segs[i, j-1] != -1:
segs[i, j] = segs[i, j-1]
result = {'edge': edge, 'segs': segs}
return result
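# Illustrative helper (added for clarity; not part of the original script).
# The default paths are just the module-level examples above; the function
# returns an 'edge' line mask and an integer 'segs' label image, both with
# the same height/width as the input frame.
def _demo_ground_truth(json_path=json_file_name, img_path=img_file_name):
    gt = gen_one_ground_truth(json_path, img_path)
    return gt['edge'].shape, gt['segs'].shape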
def main():
gen_one_ground_truth(json_file_name, img_file_name)
if __name__ == '__main__':
main() | gpl-2.0 |
jseabold/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 272 | 7798 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
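# Illustrative check (added for clarity; not a collected test): after
# center_and_norm, every variable has zero mean and unit std along the
# observation axis, which is what the ICA tests below rely on.
def _center_and_norm_sketch():
    rng = np.random.RandomState(0)
    x = rng.randn(3, 100)
    center_and_norm(x)
    assert_array_almost_equal(x.mean(axis=-1), np.zeros(3))
    assert_array_almost_equal(x.std(axis=-1), np.ones(3))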
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
| bsd-3-clause |
ElDeveloper/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <[email protected]>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
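# --- Illustrative sketch (added for clarity; not part of the original ----
# estimator). An uncalled helper showing that the support mask is just a
# per-feature variance comparison and agrees between dense and CSR input.
def _variance_threshold_sketch():
    from scipy.sparse import csr_matrix
    X = np.array([[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]], dtype=np.float64)
    dense_mask = VarianceThreshold().fit(X)._get_support_mask()
    sparse_mask = VarianceThreshold().fit(csr_matrix(X))._get_support_mask()
    assert (dense_mask == sparse_mask).all()
    assert (dense_mask == (np.var(X, axis=0) > 0.)).all()
    return dense_mask          # array([False,  True,  True, False])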
| bsd-3-clause |
CIFASIS/VDiscover | vdiscover/Cluster.py | 1 | 15662 | """
This file is part of VDISCOVER.
VDISCOVER is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
VDISCOVER is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with VDISCOVER. If not, see <http://www.gnu.org/licenses/>.
Copyright 2014 by G.Grieco
"""
import random
import gzip
import sys
import csv
import subprocess
import pickle
import numpy as np
import matplotlib as mpl
# hack from https://stackoverflow.com/questions/2801882/generating-a-png-with-matplotlib-when-display-is-undefined to avoid using X
# mpl.use('Agg')
import matplotlib.pyplot as plt
from Utils import *
from Pipeline import *
# def Cluster(X, labels)
"""
assert(len(X_red) == len(labels))
from sklearn.cluster import MeanShift, estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2)
print "Clustering with bandwidth:", bandwidth
af = MeanShift(bandwidth=bandwidth/1).fit(X_red)
cluster_centers = af.cluster_centers_
cluster_labels = af.labels_
n_clusters = len(cluster_centers)
plt.figure()
for ([x,y],label, cluster_label) in zip(X_red,labels, cluster_labels):
x = gauss(0,0.1) + x
y = gauss(0,0.1) + y
plt.scatter(x, y, c = colors[cluster_label % ncolors])
#plt.text(x-0.05, y+0.01, label.split("/")[-1])
for i,[x,y] in enumerate(cluster_centers):
plt.plot(x, y, 'o', markerfacecolor=colors[i % ncolors],
markeredgecolor='k', markersize=7)
plt.title('Estimated number of clusters: %d' % n_clusters)
"""
# return zip(labels, cluster_labels)
batch_size = 25
window_size = 32
maxlen = window_size
embedding_dims = 5
nb_filters = 50
filter_length = 3
hidden_dims = 50
nb_epoch = 3
def ClusterCnn(model_file, train_file, valid_file, ftype, nsamples, outdir):
f = open(model_file + ".pre")
preprocessor = pickle.load(f)
import h5py
f = h5py.File(model_file + ".wei")
layers = []
for k in range(f.attrs['nb_layers']):
g = f['layer_{}'.format(k)]
layers.append([g['param_{}'.format(p)]
for p in range(g.attrs['nb_params'])])
max_features = len(preprocessor.tokenizer.word_counts)
print "Reading and sampling data to train.."
train_programs, train_features, train_classes = read_traces(
train_file, nsamples, cut=None)
train_size = len(train_features)
#y = train_programs
X_train, y_train, labels = preprocessor.preprocess_traces(
train_features, y_data=train_classes, labels=train_programs)
new_model = make_cluster_cnn(
"test",
max_features,
maxlen,
embedding_dims,
nb_filters,
filter_length,
hidden_dims,
None,
weights=layers)
train_dict = dict()
train_dict[ftype] = new_model.predict(X_train)
model = make_cluster_pipeline_subtraces(ftype)
X_red_comp = model.fit_transform(train_dict)
explained_var = np.var(X_red_comp, axis=0)
print explained_var
X_red = X_red_comp[:, 0:2]
X_red_next = X_red_comp[:, 2:4]
colors = mpl.colors.cnames.keys()
progs = list(set(labels))
ncolors = len(colors)
size = len(labels)
print "Plotting.."
for prog, [x, y] in zip(labels, X_red):
# for prog,[x,y] in sample(zip(labels, X_red), min(size, 1000)):
x = gauss(0, 0.05) + x
y = gauss(0, 0.05) + y
color = 'r'
plt.scatter(x, y, c=color)
"""
if valid_file is not None:
valid_programs, valid_features, valid_classes = read_traces(valid_file, None, cut=None, maxsize=window_size) #None)
valid_dict = dict()
X_valid, _, valid_labels = preprocessor.preprocess_traces(valid_features, y_data=None, labels=valid_programs)
valid_dict[ftype] = new_model.predict(X_valid)
X_red_valid_comp = model.transform(valid_dict)
X_red_valid = X_red_valid_comp[:,0:2]
X_red_valid_next = X_red_valid_comp[:,2:4]
for prog,[x,y] in zip(valid_labels, X_red_valid):
x = gauss(0,0.05) + x
y = gauss(0,0.05) + y
plt.scatter(x, y, c='b')
plt.text(x, y+0.02, prog.split("/")[-1])
plt.show()
"""
plt.savefig(train_file.replace(".gz", "") + ".png")
print "Bandwidth estimation.."
from sklearn.cluster import MeanShift, estimate_bandwidth
X_red_sample = X_red[:min(size, 1000)]
bandwidth = estimate_bandwidth(X_red_sample, quantile=0.2)
print "Clustering with bandwidth:", bandwidth
#X_red = np.vstack((X_red,X_red_valid))
#X_red_next = np.vstack((X_red_next,X_red_valid_next))
#labels = labels + valid_labels
print X_red.shape, len(X_red), len(labels)
# print valid_labels
af = MeanShift(bandwidth=bandwidth / 1).fit(X_red)
cluster_centers = af.cluster_centers_
cluster_labels = af.labels_
n_clusters = len(cluster_centers)
plt.figure()
for ([x, y], label, cluster_label) in zip(X_red, labels, cluster_labels):
# for ([x,y],label, cluster_label) in sample(zip(X_red,labels,
# cluster_labels), min(size, 1000)):
x = gauss(0, 0.1) + x
y = gauss(0, 0.1) + y
plt.scatter(x, y, c=colors[cluster_label % ncolors])
# print label
# if label in valid_labels:
# plt.text(x-0.05, y+0.01, label.split("/")[-1])
for i, [x, y] in enumerate(cluster_centers):
plt.plot(x, y, 'o', markerfacecolor=colors[i % ncolors],
markeredgecolor='k', markersize=7)
"""
#for prog,[x,y] in zip(valid_labels, X_red_valid):
#x = gauss(0,0.1) + x
#y = gauss(0,0.1) + y
#plt.scatter(x, y, c='black')
#plt.text(x, y+0.02, prog.split("/")[-1])
plt.title('Estimated number of clusters: %d' % n_clusters)
#plt.savefig("clusters.png")
plt.show()
"""
plt.savefig(train_file.replace(".gz", "") + ".clusters.png")
clustered_traces = zip(labels, cluster_labels)
    writer = write_csv(train_file.replace(".gz", "") + ".clusters")
for label, cluster in clustered_traces:
writer.writerow([label, cluster])
"""
clusters = dict()
for label, cluster in clustered_traces:
clusters[cluster] = clusters.get(cluster, []) + [label]
for cluster, traces in clusters.items():
plt.figure()
plt.title('Cluster %d' % cluster)
#X_clus = []
#for prog in traces:
# i = labels.index(prog)
# X_clus.append(X_train[i])
#train_dict = dict()
#train_dict[ftype] = X_clus
#model = make_cluster_pipeline_subtraces(ftype)
#X_red = model.fit_transform(train_dict)
#for [x,y],prog in zip(X_red,traces):
for prog in traces:
i = labels.index(prog)
assert(i>=0)
[x,y] = X_red_next[i]
x = gauss(0,0.1) + x
y = gauss(0,0.1) + y
plt.scatter(x, y, c='r')
#if prog in valid_labels:
plt.text(x-0.05, y+0.01, prog.split("/")[-1])
#plt.text(x, y+0.02, prog.split("/")[-1])
plt.show()
#plt.savefig('cluster-%d.png' % cluster)
"""
# return clustered_traces
def TrainCnn(model_file, train_file, valid_file, ftype, nsamples):
csvreader = open_csv(train_file)
train_features = []
train_programs = []
train_classes = []
train_programs, train_features, train_classes = read_traces(
train_file, nsamples, cut=None)
train_size = len(train_features)
from keras.preprocessing.text import Tokenizer
tokenizer = Tokenizer(nb_words=None, filters="", lower=False, split=" ")
# print type(train_features[0])
tokenizer.fit_on_texts(train_features)
max_features = len(tokenizer.word_counts)
preprocessor = DeepReprPreprocessor(tokenizer, window_size, batch_size)
X_train, y_train = preprocessor.preprocess(train_features, 10000)
nb_classes = len(preprocessor.classes)
print preprocessor.classes
model = make_cluster_cnn(
"train",
max_features,
maxlen,
embedding_dims,
nb_filters,
filter_length,
hidden_dims,
nb_classes)
model.fit(X_train, y_train, validation_split=0.1,
batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True)
model.mypreprocessor = preprocessor
#model_file = model_file + ".wei"
#modelfile = open_model(model_file)
print "Saving model to", model_file + ".wei"
model.save_weights(model_file + ".wei")
#model_file = model_file + ".pre"
modelfile = open_model(model_file + ".pre")
print "Saving preprocessor to", model_file + ".pre"
# model.save_weights(model_file)
modelfile.write(pickle.dumps(preprocessor, protocol=2))
"""
def ClusterDoc2Vec(model_file, train_file, valid_file, ftype, nsamples, param):
train_programs, train_features, train_classes = read_traces(train_file, nsamples)
train_size = len(train_programs)
print "using", train_size,"examples to train."
from gensim.models.doc2vec import TaggedDocument
from gensim.models import Doc2Vec
print "Vectorizing traces.."
sentences = []
for (prog,trace) in zip(train_programs,train_features):
sentences.append(TaggedDocument(trace.split(" "), [prog]))
model = Doc2Vec(dm=2, min_count=1, window=5, size=100, sample=1e-4, negative=5, workers=8, iter=1)
model.build_vocab(sentences)
for epoch in range(20):
#print model
model.train(sentences)
shuffle(sentences)
train_dict = dict()
vec_train_features = []
for prog in train_programs:
#print prog, model.docvecs[prog]
vec_train_features.append(model.docvecs[prog])
train_dict[ftype] = vec_train_features
print "Transforming data and fitting model.."
model = make_cluster_pipeline_doc2vec(ftype)
X_red = model.fit_transform(train_dict)
#mpl.rcParams.update({'font.size': 10})
plt.figure()
colors = 'brgcmykbgrcmykbgrcmykbgrcmyk'
ncolors = len(colors)
for prog,[x,y],cl in zip(train_programs, X_red, train_classes):
x = gauss(0,0.1) + x
y = gauss(0,0.1) + y
try:
plt.scatter(x, y, c=colors[int(cl)])
plt.text(x, y+0.02, prog.split("/")[-1])
except ValueError:
plt.text(x, y+0.02, cl)
#plt.show()
plt.savefig(train_file.replace(".gz","")+".png")
from sklearn.cluster import MeanShift, estimate_bandwidth
bandwidth = estimate_bandwidth(X_red, quantile=0.2)
print "Clustering with bandwidth:", bandwidth
af = MeanShift(bandwidth=bandwidth*param).fit(X_red)
cluster_centers = af.cluster_centers_
labels = af.labels_
n_clusters_ = len(cluster_centers)
plt.close('all')
plt.figure(1)
plt.clf()
for ([x,y],label, cluster_label) in zip(X_red,train_programs, labels):
x = gauss(0,0.1) + x
y = gauss(0,0.1) + y
plt.scatter(x, y, c = colors[cluster_label % ncolors])
for i,[x,y] in enumerate(cluster_centers):
plt.plot(x, y, 'o', markerfacecolor=colors[i % ncolors],
markeredgecolor='k', markersize=7)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.savefig(train_file.replace(".gz","")+".clusters.png")
#plt.show()
clustered_traces = zip(train_programs, labels)
writer = write_csv(train_file.replace(".gz","")+".clusters")
for label, cluster in clustered_traces:
writer.writerow([label.split("/")[-1], cluster])
"""
def ClusterScikit(
model_file,
train_file,
valid_file,
ftype,
nsamples,
vectorizer,
reducer,
param):
train_programs, train_features, train_classes = read_traces(
train_file, nsamples)
train_size = len(train_programs)
print "using", train_size, "examples to train."
if vectorizer == "bow":
train_dict = dict()
train_dict[ftype] = train_features
#batch_size = 16
#window_size = 20
print "Transforming data and fitting model.."
model = make_cluster_pipeline_bow(ftype, reducer)
X_red = model.fit_transform(train_dict)
elif vectorizer == "doc2vec":
from gensim.models.doc2vec import TaggedDocument
from gensim.models import Doc2Vec
print "Vectorizing traces.."
sentences = []
for (prog, trace) in zip(train_programs, train_features):
sentences.append(TaggedDocument(trace.split(" "), [prog]))
model = Doc2Vec(dm=2, min_count=1, window=5, size=100,
sample=1e-4, negative=5, workers=8, iter=1)
model.build_vocab(sentences)
for epoch in range(20):
# print model
model.train(sentences)
shuffle(sentences)
train_dict = dict()
vec_train_features = []
for prog in train_programs:
# print prog, model.docvecs[prog]
vec_train_features.append(model.docvecs[prog])
train_dict[ftype] = vec_train_features
print "Transforming data and fitting model.."
model = make_cluster_pipeline_doc2vec(ftype, reducer)
X_red = model.fit_transform(train_dict)
#pl.rcParams.update({'font.size': 10})
if isinstance(X_red, list):
X_red = np.vstack(X_red)
print X_red.shape
if X_red.shape[1] == 2:
plt.figure()
colors = 'brgcmykbgrcmykbgrcmykbgrcmyk'
ncolors = len(colors)
for prog, [x, y], cl in zip(train_programs, X_red, train_classes):
x = gauss(0, 0.1) + x
y = gauss(0, 0.1) + y
try:
plt.scatter(x, y, c=colors[int(cl)])
plt.text(x, y + 0.02, prog.split("/")[-1])
except ValueError:
plt.text(x, y + 0.02, cl)
if valid_file is not None:
valid_programs, valid_features, valid_classes = read_traces(
valid_file, None)
valid_dict = dict()
valid_dict[ftype] = valid_features
X_red = model.transform(valid_dict)
for prog, [x, y], cl in zip(valid_programs, X_red, valid_classes):
x = gauss(0, 0.1) + x
y = gauss(0, 0.1) + y
plt.scatter(x, y, c=colors[cl + 1])
plt.text(x, y + 0.02, prog.split("/")[-1])
# plt.show()
plt.savefig(train_file.replace(".gz", "") + ".png")
from sklearn.cluster import MeanShift, estimate_bandwidth
bandwidth = estimate_bandwidth(X_red, quantile=0.2)
print "Clustering with bandwidth:", bandwidth
af = MeanShift(bandwidth=bandwidth * param).fit(X_red)
cluster_centers = af.cluster_centers_
labels = af.labels_
n_clusters_ = len(cluster_centers)
if X_red.shape[1] == 2:
plt.close('all')
plt.figure(1)
plt.clf()
for ([x, y], label, cluster_label) in zip(
X_red, train_programs, labels):
x = gauss(0, 0.1) + x
y = gauss(0, 0.1) + y
plt.scatter(x, y, c=colors[cluster_label % ncolors])
for i, [x, y] in enumerate(cluster_centers):
plt.plot(x, y, 'o', markerfacecolor=colors[i % ncolors],
markeredgecolor='k', markersize=7)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.savefig(train_file.replace(".gz", "") + ".clusters.png")
# plt.show()
clustered_traces = zip(train_programs, labels)
writer = write_csv(train_file.replace(".gz", "") + ".clusters")
for label, cluster in clustered_traces:
writer.writerow([label.split("/")[-1], cluster])
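# A hypothetical invocation (the file name, feature type, reducer and
# bandwidth factor below are illustrative and not taken from this project's
# CLI):
#
#   ClusterScikit(None, "train_traces.gz", None, "dynamic", None,
#                 "bow", "pca", 1.0)
#
# would vectorize the traces, reduce their dimensionality (the scatter and
# cluster plots are written next to the training file when the reduction is
# 2-D), run MeanShift, and dump the (program, cluster) assignments through
# write_csv.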
| gpl-3.0 |
kaichogami/scikit-learn | sklearn/utils/tests/test_multiclass.py | 34 | 13405 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
from sklearn.utils.multiclass import check_classification_targets
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
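    # np.asarray(NotAnArray(np.array([1, 0, 2]))) goes through __array__ and
    # returns the wrapped ndarray, which is how the target-type checks below
    # accept these objects as ordinary array-likes.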
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
# sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
    # Smoke test for all supported formats
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
    # We don't support those formats at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_check_classification_targets():
for y_type in EXAMPLES.keys():
if y_type in ["unknown", "continuous", 'continuous-multioutput']:
for example in EXAMPLES[y_type]:
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg,
check_classification_targets, example)
else:
for example in EXAMPLES[y_type]:
check_classification_targets(example)
# @ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
| bsd-3-clause |
chenxulong/quanteco | examples/clt3d.py | 7 | 2222 | """
Origin: QE by John Stachurski and Thomas J. Sargent
Filename: clt3d.py
Visual illustration of the central limit theorem. Produces a 3D figure
showing the density of the scaled sample mean \sqrt{n} \bar X_n plotted
against n.
"""
import numpy as np
from scipy.stats import beta, gaussian_kde
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import PolyCollection
import matplotlib.pyplot as plt
beta_dist = beta(2, 2)
def gen_x_draws(k):
"""
Returns a flat array containing k independent draws from the
distribution of X, the underlying random variable. This distribution is
itself a convex combination of three beta distributions.
"""
bdraws = beta_dist.rvs((3, k))
# == Transform rows, so each represents a different distribution == #
bdraws[0, :] -= 0.5
bdraws[1, :] += 0.6
bdraws[2, :] -= 1.1
# == Set X[i] = bdraws[j, i], where j is a random draw from {0, 1, 2} == #
js = np.random.random_integers(0, 2, size=k)
X = bdraws[js, np.arange(k)]
# == Rescale, so that the random variable is zero mean == #
m, sigma = X.mean(), X.std()
return (X - m) / sigma
nmax = 5
reps = 100000
ns = list(range(1, nmax + 1))
# == Form a matrix Z such that each column is reps independent draws of X == #
Z = np.empty((reps, nmax))
for i in range(nmax):
Z[:, i] = gen_x_draws(reps)
# == Take cumulative sum across columns
S = Z.cumsum(axis=1)
# == Divide j-th column by sqrt(j) to get the scaled sample means == #
Y = (1 / np.sqrt(ns)) * S
# == Plot == #
fig = plt.figure()
ax = fig.gca(projection='3d')
a, b = -3, 3
gs = 100
xs = np.linspace(a, b, gs)
# == Build verts == #
greys = np.linspace(0.3, 0.7, nmax)
verts = []
for n in ns:
density = gaussian_kde(Y[:, n-1])
ys = density(xs)
verts.append(list(zip(xs, ys)))
poly = PolyCollection(verts, facecolors=[str(g) for g in greys])
poly.set_alpha(0.85)
ax.add_collection3d(poly, zs=ns, zdir='x')
# ax.text(np.mean(rhos), a-1.4, -0.02, r'$\beta$', fontsize=16)
# ax.text(np.max(rhos)+0.016, (a+b)/2, -0.02, r'$\log(y)$', fontsize=16)
ax.set_xlim3d(1, nmax)
ax.set_xticks(ns)
ax.set_xlabel("n")
ax.set_yticks((-3, 0, 3))
ax.set_ylim3d(a, b)
ax.set_zlim3d(0, 0.4)
ax.set_zticks((0.2, 0.4))
plt.show()
| bsd-3-clause |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/tests/indexes/datetimes/test_datetime.py | 3 | 31384 | import pytest
import numpy as np
from datetime import date, timedelta, time
import pandas as pd
import pandas.util.testing as tm
from pandas.compat import lrange
from pandas.compat.numpy import np_datetime64_compat
from pandas import (DatetimeIndex, Index, date_range, Series, DataFrame,
Timestamp, datetime, offsets, _np_version_under1p8)
from pandas.util.testing import assert_series_equal, assert_almost_equal
randn = np.random.randn
class TestDatetimeIndex(object):
def test_get_loc(self):
idx = pd.date_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pydatetime(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
if method is not None:
assert idx.get_loc(idx[1], method,
tolerance=pd.Timedelta('0 days')) == 1
assert idx.get_loc('2000-01-01', method='nearest') == 0
assert idx.get_loc('2000-01-01T12', method='nearest') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance='1 day') == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=pd.Timedelta('1D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=np.timedelta64(1, 'D')) == 1
assert idx.get_loc('2000-01-01T12', method='nearest',
tolerance=timedelta(1)) == 1
with tm.assert_raises_regex(ValueError, 'must be convertible'):
idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
with pytest.raises(KeyError):
idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours')
assert idx.get_loc('2000', method='nearest') == slice(0, 3)
assert idx.get_loc('2000-01', method='nearest') == slice(0, 3)
assert idx.get_loc('1999', method='nearest') == 0
assert idx.get_loc('2001', method='nearest') == 2
with pytest.raises(KeyError):
idx.get_loc('1999', method='pad')
with pytest.raises(KeyError):
idx.get_loc('2001', method='backfill')
with pytest.raises(KeyError):
idx.get_loc('foobar')
with pytest.raises(TypeError):
idx.get_loc(slice(2))
idx = pd.to_datetime(['2000-01-01', '2000-01-04'])
assert idx.get_loc('2000-01-02', method='nearest') == 0
assert idx.get_loc('2000-01-03', method='nearest') == 1
assert idx.get_loc('2000-01', method='nearest') == slice(0, 2)
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
tm.assert_numpy_array_equal(idx.get_loc(time(12)),
np.array([12]), check_dtype=False)
tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),
np.array([]), check_dtype=False)
with pytest.raises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',
'1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour')),
np.array([0, -1, 1], dtype=np.intp))
with pytest.raises(ValueError):
idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
assert '2000' in str(e)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = tm.round_trip_pickle(index)
tm.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
assert str(index.reindex([])[0].tz) == 'US/Eastern'
assert str(index.reindex(np.array([]))[0].tz) == 'US/Eastern'
def test_time_loc(self): # GH8667
from datetime import time
from pandas._libs.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i,
check_dtype=False)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
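        # Concretely: 1000 periods * 1e9 strides = 1e12, which exceeds the
        # np.int32 maximum of 2**31 - 1 (about 2.1e9) but fits comfortably in
        # np.int64 (maximum about 9.2e18).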
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
assert len(idx1) == periods
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
assert len(idx2) == periods
def test_nat(self):
assert DatetimeIndex([np.nan])[0] is pd.NaT
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '3D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
def test_week_of_month_frequency(self):
# GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise
d1 = date(2002, 9, 1)
d2 = date(2013, 10, 27)
d3 = date(2012, 9, 30)
idx1 = DatetimeIndex([d1, d2])
idx2 = DatetimeIndex([d3])
result_append = idx1.append(idx2)
expected = DatetimeIndex([d1, d2, d3])
tm.assert_index_equal(result_append, expected)
result_union = idx1.union(idx2)
expected = DatetimeIndex([d1, d3, d2])
tm.assert_index_equal(result_union, expected)
# GH 5115
result = date_range("2013-1-1", periods=4, freq='WOM-1SAT')
dates = ['2013-01-05', '2013-02-02', '2013-03-02', '2013-04-06']
expected = DatetimeIndex(dates, freq='WOM-1SAT')
tm.assert_index_equal(result, expected)
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
# GH2658
import datetime
start = datetime.datetime.now()
idx = DatetimeIndex(start=start, freq="1d", periods=10)
df = DataFrame(lrange(10), index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
assert isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
pytest.raises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
assert idx.freqstr == 'D'
expected = pd.PeriodIndex(['2000-01-01', '2000-01-02',
'2000-01-03'], freq='D')
tm.assert_index_equal(idx.to_period(), expected)
# GH 7606
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
assert idx.freqstr is None
tm.assert_index_equal(idx.to_period(), expected)
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
pytest.raises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
def test_comparisons_nat(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
'2014-05-01', '2014-07-01'])
didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT,
'2014-06-01', '2014-07-01'])
darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'),
np_datetime64_compat('2014-03-01 00:00Z'),
np_datetime64_compat('nat'), np.datetime64('nat'),
np_datetime64_compat('2014-06-01 00:00Z'),
np_datetime64_compat('2014-07-01 00:00Z')])
if _np_version_under1p8:
            # cannot test array because np.datetime64('nat') returns today's date
cases = [(fidx1, fidx2), (didx1, didx2)]
else:
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check that pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check that pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = Index([f(x) for x in rng], dtype='<U8')
tm.assert_index_equal(result, exp)
def test_iteration_preserves_tz(self):
tm._skip_if_no_dateutil()
# GH 8890
import dateutil
index = date_range("2012-01-01", periods=3, freq='H', tz='US/Eastern')
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result == expected
index = date_range("2012-01-01", periods=3, freq='H',
tz=dateutil.tz.tzoffset(None, -28800))
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
# 9100
index = pd.DatetimeIndex(['2014-12-01 03:32:39.987000-08:00',
'2014-12-01 04:12:34.987000-08:00'])
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
assert isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
assert not idx.equals(list(idx))
non_datetime = Index(list('abc'))
assert not idx.equals(list(non_datetime))
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.loc['1/3/2000']
assert result.name == df.index[2]
result = df.T['1/3/2000']
assert result.name == df.index[2]
def test_overflow_offset(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
# ends up multiplying really large numbers which overflow
t = Timestamp('2017-01-13 00:00:00', freq='D')
offset = 20169940 * pd.offsets.Day(1)
def f():
t + offset
pytest.raises(OverflowError, f)
def f():
offset + t
pytest.raises(OverflowError, f)
def f():
t - offset
pytest.raises(OverflowError, f)
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
tm.assert_index_equal(result, ex)
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
assert idx.argmin() == 1
assert idx.argmax() == 0
def test_sort_values(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.sort_values()
assert ordered.is_monotonic
ordered = idx.sort_values(ascending=False)
assert ordered[::-1].is_monotonic
ordered, dexer = idx.sort_values(return_indexer=True)
assert ordered.is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0], dtype=np.intp))
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
assert ordered[::-1].is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1], dtype=np.intp))
def test_take(self):
dates = [datetime(2010, 1, 1, 14), datetime(2010, 1, 1, 15),
datetime(2010, 1, 1, 17), datetime(2010, 1, 1, 21)]
for tz in [None, 'US/Eastern', 'Asia/Tokyo']:
idx = DatetimeIndex(start='2010-01-01 09:00',
end='2010-02-01 09:00', freq='H', tz=tz,
name='idx')
expected = DatetimeIndex(dates, freq=None, name='idx', tz=tz)
taken1 = idx.take([5, 6, 8, 12])
taken2 = idx[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, DatetimeIndex)
assert taken.freq is None
assert taken.tz == expected.tz
assert taken.name == expected.name
def test_take_fill_value(self):
# GH 12631
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'],
name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def test_take_fill_value_with_timezone(self):
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
name='xxx', tz='US/Eastern')
result = idx.take(np.array([1, 0, -1]))
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx', tz='US/Eastern')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'],
name='xxx', tz='US/Eastern')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx', tz='US/Eastern')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = Index([f(index[0])])
tm.assert_index_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
assert isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
assert (result['B'] == dr).all()
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
assert result.all()
result = index.isin(list(index))
assert result.all()
assert_almost_equal(index.isin([index[2], 5]),
np.array([False, False, True, False]))
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
assert (result == expected).all()
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
assert (result == expected).all()
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10,
data_gen_f=lambda *args, **kwargs: randn(),
r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
assert cols.dtype == np.dtype('O')
assert cols.dtype == joined.dtype
tm.assert_numpy_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
assert dr[1:].name == dr.name
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
assert index is joined
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * offsets.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1],
freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(
10, 10, data_gen_f=lambda *args: np.random.randint(2),
c_idx_type='p', r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assert_raises_regex(ValueError,
'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
arr, idx = idx3.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
def test_factorize_tz(self):
# GH 13750
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
base = pd.date_range('2016-11-05', freq='H', periods=100, tz=tz)
idx = base.repeat(5)
exp_arr = np.arange(100, dtype=np.intp).repeat(5)
for obj in [idx, pd.Series(idx)]:
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(res, base)
def test_factorize_dst(self):
# GH 13750
idx = pd.date_range('2016-11-06', freq='H', periods=12,
tz='US/Eastern')
for obj in [idx, pd.Series(idx)]:
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
tm.assert_index_equal(res, idx)
idx = pd.date_range('2016-06-13', freq='H', periods=12,
tz='US/Eastern')
for obj in [idx, pd.Series(idx)]:
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
tm.assert_index_equal(res, idx)
def test_slice_with_negative_step(self):
ts = Series(np.arange(20),
date_range('2014-01-01', periods=20, freq='MS'))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
assert_series_equal(ts[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
assert_slices_equivalent(SLC[Timestamp('2014-10-01')::-1], SLC[9::-1])
assert_slices_equivalent(SLC['2014-10-01'::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:Timestamp('2014-10-01'):-1], SLC[:8:-1])
assert_slices_equivalent(SLC[:'2014-10-01':-1], SLC[:8:-1])
assert_slices_equivalent(SLC['2015-02-01':'2014-10-01':-1],
SLC[13:8:-1])
assert_slices_equivalent(SLC[Timestamp('2015-02-01'):Timestamp(
'2014-10-01'):-1], SLC[13:8:-1])
assert_slices_equivalent(SLC['2015-02-01':Timestamp('2014-10-01'):-1],
SLC[13:8:-1])
assert_slices_equivalent(SLC[Timestamp('2015-02-01'):'2014-10-01':-1],
SLC[13:8:-1])
assert_slices_equivalent(SLC['2014-10-01':'2015-02-01':-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
ts = Series(np.arange(20),
date_range('2014-01-01', periods=20, freq='MS'))
tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
lambda: ts[::0])
tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
lambda: ts.loc[::0])
tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
lambda: ts.loc[::0])
def test_slice_bounds_empty(self):
# GH 14354
empty_idx = DatetimeIndex(freq='1H', periods=0, end='2015')
right = empty_idx._maybe_cast_slice_bound('2015-01-02', 'right', 'loc')
exp = Timestamp('2015-01-02 23:59:59.999999999')
assert right == exp
left = empty_idx._maybe_cast_slice_bound('2015-01-02', 'left', 'loc')
exp = Timestamp('2015-01-02 00:00:00')
assert left == exp
| mit |
eepalms/gem5-newcache | util/stats/output.py | 90 | 7981 | # Copyright (c) 2005-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from chart import ChartOptions
class StatOutput(ChartOptions):
def __init__(self, jobfile, info, stat=None):
super(StatOutput, self).__init__()
self.jobfile = jobfile
self.stat = stat
self.invert = False
self.info = info
    def display(self, name, printmode = 'G'):
        import info
        for job in self.jobfile.jobs():
            value = self.info.get(job, self.stat)
            if value is None:
                return
            if not isinstance(value, list):
                value = [ value ]
            if self.invert:
                for i,val in enumerate(value):
                    if val != 0.0:
                        value[i] = 1 / val
            # choose the output format once the values for this job are
            # known: general ('G'), fixed ('F'), or scientific notation for
            # large magnitudes
            if printmode == 'G':
                valformat = '%g'
            elif printmode != 'F' and max(value) > 1e6:
                valformat = '%0.5e'
            else:
                valformat = '%f'
            valstring = ', '.join([ valformat % val for val in value ])
            print '%-50s %s' % (job.name + ':', valstring)
def graph(self, name, graphdir, proxy=None):
from os.path import expanduser, isdir, join as joinpath
from barchart import BarChart
from matplotlib.numerix import Float, array, zeros
import os, re, urllib
from jobfile import crossproduct
confgroups = self.jobfile.groups()
ngroups = len(confgroups)
skiplist = [ False ] * ngroups
groupopts = []
baropts = []
groups = []
for i,group in enumerate(confgroups):
if group.flags.graph_group:
groupopts.append(group.subopts())
skiplist[i] = True
elif group.flags.graph_bars:
baropts.append(group.subopts())
skiplist[i] = True
else:
groups.append(group)
has_group = bool(groupopts)
if has_group:
groupopts = [ group for group in crossproduct(groupopts) ]
else:
groupopts = [ None ]
if baropts:
baropts = [ bar for bar in crossproduct(baropts) ]
else:
raise AttributeError, 'No group selected for graph bars'
directory = expanduser(graphdir)
if not isdir(directory):
os.mkdir(directory)
html = file(joinpath(directory, '%s.html' % name), 'w')
print >>html, '<html>'
print >>html, '<title>Graphs for %s</title>' % name
print >>html, '<body>'
html.flush()
for options in self.jobfile.options(groups):
chart = BarChart(self)
data = [ [ None ] * len(baropts) for i in xrange(len(groupopts)) ]
enabled = False
stacked = 0
for g,gopt in enumerate(groupopts):
for b,bopt in enumerate(baropts):
if gopt is None:
gopt = []
job = self.jobfile.job(options + gopt + bopt)
if not job:
continue
if proxy:
import db
proxy.dict['system'] = self.info[job.system]
val = self.info.get(job, self.stat)
if val is None:
print 'stat "%s" for job "%s" not found' % \
(self.stat, job)
if isinstance(val, (list, tuple)):
if len(val) == 1:
val = val[0]
else:
stacked = len(val)
data[g][b] = val
if stacked == 0:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
if data[i][j] is None:
data[i][j] = 0.0
else:
for i in xrange(len(groupopts)):
for j in xrange(len(baropts)):
val = data[i][j]
if val is None:
data[i][j] = [ 0.0 ] * stacked
elif len(val) != stacked:
raise ValueError, "some stats stacked, some not"
data = array(data)
if data.sum() == 0:
continue
dim = len(data.shape)
x = data.shape[0]
xkeep = [ i for i in xrange(x) if data[i].sum() != 0 ]
y = data.shape[1]
ykeep = [ i for i in xrange(y) if data[:,i].sum() != 0 ]
data = data.take(xkeep, axis=0)
data = data.take(ykeep, axis=1)
if not has_group:
data = data.take([ 0 ], axis=0)
chart.data = data
bopts = [ baropts[i] for i in ykeep ]
bdescs = [ ' '.join([o.desc for o in opt]) for opt in bopts]
if has_group:
gopts = [ groupopts[i] for i in xkeep ]
gdescs = [ ' '.join([o.desc for o in opt]) for opt in gopts]
if chart.legend is None:
if stacked:
try:
chart.legend = self.info.rcategories
except:
chart.legend = [ str(i) for i in xrange(stacked) ]
else:
chart.legend = bdescs
if chart.xticks is None:
if has_group:
chart.xticks = gdescs
else:
chart.xticks = []
chart.graph()
names = [ opt.name for opt in options ]
descs = [ opt.desc for opt in options ]
if names[0] == 'run':
names = names[1:]
descs = descs[1:]
basename = '%s-%s' % (name, ':'.join(names))
desc = ' '.join(descs)
pngname = '%s.png' % basename
        epsname = '%s.eps' % re.sub(':', '-', basename)
        psname = '%s.ps' % re.sub(':', '-', basename)
chart.savefig(joinpath(directory, pngname))
chart.savefig(joinpath(directory, epsname))
chart.savefig(joinpath(directory, psname))
html_name = urllib.quote(pngname)
print >>html, '''%s<br><img src="%s"><br>''' % (desc, html_name)
html.flush()
print >>html, '</body>'
print >>html, '</html>'
html.close()
| bsd-3-clause |
kdebrab/pandas | pandas/core/sorting.py | 2 | 16130 | """ miscellaneous sorting / groupby utilities """
import numpy as np
from pandas.compat import long, string_types, PY3
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_int64,
is_list_like,
is_categorical_dtype)
from pandas.core.dtypes.cast import infer_dtype_from_array
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algorithms
from pandas._libs import lib, algos, hashtable
from pandas._libs.hashtable import unique_label_indices
_INT64_MAX = np.iinfo(np.int64).max
def get_group_index(labels, shape, sort, xnull):
"""
For the particular label_list, gets the offsets into the hypothetical list
representing the totally ordered cartesian product of all possible label
combinations, *as long as* this space fits within int64 bounds;
otherwise, though group indices identify unique combinations of
labels, they cannot be deconstructed.
- If `sort`, rank of returned ids preserve lexical ranks of labels.
i.e. returned id's can be used to do lexical sort on labels;
- If `xnull` nulls (-1 labels) are passed through.
Parameters
----------
labels: sequence of arrays
Integers identifying levels at each location
shape: sequence of ints same length as labels
Number of unique levels at each location
sort: boolean
If the ranks of returned ids should match lexical ranks of labels
xnull: boolean
If true nulls are excluded. i.e. -1 values in the labels are
passed through
Returns
-------
An array of type int64 where two elements are equal if their corresponding
    labels are equal at all locations.
"""
def _int64_cut_off(shape):
acc = long(1)
for i, mul in enumerate(shape):
acc *= long(mul)
if not acc < _INT64_MAX:
return i
return len(shape)
def maybe_lift(lab, size):
# promote nan values (assigned -1 label in lab array)
# so that all output values are non-negative
return (lab + 1, size + 1) if (lab == -1).any() else (lab, size)
labels = map(ensure_int64, labels)
if not xnull:
labels, shape = map(list, zip(*map(maybe_lift, labels, shape)))
labels = list(labels)
shape = list(shape)
# Iteratively process all the labels in chunks sized so less
# than _INT64_MAX unique int ids will be required for each chunk
while True:
# how many levels can be done without overflow:
nlev = _int64_cut_off(shape)
# compute flat ids for the first `nlev` levels
stride = np.prod(shape[1:nlev], dtype='i8')
out = stride * labels[0].astype('i8', subok=False, copy=False)
for i in range(1, nlev):
if shape[i] == 0:
stride = 0
else:
stride //= shape[i]
out += labels[i] * stride
if xnull: # exclude nulls
mask = labels[0] == -1
for lab in labels[1:nlev]:
mask |= lab == -1
out[mask] = -1
if nlev == len(shape): # all levels done!
break
# compress what has been done so far in order to avoid overflow
# to retain lexical ranks, obs_ids should be sorted
comp_ids, obs_ids = compress_group_index(out, sort=sort)
labels = [comp_ids] + labels[nlev:]
shape = [len(obs_ids)] + shape[nlev:]
return out
def get_compressed_ids(labels, sizes):
"""
Group_index is offsets into cartesian product of all possible labels. This
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
Parameters
----------
labels : list of label arrays
sizes : list of size of the levels
Returns
-------
tuple of (comp_ids, obs_group_ids)
"""
ids = get_group_index(labels, sizes, sort=True, xnull=False)
return compress_group_index(ids, sort=True)
def is_int64_overflow_possible(shape):
the_prod = long(1)
for x in shape:
the_prod *= long(x)
return the_prod >= _INT64_MAX
def decons_group_index(comp_labels, shape):
# reconstruct labels
if is_int64_overflow_possible(shape):
# at some point group indices are factorized,
# and may not be deconstructed here! wrong path!
raise ValueError('cannot deconstruct factorized group indices!')
label_list = []
factor = 1
y = 0
x = comp_labels
for i in reversed(range(len(shape))):
labels = (x - y) % (factor * shape[i]) // factor
np.putmask(labels, comp_labels < 0, -1)
label_list.append(labels)
y = labels * factor
factor *= shape[i]
return label_list[::-1]
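# Round-tripping the worked example from get_group_index:
# decons_group_index(np.array([0, 2, 3]), (2, 2)) recovers the original label
# arrays [0, 1, 1] and [0, 0, 1].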
def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull):
"""
reconstruct labels from observed group ids
Parameters
----------
xnull: boolean,
if nulls are excluded; i.e. -1 labels are passed through
"""
if not xnull:
lift = np.fromiter(((a == -1).any() for a in labels), dtype='i8')
shape = np.asarray(shape, dtype='i8') + lift
if not is_int64_overflow_possible(shape):
# obs ids are deconstructable! take the fast route!
out = decons_group_index(obs_ids, shape)
return out if xnull or not lift.any() \
else [x - y for x, y in zip(out, lift)]
i = unique_label_indices(comp_ids)
i8copy = lambda a: a.astype('i8', subok=False, copy=True)
return [i8copy(lab[i]) for lab in labels]
def indexer_from_factorized(labels, shape, compress=True):
ids = get_group_index(labels, shape, sort=True, xnull=False)
if not compress:
ngroups = (ids.size and ids.max()) + 1
else:
ids, obs = compress_group_index(ids, sort=True)
ngroups = len(obs)
return get_group_index_sorter(ids, ngroups)
def lexsort_indexer(keys, orders=None, na_position='last'):
from pandas.core.arrays import Categorical
labels = []
shape = []
if isinstance(orders, bool):
orders = [orders] * len(keys)
elif orders is None:
orders = [True] * len(keys)
for key, order in zip(keys, orders):
# we are already a Categorical
if is_categorical_dtype(key):
c = key
# create the Categorical
else:
c = Categorical(key, ordered=True)
if na_position not in ['last', 'first']:
raise ValueError('invalid na_position: {!r}'.format(na_position))
n = len(c.categories)
codes = c.codes.copy()
mask = (c.codes == -1)
if order: # ascending
if na_position == 'last':
codes = np.where(mask, n, codes)
elif na_position == 'first':
codes += 1
else: # not order means descending
if na_position == 'last':
codes = np.where(mask, n, n - codes - 1)
elif na_position == 'first':
codes = np.where(mask, 0, n - codes)
if mask.any():
n += 1
shape.append(n)
labels.append(codes)
return indexer_from_factorized(labels, shape)
def nargsort(items, kind='quicksort', ascending=True, na_position='last'):
"""
This is intended to be a drop-in replacement for np.argsort which
handles NaNs. It adds ascending and na_position parameters.
GH #6399, #5231
"""
# specially handle Categorical
if is_categorical_dtype(items):
return items.argsort(ascending=ascending, kind=kind)
items = np.asanyarray(items)
idx = np.arange(len(items))
mask = isna(items)
non_nans = items[~mask]
non_nan_idx = idx[~mask]
nan_idx = np.nonzero(mask)[0]
if not ascending:
non_nans = non_nans[::-1]
non_nan_idx = non_nan_idx[::-1]
indexer = non_nan_idx[non_nans.argsort(kind=kind)]
if not ascending:
indexer = indexer[::-1]
# Finally, place the NaNs at the end or the beginning according to
# na_position
if na_position == 'last':
indexer = np.concatenate([indexer, nan_idx])
elif na_position == 'first':
indexer = np.concatenate([nan_idx, indexer])
else:
raise ValueError('invalid na_position: {!r}'.format(na_position))
return indexer
class _KeyMapper(object):
"""
Ease my suffering. Map compressed group id -> key tuple
"""
def __init__(self, comp_ids, ngroups, levels, labels):
self.levels = levels
self.labels = labels
self.comp_ids = comp_ids.astype(np.int64)
self.k = len(labels)
self.tables = [hashtable.Int64HashTable(ngroups)
for _ in range(self.k)]
self._populate_tables()
def _populate_tables(self):
for labs, table in zip(self.labels, self.tables):
table.map(self.comp_ids, labs.astype(np.int64))
def get_key(self, comp_id):
return tuple(level[table.get_item(comp_id)]
for table, level in zip(self.tables, self.levels))
def get_flattened_iterator(comp_ids, ngroups, levels, labels):
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, levels, labels)
return [mapper.get_key(i) for i in range(ngroups)]
def get_indexer_dict(label_list, keys):
""" return a diction of {labels} -> {indexers} """
shape = list(map(len, keys))
group_index = get_group_index(label_list, shape, sort=True, xnull=True)
ngroups = ((group_index.size and group_index.max()) + 1) \
if is_int64_overflow_possible(shape) \
else np.prod(shape, dtype='i8')
sorter = get_group_index_sorter(group_index, ngroups)
sorted_labels = [lab.take(sorter) for lab in label_list]
group_index = group_index.take(sorter)
return lib.indices_fast(sorter, group_index, keys, sorted_labels)
# ----------------------------------------------------------------------
# sorting levels...cleverly?
def get_group_index_sorter(group_index, ngroups):
"""
algos.groupsort_indexer implements `counting sort` and it is at least
O(ngroups), where
ngroups = prod(shape)
shape = map(len, keys)
that is, linear in the number of combinations (cartesian product) of unique
values of groupby keys. This can be huge when doing multi-key groupby.
np.argsort(kind='mergesort') is O(count x log(count)) where count is the
length of the data-frame;
Both algorithms are `stable` sort and that is necessary for correctness of
groupby operations. e.g. consider:
df.groupby(key)[col].transform('first')
"""
count = len(group_index)
alpha = 0.0 # taking complexities literally; there may be
beta = 1.0 # some room for fine-tuning these parameters
do_groupsort = (count > 0 and ((alpha + beta * ngroups) <
(count * np.log(count))))
if do_groupsort:
sorter, _ = algos.groupsort_indexer(ensure_int64(group_index),
ngroups)
return ensure_platform_int(sorter)
else:
return group_index.argsort(kind='mergesort')
def compress_group_index(group_index, sort=True):
"""
Group_index is offsets into cartesian product of all possible labels. This
space can be huge, so this function compresses it, by computing offsets
(comp_ids) into the list of unique labels (obs_group_ids).
"""
size_hint = min(len(group_index), hashtable._SIZE_HINT_LIMIT)
table = hashtable.Int64HashTable(size_hint)
group_index = ensure_int64(group_index)
# note, group labels come out ascending (ie, 1,2,3 etc)
comp_ids, obs_group_ids = table.get_labels_groupby(group_index)
if sort and len(obs_group_ids) > 0:
obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids)
return comp_ids, obs_group_ids
def _reorder_by_uniques(uniques, labels):
# sorter is index where elements ought to go
sorter = uniques.argsort()
# reverse_indexer is where elements came from
reverse_indexer = np.empty(len(sorter), dtype=np.int64)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = labels < 0
# move labels to right locations (ie, unsort ascending labels)
labels = algorithms.take_nd(reverse_indexer, labels, allow_fill=False)
np.putmask(labels, mask, -1)
# sort observed ids
uniques = algorithms.take_nd(uniques, sorter, allow_fill=False)
return uniques, labels
def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False):
"""
Sort ``values`` and reorder corresponding ``labels``.
``values`` should be unique if ``labels`` is not None.
Safe for use with mixed types (int, str), orders ints before strs.
.. versionadded:: 0.19.0
Parameters
----------
values : list-like
Sequence; must be unique if ``labels`` is not None.
labels : list_like
Indices to ``values``. All out of bound indices are treated as
"not found" and will be masked with ``na_sentinel``.
na_sentinel : int, default -1
Value in ``labels`` to mark "not found".
Ignored when ``labels`` is None.
assume_unique : bool, default False
When True, ``values`` are assumed to be unique, which can speed up
the calculation. Ignored when ``labels`` is None.
Returns
-------
ordered : ndarray
Sorted ``values``
new_labels : ndarray
Reordered ``labels``; returned when ``labels`` is not None.
Raises
------
TypeError
* If ``values`` is not list-like or if ``labels`` is neither None
nor list-like
* If ``values`` cannot be sorted
ValueError
* If ``labels`` is not None and ``values`` contain duplicates.
"""
if not is_list_like(values):
raise TypeError("Only list-like objects are allowed to be passed to"
"safe_sort as values")
if not isinstance(values, np.ndarray):
# don't convert to string types
dtype, _ = infer_dtype_from_array(values)
values = np.asarray(values, dtype=dtype)
def sort_mixed(values):
# order ints before strings, safe in py3
str_pos = np.array([isinstance(x, string_types) for x in values],
dtype=bool)
nums = np.sort(values[~str_pos])
strs = np.sort(values[str_pos])
return np.concatenate([nums, np.asarray(strs, dtype=object)])
sorter = None
if PY3 and lib.infer_dtype(values) == 'mixed-integer':
# unorderable in py3 if mixed str/int
ordered = sort_mixed(values)
else:
try:
sorter = values.argsort()
ordered = values.take(sorter)
except TypeError:
# try this anyway
ordered = sort_mixed(values)
# labels:
if labels is None:
return ordered
if not is_list_like(labels):
raise TypeError("Only list-like objects or None are allowed to be"
"passed to safe_sort as labels")
labels = ensure_platform_int(np.asarray(labels))
from pandas import Index
if not assume_unique and not Index(values).is_unique:
raise ValueError("values should be unique if labels is not None")
if sorter is None:
# mixed types
(hash_klass, _), values = algorithms._get_data_algo(
values, algorithms._hashtables)
t = hash_klass(len(values))
t.map_locations(values)
sorter = ensure_platform_int(t.lookup(ordered))
reverse_indexer = np.empty(len(sorter), dtype=np.int_)
reverse_indexer.put(sorter, np.arange(len(sorter)))
mask = (labels < -len(values)) | (labels >= len(values)) | \
(labels == na_sentinel)
# (Out of bound indices will be masked with `na_sentinel` next, so we may
# deal with them here without performance loss using `mode='wrap'`.)
new_labels = reverse_indexer.take(labels, mode='wrap')
np.putmask(new_labels, mask, na_sentinel)
return ordered, ensure_platform_int(new_labels)
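# Illustrative usage sketch (added for clarity; not part of the original
# module). safe_sort orders the values and remaps labels to the new positions:
#
#     ordered, new_labels = safe_sort(np.array([3, 1, 2]),
#                                     labels=np.array([0, 1, 1, 2]))
#     # expected: ordered ~ [1, 2, 3], new_labels ~ [2, 0, 0, 1]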
| bsd-3-clause |
idlead/scikit-learn | examples/cluster/plot_color_quantization.py | 297 | 3443 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0-1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
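# Note (added): an equivalent vectorized sketch of recreate_image, assuming
# `labels` has length w * h, would be:
#
#     def recreate_image_fast(codebook, labels, w, h):
#         return codebook[labels].reshape(w, h, -1)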
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
leylabmpi/leylab_pipelines | tests/test_Utils.py | 1 | 1431 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# import
## batteries
import os
import sys
import unittest
## 3rd party
import pandas as pd
## package
from leylab_pipelines import Utils
# data dir
test_dir = os.path.join(os.path.dirname(__file__))
data_dir = os.path.join(test_dir, 'data')
# tests
class Test_Utils(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_make_range(self):
x = Utils.make_range('all')
self.assertIsNone(x)
x = Utils.make_range('0')
self.assertListEqual(x, [0])
x = Utils.make_range('1,2,5')
self.assertListEqual(x, [1,2,5])
x = Utils.make_range('1,2,5-6')
self.assertListEqual(x, [1,2,5,6])
x = Utils.make_range('1-3,5-6')
self.assertListEqual(x, [1,2,3,5,6])
def test_make_range_zeroindex(self):
x = Utils.make_range('all', set_zero_index=True)
self.assertIsNone(x)
with self.assertRaises(ValueError):
Utils.make_range('0', set_zero_index=True)
x = Utils.make_range('1,2,5', set_zero_index=True)
self.assertListEqual(x, [0,1,4])
x = Utils.make_range('1,2,5-6', set_zero_index=True)
self.assertListEqual(x, [0,1,4,5])
def test_check_gwl(self):
gwl_file = os.path.join(data_dir, 'multi_dispense.gwl')
ret = Utils.check_gwl(gwl_file)
self.assertIsNone(ret)
| mit |
jpautom/scikit-learn | examples/cluster/plot_color_quantization.py | 297 | 3443 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the summer palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it needs to
# be in the range [0-1]).
china = np.array(china, dtype=np.float64) / 255
# Load Image and transform to a 2D numpy array.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
codebook_random = shuffle(image_array, random_state=0)[:n_colors + 1]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |
krez13/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 244 | 6011 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, accuracy of LSHForest queries are measured for various
hyper-parameters and index sizes.
Second, the speed up of LSHForest queries compared to the brute force
method used for exact nearest neighbors is measured for the
aforementioned settings. In general, speed up increases as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
| bsd-3-clause |
ARudiuk/mne-python | examples/time_frequency/plot_source_label_time_frequency.py | 4 | 3776 | """
=========================================================
Compute power and phase lock in label of the source space
=========================================================
Compute time-frequency maps of power and phase lock in the source space.
The inverse method is linear based on dSPM inverse operator.
The example also shows the difference in the time-frequency maps
when they are computed with and without subtracting the evoked response
from each epoch. The former results in induced activity only while the
latter also includes evoked (stimulus-locked) activity.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_induced_power
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
label_name = 'Aud-rh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
tmin, tmax, event_id = -0.2, 0.5, 2
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Picks MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
# Load epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject,
preload=True)
# Compute a source estimate per frequency band including and excluding the
# evoked response
frequencies = np.arange(7, 30, 2) # define frequencies of interest
label = mne.read_label(fname_label)
n_cycles = frequencies / 3. # different number of cycle per frequency
# subtract the evoked response in order to exclude evoked activity
epochs_induced = epochs.copy().subtract_evoked()
plt.close('all')
for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced],
['evoked + induced',
'induced only'])):
# compute the source space power and phase lock
power, phase_lock = source_induced_power(
this_epochs, inverse_operator, frequencies, label, baseline=(-0.1, 0),
baseline_mode='percent', n_cycles=n_cycles, n_jobs=1)
power = np.mean(power, axis=0) # average over sources
phase_lock = np.mean(phase_lock, axis=0) # average over sources
times = epochs.times
##########################################################################
# View time-frequency plots
plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 2, 2 * ii + 1)
plt.imshow(20 * power,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0., vmax=30., cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Power (%s)' % title)
plt.colorbar()
plt.subplot(2, 2, 2 * ii + 2)
plt.imshow(phase_lock,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0, vmax=0.7,
cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Phase-lock (%s)' % title)
plt.colorbar()
plt.show()
| bsd-3-clause |
tensorflow/model-card-toolkit | setup.py | 1 | 5168 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup to install the Model Card Toolkit.
Run with `python3 setup.py sdist bdist_wheel`.
"""
# TODO(b/188859752): deprecate distutils
from distutils.command import build
import platform
import shutil
import subprocess
from setuptools import Command
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = [
'absl-py>=0.9,<0.11',
'semantic-version>=2.8.0,<3',
'jinja2>=2.10,<3',
'matplotlib>=3.2.0,<4',
'jsonschema>=3.2.0,<4',
'tensorflow-data-validation>=0.26.0,<0.27.0',
'tensorflow-model-analysis>=0.26.0,<0.27.0',
'tensorflow-metadata>=0.26.0,<0.27.0',
'ml-metadata>=0.26.0,<0.27.0',
'dataclasses;python_version<"3.7"',
]
# Get version from version module.
with open('model_card_toolkit/version.py') as fp:
globals_dict = {}
exec(fp.read(), globals_dict) # pylint: disable=exec-used
__version__ = globals_dict['__version__']
with open('README.md', 'r', encoding='utf-8') as fh:
_LONG_DESCRIPTION = fh.read()
class _BuildCommand(build.build):
"""Build everything that is needed to install.
  This overrides the original distutils "build" command to run the bazel_build
  command before any sub_commands.
build command is also invoked from bdist_wheel and install command, therefore
this implementation covers the following commands:
- pip install . (which invokes bdist_wheel)
- python setup.py install (which invokes install command)
- python setup.py bdist_wheel (which invokes bdist_wheel command)
"""
# Add "bazel_build" command as the first sub_command of "build". Each
# sub_command of "build" (e.g. "build_py", "build_extbaz", etc.) is executed
# sequentially when running a "build" command, if the second item in the tuple
# (predicate method) is evaluated to true.
sub_commands = [
('bazel_build', lambda self: True),
] + build.build.sub_commands
class _BazelBuildCommand(Command):
"""Build Bazel artifacts and move generated files."""
def initialize_options(self):
pass
def finalize_options(self):
# verified with bazel 2.0.0, 3.0.0, and 4.0.0 via bazelisk
self._bazel_cmd = shutil.which('bazel')
if not self._bazel_cmd:
self._bazel_cmd = shutil.which('bazelisk')
if not self._bazel_cmd:
raise RuntimeError(
'Could not find "bazel" or "bazelisk" binary. Please visit '
'https://docs.bazel.build/versions/master/install.html for '
'installation instruction.')
self._additional_build_options = []
if platform.system() == 'Darwin': # see b/175182911 for context
self._additional_build_options = ['--macos_minimum_os=10.9']
def run(self):
subprocess.check_call([
self._bazel_cmd, 'run', '--verbose_failures',
*self._additional_build_options,
'model_card_toolkit:move_generated_files'
])
setup(
name='model-card-toolkit',
version=__version__,
description='Model Card Toolkit',
long_description=_LONG_DESCRIPTION,
long_description_content_type='text/markdown',
url='https://github.com/tensorflow/model-card-toolkit',
author='Google LLC',
author_email='[email protected]',
packages=find_packages(exclude=('bazel-model_card_toolkit*',)),
package_data={
'model_card_toolkit': ['schema/**/*.json', 'template/**/*.jinja']
},
python_requires='>=3.6,<4',
install_requires=REQUIRED_PACKAGES,
tests_require=REQUIRED_PACKAGES,
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords='model card toolkit ml metadata machine learning',
cmdclass={
'build': _BuildCommand,
'bazel_build': _BazelBuildCommand,
})
| apache-2.0 |
sushant1993/alkaline-lysis | application.py | 2 | 1700 | from flask import Flask, render_template,request
import csv
from sklearn import linear_model
from scipy.optimize import curve_fit
import scipy
app = Flask(__name__)
def compute_coeffs():
csv_file = open('dop_c2.csv','r')
dim1 = []
dim2 = []
lines = csv_file.readlines()
for line in lines:
row = line.strip('\r\n').split(',')
row = map(float,row)
dim2.append(row[-1])
row.pop(-1)
dim1.append(row)
clf = linear_model.Ridge(1)
clf.fit(dim1,dim2)
coeffs = clf.coef_
return coeffs
def compute_yield(coeffs, inputs):
plasmid_yield = 0
for i in range(len(coeffs)):
plasmid_yield += coeffs[i]*inputs[i]
return plasmid_yield
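# Note (added): compute_yield is a plain dot product of the model coefficients
# and the inputs; it does not include the model intercept (clf.intercept_).
# An equivalent sketch, assuming coeffs and inputs have the same length:
#
#     plasmid_yield = sum(c * v for c, v in zip(coeffs, inputs))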
@app.route('/')
def index():
return render_template('index.html')
@app.route('/submit',methods=['POST'])
def submit():
if(request.method == "POST"):
#return request.form.get('buff1',None)+" "+request.form.get('buff2',None)+" "+request.form.get('buff3',None)+" "+request.form.get('vte',None)+" "+request.form.get('voc',None)
voc = request.form.get('voc',None)
buff1 = request.form.get('buff1',None)
buff2 = request.form.get('buff2',None)
buff3 = request.form.get('buff3',None)
vte = request.form.get('vte',None)
input_data = [float(voc),float(buff1),float(buff2),float(buff3),float(vte)]
print "***************************************"
print "Input data is: "+str(input_data)
print "***************************************"
coeff = compute_coeffs().tolist()
print "Coefficients: "+str(coeff)
print "***************************************"
plasmid_yield = compute_yield(coeff,input_data)
return "The predicted yield is : "+str(abs(plasmid_yield))
else:
return "Fail"
if __name__ == '__main__':
app.run(debug=True) | mit |
DStauffman/dstauffman2 | dstauffman2/games/tictactoe/tests/test_plotting.py | 1 | 4558 | r"""
Test file for the `tictactoe.classes` module of the dstauffman2 code. It is intended to contain test
cases to demonstrate functionality and correct outcomes for all the functions within the module.
Notes
-----
#. Written by David C. Stauffer in January 2016.
"""
#%% Imports
import unittest
import matplotlib.pyplot as plt
import numpy as np
import dstauffman2.games.tictactoe as ttt
#%% Aliases
o = ttt.PLAYER['o']
x = ttt.PLAYER['x']
n = ttt.PLAYER['none']
#%% Functions - _make_board
def _make_board():
r"""Makes a board and returns the figure and axis for use in testing."""
plt.ioff()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim(-0.5, 2.5)
ax.set_ylim(-0.5, 2.5)
ax.invert_yaxis()
return (fig, ax)
#%% plot_cur_move
class Test_plot_cur_move(unittest.TestCase):
r"""
Tests the plot_cur_move function with the following cases:
Nominal
"""
def setUp(self):
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.ax.set_xlim(-0.5, 0.5)
self.ax.set_ylim(-0.5, 0.5)
def test_x(self):
ttt.plot_cur_move(self.ax, x)
def test_o(self):
ttt.plot_cur_move(self.ax, o)
def test_none(self):
ttt.plot_cur_move(self.ax, n)
def test_bad_value(self):
with self.assertRaises(ValueError):
ttt.plot_cur_move(self.ax, 999)
def tearDown(self):
plt.close(self.fig)
#%% plot_piece
class Test_plot_piece(unittest.TestCase):
r"""
Tests the plot_piece function with the following cases:
Nominal
"""
def setUp(self):
(self.fig, self.ax) = _make_board()
def test_x(self):
ttt.plot_piece(self.ax, 1, 1, 0.9, (0, 0, 1), x)
def test_o(self):
ttt.plot_piece(self.ax, 1, 1, 0.9, (0, 0, 1), o, thick=False)
def test_draw(self):
ttt.plot_piece(self.ax, 1, 1, 0.9, (0, 0, 1), ttt.PLAYER['draw'])
def test_bad_player(self):
with self.assertRaises(ValueError):
ttt.plot_piece(self.ax, 1, 1, 0.9, (0, 0, 1), 999)
def tearDown(self):
plt.close(self.fig)
#%% plot_board
class Test_plot_board(unittest.TestCase):
r"""
Tests the plot_board function with the following cases:
Nominal
"""
def setUp(self):
(self.fig, self.ax) = _make_board()
def test_nominal(self):
board = np.full((3, 3), n, dtype=int)
board[0, 0:2] = x
board[1, 1] = o
ttt.plot_board(self.ax, board)
def test_bad_board_position(self):
board = np.full((3, 3), n, dtype=int)
board[1, 1] = 999
with self.assertRaises(ValueError):
ttt.plot_board(self.ax, board)
def tearDown(self):
plt.close(self.fig)
#%% plot_win
class Test_plot_win(unittest.TestCase):
r"""
Tests the plot_win function with the following cases:
Nominal
"""
def setUp(self):
(self.fig, self.ax) = _make_board()
def test_nominal(self):
mask = np.zeros((3, 3), dtype=bool)
mask[0, 0:2] = True
board = np.full((3, 3), n, dtype=int)
board[0, 0:2] = x
ttt.plot_win(self.ax, mask, board)
def tearDown(self):
plt.close(self.fig)
#%% plot_possible_win
class Test_plot_possible_win(unittest.TestCase):
r"""
Tests the plot_possible_win function with the following cases:
Nominal
"""
def setUp(self):
(self.fig, self.ax) = _make_board()
def test_all_out_wins(self):
board = np.array([[x, x, n], [n, n, n], [o, o, n]], dtype=int)
(o_moves, x_moves) = ttt.find_moves(board)
ttt.plot_possible_win(self.ax, o_moves, x_moves)
def test_shared_wins(self):
board = np.array([[x, n, o], [n, n, n], [o, n, x]], dtype=int)
(o_moves, x_moves) = ttt.find_moves(board)
ttt.plot_possible_win(self.ax, o_moves, x_moves)
def tearDown(self):
plt.close(self.fig)
#%% plot_powers
class Test_plot_powers(unittest.TestCase):
r"""
Tests the plot_powers function with the following cases:
Nominal
"""
def setUp(self):
(self.fig, self.ax) = _make_board()
def test_nominal(self):
board = np.array([[x, n, n], [n, o, n], [n, o, n]], dtype=int)
ttt.plot_board(self.ax, board)
(o_moves, x_moves) = ttt.find_moves(board)
ttt.plot_powers(self.ax, board, o_moves, x_moves)
def tearDown(self):
plt.close(self.fig)
#%% Unit test execution
if __name__ == '__main__':
unittest.main(exit=False)
| lgpl-3.0 |
mikebenfield/scikit-learn | sklearn/tree/export.py | 35 | 16873 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# License: BSD 3 clause
import numpy as np
import warnings
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
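# Illustrative note (added; not part of scikit-learn): _color_brew(3) should
# return three RGB triples with hues roughly 120 degrees apart; with the
# saturation/value constants above, the first triple is expected to be around
# [229, 129, 57], though exact values depend on the rounding.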
class Sentinel(object):
    def __repr__(self):
return '"tree.dot"'
SENTINEL = Sentinel()
def export_graphviz(decision_tree, out_file=SENTINEL, max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default='tree.dot')
Handle or name of the output file. If ``None``, the result is
        returned as a string. This will be the default from version 0.20.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Returns
-------
dot_data : string
String representation of the input tree in GraphViz dot format.
Only returned if ``out_file`` is None.
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
if len(sorted_values) == 1:
alpha = 0
else:
alpha = int(np.round(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]), 0))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(np.round(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] -
colors['bounds'][0])), 0))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
return_string = False
try:
if out_file == SENTINEL:
warnings.warn("out_file can be set to None starting from 0.18. "
"This will be the default in 0.20.",
DeprecationWarning)
out_file = "tree.dot"
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
if out_file is None:
return_string = True
out_file = six.StringIO()
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
if return_string:
return out_file.getvalue()
finally:
if own_file:
out_file.close()
| bsd-3-clause |
jundongl/PyFeaST | skfeature/example/test_fisher_score.py | 3 | 1609 | import scipy.io
from sklearn import cross_validation
from sklearn import svm
from sklearn.metrics import accuracy_score
from skfeature.function.similarity_based import fisher_score
def main():
# load data
mat = scipy.io.loadmat('../data/COIL20.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
n_samples, n_features = X.shape # number of samples and number of features
# split data into 10 folds
ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)
# perform evaluation on classification task
num_fea = 100 # number of selected features
clf = svm.LinearSVC() # linear SVM
correct = 0
for train, test in ss:
# obtain the score of each feature on the training set
score = fisher_score.fisher_score(X[train], y[train])
# rank features in descending order according to score
idx = fisher_score.feature_ranking(score)
# obtain the dataset on the selected features
selected_features = X[:, idx[0:num_fea]]
# train a classification model with the selected features on the training dataset
clf.fit(selected_features[train], y[train])
# predict the class labels of test data
y_predict = clf.predict(selected_features[test])
# obtain the classification accuracy on the test data
acc = accuracy_score(y[test], y_predict)
correct = correct + acc
# output the average classification accuracy over all 10 folds
print 'Accuracy:', float(correct)/10
if __name__ == '__main__':
main()
| gpl-2.0 |
brguez/TEIBA | src/python/genomic_distribution_cor.py | 1 | 4598 | #!/usr/bin/env python
#coding: utf-8
#### FUNCTIONS ####
def header(string):
"""
Display header
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print '\n', timeInfo, "****", string, "****"
def subHeader(string):
"""
Display subheader
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, "**", string, "**"
def info(string):
"""
Display basic information
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, string
def scatterCorr(arrayA, arrayB, threshold, outPath):
"""
    Interpretation of the strength of correlation (Spearman's rho):
    very weak: < 0.15
    weak: 0.15-0.25
    moderate: 0.25-0.40
    strong: 0.40-0.75
    very strong: > 0.75
"""
corr = stats.spearmanr(arrayA, arrayB)
coefficient = float(format(corr[0], '.3f'))
pvalue = float(corr[1])
print "pvalue: ", pvalue
## Make scatterplot if rho >= threshold or <= -theshold
if (coefficient >= threshold) or (coefficient <= -threshold):
# Make scatterplot
fig = plt.figure(figsize=(6,6))
ax1 = fig.add_subplot(1, 1, 1)
#plot = sns.jointplot(x=arrayA, y=arrayB, kind="hex", xlim=(0,40), gridsize=50, dropna=True, cmap="Blues", stat_func=spearmanr)
plot = sns.jointplot(x=arrayA, y=arrayB, kind="kde", space=0, xlim=(0,30), gridsize=50, dropna=True, cmap="Blues", stat_func=spearmanr)
plt.xlabel('# L1', fontsize=12)
plt.ylabel('Replication time', fontsize=12)
# sns.plt.subplots_adjust(left=0.2, right=0.8, top=0.8, bottom=0.2) # shrink fig so cbar is visible
# cax = plot.fig.add_axes([.85, .25, .05, .4]) # x, y, width, height
# sns.plt.colorbar(cax=cax)
#sns.jointplot(x=arrayA, y=arrayB, kind="kde", space=0, color="b", xlim=(0,30))
## Save figure
fileName = outPath + '_' + str(coefficient) + '_correlation.pdf'
plt.savefig(fileName)
return coefficient, pvalue
#### MAIN ####
## Import modules ##
import argparse
import sys
import os.path
import formats
import time
import scipy.stats as stats
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from scipy.stats import spearmanr
## Graphic style ##
sns.set_style("white")
sns.set_style("ticks")
#sns.set(font="Verdana")
## Get user's input ##
parser = argparse.ArgumentParser(description="Compute correlation between L1 retrotransposition rate and diverse genomic features (replication time, gene expression and gene density)")
parser.add_argument('input', help='Genomic features table')
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.' )
args = parser.parse_args()
inputFile = args.input
outDir = args.outDir
scriptName = os.path.basename(sys.argv[0])
## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "inputFile: ", inputFile
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print
## Start ##
#### 1. Load input tables:
##########################
inputDf = pd.read_csv(inputFile, header=0, sep='\t')
print "inputDf: ", inputDf
#### 2. Number L1 insertions and L1 endonuclease motif correlation
###################################################################
## Remove bins with a number of L1 EN motifs == 0
# These will mostly correspond to telomeric, centromeric regions
filteredDf = inputDf[inputDf["nbL1Motif"] > 0]
## Make plot
fig = plt.figure(figsize=(6,6))
ax1 = fig.add_subplot(1, 1, 1)
plot = sns.jointplot("nbL1", "nbL1Motif", data=filteredDf, xlim=(0,30), kind="kde", space=0, dropna=True, cmap="Blues", stat_func=spearmanr)
## Save figure
outPath = outDir + '/nbL1_nbL1Motif_corr.pdf'
plt.savefig(outPath)
outPath = outDir + '/nbL1_nbL1Motif_corr.svg'
plt.savefig(outPath)
#### 3. Number L1 insertions and median replication time correlation
#####################################################################
## Remove bins with NA values for replication timing (one bin with NA expression)
filteredDf = inputDf.dropna(subset=["medianRT"])
## Make plot
fig = plt.figure(figsize=(6,6))
ax1 = fig.add_subplot(1, 1, 1)
plot = sns.jointplot("nbL1", "medianRT", data=filteredDf, xlim=(0,30), kind="kde", space=0, dropna=True, cmap="Blues", stat_func=spearmanr)
## Save figure
outPath = outDir + '/nbL1_medianRT_corr.pdf'
plt.savefig(outPath)
outPath = outDir + '/nbL1_medianRT_corr.svg'
plt.savefig(outPath)
####
header("Finished")
| gpl-3.0 |
yselivonchyk/TensorFlow_DCIGN | utils.py | 1 | 16524 | import datetime
import time
from matplotlib import pyplot as plt
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import collections
import tensorflow as tf
from scipy import misc
import re
import sys
import subprocess as sp
import warnings
import functools
import scipy
import random
IMAGE_FOLDER = './img/'
TEMP_FOLDER = './tmp/'
EPOCH_THRESHOLD = 4
FLAGS = tf.app.flags.FLAGS
_start_time = None
# CONSOLE OPERATIONS
def reset_start_time():
global _start_time
_start_time = None
def _get_time_offset():
global _start_time
time = datetime.datetime.now()
if _start_time is None:
_start_time = time
return '\t\t'
sec = (time - _start_time).total_seconds()
res = '(+%d)\t' % sec if sec < 60 else '(+%d:%02d)\t' % (sec/60, sec%60)
return res
def print_time(*args, same_line=False):
string = ''
for a in args:
string += str(a) + ' '
time = datetime.datetime.now().time().strftime('%H:%M:%S')
offset = _get_time_offset()
res = '%s%s %s' % (str(time), offset, str(string))
print_color(res, same_line=same_line)
def print_info(string, color=32, same_line=False):
print_color('\t' + str(string), color=color, same_line=same_line)
same_line_prev = None
def print_color(string, color=33, same_line=False):
global same_line_prev
res = '%c[1;%dm%s%c[0m' % (27, color, str(string), 27)
if same_line:
print('\r ' +
' ', end=' ')
print('\r' + res, end=' ')
else:
# if same_line_prev:
# print('\n')
print(res)
same_line_prev = same_line
def mnist_select_n_classes(train_images, train_labels, num_classes, min=None, scale=1.0):
result_images, result_labels = [], []
for i, j in zip(train_images, train_labels):
if np.sum(j[0:num_classes]) > 0:
result_images.append(i)
result_labels.append(j[0:num_classes])
inputs = np.asarray(result_images)
inputs *= scale
if min is not None:
inputs = inputs - np.min(inputs) + min
return inputs, np.asarray(result_labels)
# IMAGE OPERATIONS
def _save_image(name='image', save_params=None, image=None):
if save_params is not None and 'e' in save_params and save_params['e'] < EPOCH_THRESHOLD:
print_info('IMAGE: output is not saved. epochs %d < %d' % (save_params['e'], EPOCH_THRESHOLD), color=31)
return
file_name = name if save_params is None else to_file_name(save_params)
file_name += '.png'
name = os.path.join(FLAGS.save_path, file_name)
if image is not None:
misc.imsave(name, arr=image, format='png')
def _show_picture(pic):
fig = plt.figure()
size = fig.get_size_inches()
fig.set_size_inches(size[0], size[1] * 2, forward=True)
plt.imshow(pic, cmap='Greys_r')
def concat_images(im1, im2, axis=0):
if im1 is None:
return im2
return np.concatenate((im1, im2), axis=axis)
def _reconstruct_picture_line(pictures, shape):
line_picture = None
for _, img in enumerate(pictures):
if len(img.shape) == 1:
img = (np.reshape(img, shape))
if len(img.shape) == 3 and img.shape[2] == 1:
img = (np.reshape(img, (img.shape[0], img.shape[1])))
line_picture = concat_images(line_picture, img)
return line_picture
def show_plt():
plt.show()
def _construct_img_shape(img):
assert int(np.sqrt(img.shape[0])) == np.sqrt(img.shape[0])
return int(np.sqrt(img.shape[0])), int(np.sqrt(img.shape[0])), 1
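# Descriptive note (added): _construct_img_shape assumes a flattened square
# grayscale image, e.g. a length-784 vector maps to the shape (28, 28, 1).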
def images_to_uint8(func):
def normalize(arr):
# if type(arr) == np.ndarray and arr.dtype != np.uint8 and len(arr.shape) >= 3:
if type(arr) == np.ndarray and len(arr.shape) >= 3:
if np.min(arr) < 0:
print('image array normalization: negative values')
if np.max(arr) < 10:
arr *= 255
if arr.shape[-1] == 4 or arr.shape[-1] == 2:
old_shape = arr.shape
arr = arr[..., :arr.shape[-1]-1]
return arr.astype(np.uint8)
return arr
def func_wrapper(*args, **kwargs):
new_args = [normalize(el) for el in args]
new_kwargs = {k: normalize(kwargs[k]) for _, k in enumerate(kwargs)}
return func(*tuple(new_args), **new_kwargs)
return func_wrapper
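# Usage sketch (added): images_to_uint8 is meant to decorate image-consuming
# functions so that float arrays (roughly in [0, 1]) and arrays with an alpha
# channel are converted to uint8 RGB before the wrapped function runs, e.g.
#
#     @images_to_uint8
#     def save_images(*image_arrays): ...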
def fig2buf(fig):
fig.canvas.draw()
return fig.canvas.tostring_rgb()
def fig2rgb_array(fig, expand=True):
fig.canvas.draw()
buf = fig.canvas.tostring_rgb()
ncols, nrows = fig.canvas.get_width_height()
shape = (nrows, ncols, 3) if not expand else (1, nrows, ncols, 3)
return np.fromstring(buf, dtype=np.uint8).reshape(shape)
@images_to_uint8
def reconstruct_images_epochs(epochs, original=None, save_params=None, img_shape=None):
full_picture = None
img_shape = img_shape if img_shape is not None else _construct_img_shape(epochs[0][0])
# print(original.dtype, epochs.dtype, np.max(original), np.max(epochs))
if original.dtype != np.uint8:
original = (original * 255).astype(np.uint8)
if epochs.dtype != np.uint8:
epochs = (epochs * 255).astype(np.uint8)
# print('image reconstruction: ', original.dtype, epochs.dtype, np.max(original), np.max(epochs))
if original is not None and epochs is not None and len(epochs) >= 3:
min_ref, max_ref = np.min(original), np.max(original)
print_info('epoch avg: (original: %s) -> %s' % (
str(np.mean(original)), str((np.mean(epochs[0]), np.mean(epochs[1]), np.mean(epochs[2])))))
print_info('reconstruction char. in epochs (min, max)|original: (%f %f)|(%f %f)' % (
np.min(epochs[1:]), np.max(epochs), min_ref, max_ref))
if epochs is not None:
for _, epoch in enumerate(epochs):
full_picture = concat_images(full_picture, _reconstruct_picture_line(epoch, img_shape), axis=1)
if original is not None:
full_picture = concat_images(full_picture, _reconstruct_picture_line(original, img_shape), axis=1)
_show_picture(full_picture)
_save_image(save_params=save_params, image=full_picture)
def model_to_file_name(FLAGS, folder=None, ext=None):
postfix = '' if len(FLAGS.postfix) == 0 else '_%s' % FLAGS.postfix
name = '%s.%s__i_%s%s' % (FLAGS.model, FLAGS.net.replace('-', '_'), FLAGS.input_name, postfix)
if ext:
name += '.' + ext
if folder:
name = os.path.join(folder, name)
return name
def mkdir(folders):
if isinstance(folders, str):
folders = [folders]
for _, folder in enumerate(folders):
if not os.path.exists(folder):
os.mkdir(folder)
def configure_folders(FLAGS):
folder_name = model_to_file_name(FLAGS) + '/'
FLAGS.save_path = os.path.join(TEMP_FOLDER, folder_name)
FLAGS.logdir = FLAGS.save_path
print_color(os.path.abspath(FLAGS.logdir))
mkdir([TEMP_FOLDER, IMAGE_FOLDER, FLAGS.save_path, FLAGS.logdir])
with open(os.path.join(FLAGS.save_path, '!note.txt'), "a") as f:
f.write('\n' + ' '.join(sys.argv) + '\n')
f.write(print_flags(FLAGS, print=False))
if len(FLAGS.comment) > 0:
f.write('\n\n%s\n' % FLAGS.comment)
def get_files(folder="./visualizations/", filter=None):
all = []
for root, dirs, files in os.walk(folder):
# print(root, dirs, files)
if filter:
files = [x for x in files if re.match(filter, x)]
all += files
return [os.path.join(folder, x) for x in all]
def get_latest_file(folder="./visualizations/", filter=None):
latest_file, latest_mod_time = None, None
for root, dirs, files in os.walk(folder):
# print(root, dirs, files)
if filter:
files = [x for x in files if re.match(filter, x)]
# print('\n\r'.join(files))
for file in files:
file_path = os.path.join(root, file)
modification_time = os.path.getmtime(file_path)
if not latest_mod_time or modification_time > latest_mod_time:
latest_mod_time = modification_time
latest_file = file_path
if latest_file is None:
print_info('Could not find file matching %s' % str(filter))
return latest_file
def concatenate(x, y, take=None):
"""
Stitches two np arrays together until maximum length, when specified
"""
if take is not None and x is not None and len(x) >= take:
return x
if x is None:
res = y
else:
res = np.concatenate((x, y))
return res[:take] if take is not None else res
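# Illustrative sketch (added): concatenate grows an accumulator up to `take`
# rows, e.g. concatenate(np.ones((2, 3)), np.ones((4, 3)), take=5) is expected
# to return an array of shape (5, 3); with take=None the full (6, 3)
# concatenation is returned.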
# MISC
def list_object_attributes(obj):
print('Object type: %s\t\tattributes:' % str(type(obj)))
print('\n\t'.join(map(str, obj.__dict__.keys())))
def print_list(list):
print('\n'.join(map(str, list)))
def print_float_list(list, format='%.4f'):
return ' '.join(map(lambda x: format%x, list))
def timeit(method):
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
print('%r %2.2f sec' % (method.__name__, te-ts))
return result
return timed
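# Usage sketch (added): decorate any function to print its wall-clock runtime:
#
#     @timeit
#     def slow_op():
#         time.sleep(1.0)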
def deprecated(func):
'''This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.'''
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.warn_explicit(
"Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning,
      filename=func.__code__.co_filename,
      lineno=func.__code__.co_firstlineno + 1
)
return func(*args, **kwargs)
return new_func
import numpy as np
ACCEPTABLE_AVAILABLE_MEMORY = 1024
def mask_busy_gpus(leave_unmasked=1, random=True):
try:
command = "nvidia-smi --query-gpu=memory.free --format=csv"
memory_free_info = _output_to_list(sp.check_output(command.split()))[1:]
memory_free_values = [int(x.split()[0]) for i, x in enumerate(memory_free_info)]
available_gpus = [i for i, x in enumerate(memory_free_values) if x > ACCEPTABLE_AVAILABLE_MEMORY]
if len(available_gpus) < leave_unmasked:
print('Found only %d usable GPUs in the system' % len(available_gpus))
exit(0)
if random:
available_gpus = np.asarray(available_gpus)
np.random.shuffle(available_gpus)
# update CUDA variable
gpus = available_gpus[:leave_unmasked]
setting = ','.join(map(str, gpus))
os.environ["CUDA_VISIBLE_DEVICES"] = setting
print('Left next %d GPU(s) unmasked: [%s] (from %s available)'
% (leave_unmasked, setting, str(available_gpus)))
except FileNotFoundError as e:
print('"nvidia-smi" is probably not installed. GPUs are not masked')
print(e)
except sp.CalledProcessError as e:
print("Error on GPU masking:\n", e.output)
def _output_to_list(output):
return output.decode('ascii').split('\n')[:-1]
def get_gpu_free_session(memory_fraction=0.1):
import tensorflow as tf
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=memory_fraction)
return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
def parse_params():
params = {}
for i, param in enumerate(sys.argv):
if '-' in param:
params[param[1:]] = sys.argv[i+1]
print(params)
return params
def print_flags(FLAGS, print=True):
x = FLAGS.input_path
res = 'FLAGS:'
for i in sorted(FLAGS.__dict__['__flags'].items()):
item = str(i)[2:-1].split('\', ')
res += '\n%20s \t%s' % (item[0] + ':', item[1])
if print:
print_info(res)
return res
def _abbreviate_string(value):
str_value = str(value)
abbr = [letter for letter in str_value if letter.isupper()]
if len(abbr) > 1:
return ''.join(abbr)
if len(str_value.split('_')) > 2:
parts = str_value.split('_')
letters = ''.join(x[0] for x in parts)
return letters
return value
def to_file_name(obj, folder=None, ext=None, append_timestamp=False):
name, postfix = '', ''
od = collections.OrderedDict(sorted(obj.items()))
for _, key in enumerate(od):
value = obj[key]
if value is None:
value = 'na'
#FUNC and OBJECTS
if 'function' in str(value):
value = str(value).split()[1].split('.')[0]
parts = value.split('_')
if len(parts) > 1:
value = ''.join(list(map(lambda x: x.upper()[0], parts)))
elif ' at ' in str(value):
value = (str(value).split()[0]).split('.')[-1]
value = _abbreviate_string(value)
elif isinstance(value, type):
value = _abbreviate_string(value.__name__)
# FLOATS
if isinstance(value, float) or isinstance(value, np.float32):
if value < 0.0001:
value = '%.6f' % value
elif value > 1000000:
value = '%.0f' % value
else:
value = '%.4f' % value
value = value.rstrip('0')
#INTS
if isinstance(value, int):
value = '%02d' % value
#LIST
if isinstance(value, list):
value = '|'.join(map(str, value))
truncate_threshold = 20
value = _abbreviate_string(value)
if len(value) > truncate_threshold:
print_info('truncating this: %s %s' % (key, value))
value = value[0:20]
if 'suf' in key or 'postf' in key:
continue
name += '__%s|%s' % (key, str(value))
if 'suf' in obj:
prefix_value = obj['suf']
else:
prefix_value = FLAGS.suffix
if 'postf' in obj:
prefix_value += '_%s' % obj['postf']
name = prefix_value + name
if ext:
name += '.' + ext
if folder:
name = os.path.join(folder, name)
return name
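# Worked example (a sketch; the exact string depends on the formatting rules above):
#   to_file_name({'suf': 'run1', 'lr': 0.001, 'bs': 32}, ext='txt')
#   -> 'run1__bs|32__lr|0.001.txt'
# Keys are sorted, ints are zero-padded to two digits, floats are trimmed of
# trailing zeros, and 'suf'/'postf' entries become the prefix instead of a field.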
def dict_to_ordereddict(dict):
return collections.OrderedDict(sorted(dict.items()))
def configure_folders_2(FLAGS, meta):
folder_meta = meta.copy()
folder_meta.pop('init')
folder_meta.pop('lr')
folder_meta.pop('opt')
folder_meta.pop('bs')
folder_name = to_file_name(folder_meta) + '/'
checkpoint_folder = os.path.join(TEMP_FOLDER, folder_name)
log_folder = os.path.join(checkpoint_folder, 'log')
mkdir([TEMP_FOLDER, IMAGE_FOLDER, checkpoint_folder, log_folder])
FLAGS.save_path = checkpoint_folder
FLAGS.logdir = log_folder
return checkpoint_folder, log_folder
# precision/recall evaluation
def evaluate_precision_recall(y, target, labels):
import sklearn.metrics as metrics
target = target[:len(y)]
num_classes = max(target) + 1
results = []
for i in range(num_classes):
class_target = _extract_single_class(i, target)
class_y = _extract_single_class(i, y)
results.append({
'precision': metrics.precision_score(class_target, class_y),
'recall': metrics.recall_score(class_target, class_y),
'f1': metrics.f1_score(class_target, class_y),
'fraction': sum(class_target)/len(target),
'#of_class': int(sum(class_target)),
'label': labels[i],
'label_id': i
# 'tp': tp
})
print('%d/%d' % (i, num_classes), results[-1])
accuracy = metrics.accuracy_score(target, y)
return accuracy, results
def _extract_single_class(i, classes):
res, i = classes + 1, i + 1
res[res != i] = 0
res = np.asarray(res)
res = res / i
return res
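# Sketch of the one-vs-rest conversion above: for target = [0, 1, 2, 1] and
# class i = 1, _extract_single_class shifts labels to [1, 2, 3, 2], zeroes
# everything that is not 2, and rescales to the binary vector [0., 1., 0., 1.],
# which is what the per-class precision/recall/F1 are computed on.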
def print_relevance_info(relevance, prefix='', labels=None):
labels = labels if labels is not None else np.arange(len(relevance))
separator = '\n\t' if len(relevance) > 3 else ' '
result = '%s format: [" label": f1_score (precision recall) label_percentage]' % prefix
format = '\x1B[0m%s\t"%25s":\x1B[31;40m%.2f\x1B[0m (%.2f %.2f) %d%%'
for i, label_relevance in enumerate(relevance):
result += format % (separator,
str(labels[i]),
label_relevance['f1'],
label_relevance['precision'],
label_relevance['recall'],
int(label_relevance['fraction']*10000)/100.
)
print(result)
def disalbe_tensorflow_warnings():
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
@timeit
def images_to_sprite(arr, path=None):
assert len(arr) <= 100*100
arr = arr[...,:3]
resized = [scipy.misc.imresize(x[..., :3], size=[80, 80]) for x in arr]
base = np.zeros([8000, 8000, 3], np.uint8)
for i in range(100):
for j in range(100):
index = j+100*i
if index < len(resized):
base[80*i:80*i+80, 80*j:80*j+80] = resized[index]
scipy.misc.imsave(path, base)
def generate_tsv(num, path):
with open(path, mode='w') as f:
[f.write('%d\n' % i) for i in range(num)]
def paste_patch(patch, base_size=40, upper_half=True):
channels = patch.shape[-1]
base = np.zeros((base_size, base_size, channels), dtype=np.uint8)
position_x = random.randint(0, base_size - patch.shape[0])
    position_y = random.randint(0, base_size // 2 - patch.shape[1])
if not upper_half: position_y += int(base_size / 2)
base[position_x:position_x + patch.shape[0], position_y:position_y + patch.shape[1], :] = patch
return base
if __name__ == '__main__':
data = []
for i in range(10):
data.append((str(i), np.random.rand(1000)))
mask_busy_gpus() | mit |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/core/nanops.py | 1 | 40573 | import functools
import itertools
import operator
from typing import Any, Optional, Tuple, Union
import numpy as np
from pandas._config import get_option
from pandas._libs import iNaT, lib, tslibs
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import _int64_max, maybe_upcast_putmask
from pandas.core.dtypes.common import (
_get_dtype,
is_any_int_dtype,
is_bool_dtype,
is_complex,
is_complex_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna
import pandas.core.common as com
bn = import_optional_dependency("bottleneck", raise_on_missing=False, on_version="warn")
_BOTTLENECK_INSTALLED = bn is not None
_USE_BOTTLENECK = False
def set_use_bottleneck(v=True):
# set/unset to use bottleneck
global _USE_BOTTLENECK
if _BOTTLENECK_INSTALLED:
_USE_BOTTLENECK = v
set_use_bottleneck(get_option("compute.use_bottleneck"))
class disallow:
def __init__(self, *dtypes):
super().__init__()
self.dtypes = tuple(pandas_dtype(dtype).type for dtype in dtypes)
def check(self, obj):
return hasattr(obj, "dtype") and issubclass(obj.dtype.type, self.dtypes)
def __call__(self, f):
@functools.wraps(f)
def _f(*args, **kwargs):
obj_iter = itertools.chain(args, kwargs.values())
if any(self.check(obj) for obj in obj_iter):
msg = "reduction operation {name!r} not allowed for this dtype"
raise TypeError(msg.format(name=f.__name__.replace("nan", "")))
try:
with np.errstate(invalid="ignore"):
return f(*args, **kwargs)
except ValueError as e:
# we want to transform an object array
# ValueError message to the more typical TypeError
# e.g. this is normally a disallowed function on
# object arrays that contain strings
if is_object_dtype(args[0]):
raise TypeError(e)
raise
return _f
class bottleneck_switch:
def __init__(self, name=None, **kwargs):
self.name = name
self.kwargs = kwargs
def __call__(self, alt):
bn_name = self.name or alt.__name__
try:
bn_func = getattr(bn, bn_name)
except (AttributeError, NameError): # pragma: no cover
bn_func = None
@functools.wraps(alt)
def f(values, axis=None, skipna=True, **kwds):
if len(self.kwargs) > 0:
for k, v in self.kwargs.items():
if k not in kwds:
kwds[k] = v
try:
if values.size == 0 and kwds.get("min_count") is None:
# We are empty, returning NA for our type
# Only applies for the default `min_count` of None
# since that affects how empty arrays are handled.
# TODO(GH-18976) update all the nanops methods to
# correctly handle empty inputs and remove this check.
# It *may* just be `var`
return _na_for_min_count(values, axis)
if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name):
result = bn_func(values, axis=axis, **kwds)
# prefer to treat inf/-inf as NA, but must compute the func
# twice :(
if _has_infs(result):
result = alt(values, axis=axis, skipna=skipna, **kwds)
else:
result = alt(values, axis=axis, skipna=skipna, **kwds)
except Exception:
try:
result = alt(values, axis=axis, skipna=skipna, **kwds)
except ValueError as e:
# we want to transform an object array
# ValueError message to the more typical TypeError
# e.g. this is normally a disallowed function on
# object arrays that contain strings
if is_object_dtype(values):
raise TypeError(e)
raise
return result
return f
def _bn_ok_dtype(dt, name):
# Bottleneck chokes on datetime64
if not is_object_dtype(dt) and not (
is_datetime_or_timedelta_dtype(dt) or is_datetime64tz_dtype(dt)
):
# GH 15507
# bottleneck does not properly upcast during the sum
# so can overflow
# GH 9422
# further we also want to preserve NaN when all elements
        # are NaN, unlike bottleneck/numpy which consider this
# to be 0
if name in ["nansum", "nanprod"]:
return False
return True
return False
def _has_infs(result):
if isinstance(result, np.ndarray):
if result.dtype == "f8":
return lib.has_infs_f8(result.ravel())
elif result.dtype == "f4":
return lib.has_infs_f4(result.ravel())
try:
return np.isinf(result).any()
except (TypeError, NotImplementedError):
# if it doesn't support infs, then it can't have infs
return False
def _get_fill_value(dtype, fill_value=None, fill_value_typ=None):
""" return the correct fill value for the dtype of the values """
if fill_value is not None:
return fill_value
if _na_ok_dtype(dtype):
if fill_value_typ is None:
return np.nan
else:
if fill_value_typ == "+inf":
return np.inf
else:
return -np.inf
else:
if fill_value_typ is None:
return tslibs.iNaT
else:
if fill_value_typ == "+inf":
# need the max int here
return _int64_max
else:
return tslibs.iNaT
def _maybe_get_mask(
values: np.ndarray, skipna: bool, mask: Optional[np.ndarray]
) -> Optional[np.ndarray]:
""" This function will compute a mask iff it is necessary. Otherwise,
return the provided mask (potentially None) when a mask does not need to be
computed.
A mask is never necessary if the values array is of boolean or integer
dtypes, as these are incapable of storing NaNs. If passing a NaN-capable
dtype that is interpretable as either boolean or integer data (eg,
timedelta64), a mask must be provided.
If the skipna parameter is False, a new mask will not be computed.
    The mask is computed using isna().
Parameters
----------
values : ndarray
input array to potentially compute mask for
skipna : bool
boolean for whether NaNs should be skipped
mask : Optional[ndarray]
nan-mask if known
Returns
-------
Optional[np.ndarray]
"""
if mask is None:
if is_bool_dtype(values.dtype) or is_integer_dtype(values.dtype):
# Boolean data cannot contain nulls, so signal via mask being None
return None
if skipna:
mask = isna(values)
return mask
def _get_values(
values: np.ndarray,
skipna: bool,
fill_value: Any = None,
fill_value_typ: Optional[str] = None,
mask: Optional[np.ndarray] = None,
) -> Tuple[np.ndarray, Optional[np.ndarray], np.dtype, np.dtype, Any]:
""" Utility to get the values view, mask, dtype, dtype_max, and fill_value.
If both mask and fill_value/fill_value_typ are not None and skipna is True,
the values array will be copied.
For input arrays of boolean or integer dtypes, copies will only occur if a
precomputed mask, a fill_value/fill_value_typ, and skipna=True are
provided.
Parameters
----------
values : ndarray
input array to potentially compute mask for
skipna : bool
boolean for whether NaNs should be skipped
fill_value : Any
value to fill NaNs with
fill_value_typ : str
Set to '+inf' or '-inf' to handle dtype-specific infinities
mask : Optional[np.ndarray]
nan-mask if known
Returns
-------
values : ndarray
Potential copy of input value array
mask : Optional[ndarray[bool]]
Mask for values, if deemed necessary to compute
dtype : dtype
dtype for values
dtype_max : dtype
platform independent dtype
fill_value : Any
fill value used
"""
mask = _maybe_get_mask(values, skipna, mask)
if is_datetime64tz_dtype(values):
# com.values_from_object returns M8[ns] dtype instead of tz-aware,
# so this case must be handled separately from the rest
dtype = values.dtype
values = getattr(values, "_values", values)
else:
values = com.values_from_object(values)
dtype = values.dtype
if is_datetime_or_timedelta_dtype(values) or is_datetime64tz_dtype(values):
# changing timedelta64/datetime64 to int64 needs to happen after
# finding `mask` above
values = getattr(values, "asi8", values)
values = values.view(np.int64)
dtype_ok = _na_ok_dtype(dtype)
# get our fill value (in case we need to provide an alternative
# dtype for it)
fill_value = _get_fill_value(
dtype, fill_value=fill_value, fill_value_typ=fill_value_typ
)
copy = (mask is not None) and (fill_value is not None)
if skipna and copy:
values = values.copy()
if dtype_ok:
np.putmask(values, mask, fill_value)
# promote if needed
else:
values, changed = maybe_upcast_putmask(values, mask, fill_value)
# return a platform independent precision dtype
dtype_max = dtype
if is_integer_dtype(dtype) or is_bool_dtype(dtype):
dtype_max = np.int64
elif is_float_dtype(dtype):
dtype_max = np.float64
return values, mask, dtype, dtype_max, fill_value
def _isfinite(values):
if is_datetime_or_timedelta_dtype(values):
return isna(values)
if (
is_complex_dtype(values)
or is_float_dtype(values)
or is_integer_dtype(values)
or is_bool_dtype(values)
):
return ~np.isfinite(values)
return ~np.isfinite(values.astype("float64"))
def _na_ok_dtype(dtype):
# TODO: what about datetime64tz? PeriodDtype?
return not issubclass(dtype.type, (np.integer, np.timedelta64, np.datetime64))
def _wrap_results(result, dtype, fill_value=None):
""" wrap our results if needed """
if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
if fill_value is None:
# GH#24293
fill_value = iNaT
if not isinstance(result, np.ndarray):
tz = getattr(dtype, "tz", None)
assert not isna(fill_value), "Expected non-null fill_value"
if result == fill_value:
result = np.nan
result = tslibs.Timestamp(result, tz=tz)
else:
result = result.view(dtype)
elif is_timedelta64_dtype(dtype):
if not isinstance(result, np.ndarray):
if result == fill_value:
result = np.nan
# raise if we have a timedelta64[ns] which is too large
if np.fabs(result) > _int64_max:
raise ValueError("overflow in timedelta operation")
result = tslibs.Timedelta(result, unit="ns")
else:
result = result.astype("i8").view(dtype)
return result
def _na_for_min_count(values, axis):
"""Return the missing value for `values`
Parameters
----------
values : ndarray
axis : int or None
axis for the reduction
Returns
-------
result : scalar or ndarray
For 1-D values, returns a scalar of the correct missing type.
For 2-D values, returns a 1-D array where each element is missing.
"""
# we either return np.nan or pd.NaT
if is_numeric_dtype(values):
values = values.astype("float64")
fill_value = na_value_for_dtype(values.dtype)
if values.ndim == 1:
return fill_value
else:
result_shape = values.shape[:axis] + values.shape[axis + 1 :]
result = np.empty(result_shape, dtype=values.dtype)
result.fill(fill_value)
return result
def nanany(values, axis=None, skipna=True, mask=None):
"""
Check if any elements along an axis evaluate to True.
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : bool
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 2])
>>> nanops.nanany(s)
True
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([np.nan])
>>> nanops.nanany(s)
False
"""
values, _, _, _, _ = _get_values(values, skipna, fill_value=False, mask=mask)
return values.any(axis)
def nanall(values, axis=None, skipna=True, mask=None):
"""
Check if all elements along an axis evaluate to True.
Parameters
----------
values : ndarray
axis: int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : bool
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 2, np.nan])
>>> nanops.nanall(s)
True
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 0])
>>> nanops.nanall(s)
False
"""
values, _, _, _, _ = _get_values(values, skipna, fill_value=True, mask=mask)
return values.all(axis)
@disallow("M8")
def nansum(values, axis=None, skipna=True, min_count=0, mask=None):
"""
Sum the elements along an axis ignoring NaNs
Parameters
----------
values : ndarray[dtype]
axis: int, optional
skipna : bool, default True
min_count: int, default 0
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : dtype
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 2, np.nan])
>>> nanops.nansum(s)
3.0
"""
values, mask, dtype, dtype_max, _ = _get_values(
values, skipna, fill_value=0, mask=mask
)
dtype_sum = dtype_max
if is_float_dtype(dtype):
dtype_sum = dtype
elif is_timedelta64_dtype(dtype):
dtype_sum = np.float64
the_sum = values.sum(axis, dtype=dtype_sum)
the_sum = _maybe_null_out(the_sum, axis, mask, values.shape, min_count=min_count)
return _wrap_results(the_sum, dtype)
@disallow("M8", DatetimeTZDtype)
@bottleneck_switch()
def nanmean(values, axis=None, skipna=True, mask=None):
"""
Compute the mean of the element along an axis ignoring NaNs
Parameters
----------
values : ndarray
axis: int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 2, np.nan])
>>> nanops.nanmean(s)
1.5
"""
values, mask, dtype, dtype_max, _ = _get_values(
values, skipna, fill_value=0, mask=mask
)
dtype_sum = dtype_max
dtype_count = np.float64
if (
is_integer_dtype(dtype)
or is_timedelta64_dtype(dtype)
or is_datetime64_dtype(dtype)
or is_datetime64tz_dtype(dtype)
):
dtype_sum = np.float64
elif is_float_dtype(dtype):
dtype_sum = dtype
dtype_count = dtype
count = _get_counts(values.shape, mask, axis, dtype=dtype_count)
the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum))
if axis is not None and getattr(the_sum, "ndim", False):
with np.errstate(all="ignore"):
# suppress division by zero warnings
the_mean = the_sum / count
ct_mask = count == 0
if ct_mask.any():
the_mean[ct_mask] = np.nan
else:
the_mean = the_sum / count if count > 0 else np.nan
return _wrap_results(the_mean, dtype)
@disallow("M8")
@bottleneck_switch()
def nanmedian(values, axis=None, skipna=True, mask=None):
"""
Parameters
----------
values : ndarray
axis: int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, np.nan, 2, 2])
>>> nanops.nanmedian(s)
2.0
"""
def get_median(x):
mask = notna(x)
if not skipna and not mask.all():
return np.nan
return np.nanmedian(x[mask])
values, mask, dtype, dtype_max, _ = _get_values(values, skipna, mask=mask)
if not is_float_dtype(values):
values = values.astype("f8")
if mask is not None:
values[mask] = np.nan
if axis is None:
values = values.ravel()
notempty = values.size
# an array from a frame
if values.ndim > 1:
# there's a non-empty array to apply over otherwise numpy raises
if notempty:
if not skipna:
return _wrap_results(
np.apply_along_axis(get_median, axis, values), dtype
)
# fastpath for the skipna case
return _wrap_results(np.nanmedian(values, axis), dtype)
# must return the correct shape, but median is not defined for the
# empty set so return nans of shape "everything but the passed axis"
# since "axis" is where the reduction would occur if we had a nonempty
# array
shp = np.array(values.shape)
dims = np.arange(values.ndim)
ret = np.empty(shp[dims != axis])
ret.fill(np.nan)
return _wrap_results(ret, dtype)
# otherwise return a scalar value
return _wrap_results(get_median(values) if notempty else np.nan, dtype)
def _get_counts_nanvar(
value_counts: Tuple[int],
mask: Optional[np.ndarray],
axis: Optional[int],
ddof: int,
dtype=float,
) -> Tuple[Union[int, np.ndarray], Union[int, np.ndarray]]:
""" Get the count of non-null values along an axis, accounting
for degrees of freedom.
Parameters
----------
values_shape : Tuple[int]
shape tuple from values ndarray, used if mask is None
mask : Optional[ndarray[bool]]
locations in values that should be considered missing
axis : Optional[int]
axis to count along
ddof : int
degrees of freedom
dtype : type, optional
type to use for count
Returns
-------
count : scalar or array
d : scalar or array
"""
dtype = _get_dtype(dtype)
count = _get_counts(value_counts, mask, axis, dtype=dtype)
d = count - dtype.type(ddof)
# always return NaN, never inf
if is_scalar(count):
if count <= ddof:
count = np.nan
d = np.nan
else:
mask2 = count <= ddof # type: np.ndarray
if mask2.any():
np.putmask(d, mask2, np.nan)
np.putmask(count, mask2, np.nan)
return count, d
@disallow("M8")
@bottleneck_switch(ddof=1)
def nanstd(values, axis=None, skipna=True, ddof=1, mask=None):
"""
Compute the standard deviation along given axis while ignoring NaNs
Parameters
----------
values : ndarray
axis: int, optional
skipna : bool, default True
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, np.nan, 2, 3])
>>> nanops.nanstd(s)
1.0
"""
result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask))
return _wrap_results(result, values.dtype)
@disallow("M8")
@bottleneck_switch(ddof=1)
def nanvar(values, axis=None, skipna=True, ddof=1, mask=None):
"""
Compute the variance along given axis while ignoring NaNs
Parameters
----------
values : ndarray
axis: int, optional
skipna : bool, default True
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, np.nan, 2, 3])
>>> nanops.nanvar(s)
1.0
"""
values = com.values_from_object(values)
dtype = values.dtype
mask = _maybe_get_mask(values, skipna, mask)
if is_any_int_dtype(values):
values = values.astype("f8")
if mask is not None:
values[mask] = np.nan
if is_float_dtype(values):
count, d = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)
else:
count, d = _get_counts_nanvar(values.shape, mask, axis, ddof)
if skipna and mask is not None:
values = values.copy()
np.putmask(values, mask, 0)
# xref GH10242
# Compute variance via two-pass algorithm, which is stable against
# cancellation errors and relatively accurate for small numbers of
# observations.
#
# See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count
if axis is not None:
avg = np.expand_dims(avg, axis)
sqr = _ensure_numeric((avg - values) ** 2)
if mask is not None:
np.putmask(sqr, mask, 0)
result = sqr.sum(axis=axis, dtype=np.float64) / d
# Return variance as np.float64 (the datatype used in the accumulator),
# unless we were dealing with a float array, in which case use the same
# precision as the original values array.
if is_float_dtype(dtype):
result = result.astype(dtype)
return _wrap_results(result, values.dtype)
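# Worked illustration of the two-pass computation above (a sketch, not part of
# the pandas API): for values = [1, NaN, 2, 3] with skipna=True and ddof=1 the
# NaN slot is zeroed, count = 3 and d = 2, avg = (1 + 2 + 3) / 3 = 2.0, the
# masked squared deviations sum to (1-2)**2 + 0 + (2-2)**2 + (3-2)**2 = 2.0,
# and the variance is 2.0 / 2 = 1.0, matching the docstring example.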
@disallow("M8", "m8")
def nansem(values, axis=None, skipna=True, ddof=1, mask=None):
"""
Compute the standard error in the mean along given axis while ignoring NaNs
Parameters
----------
values : ndarray
axis: int, optional
skipna : bool, default True
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float64
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, np.nan, 2, 3])
>>> nanops.nansem(s)
0.5773502691896258
"""
# This checks if non-numeric-like data is passed with numeric_only=False
# and raises a TypeError otherwise
nanvar(values, axis, skipna, ddof=ddof, mask=mask)
mask = _maybe_get_mask(values, skipna, mask)
if not is_float_dtype(values.dtype):
values = values.astype("f8")
count, _ = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)
var = nanvar(values, axis, skipna, ddof=ddof)
return np.sqrt(var) / np.sqrt(count)
def _nanminmax(meth, fill_value_typ):
@bottleneck_switch(name="nan" + meth)
def reduction(values, axis=None, skipna=True, mask=None):
values, mask, dtype, dtype_max, fill_value = _get_values(
values, skipna, fill_value_typ=fill_value_typ, mask=mask
)
if (axis is not None and values.shape[axis] == 0) or values.size == 0:
try:
result = getattr(values, meth)(axis, dtype=dtype_max)
result.fill(np.nan)
except (AttributeError, TypeError, ValueError, np.core._internal.AxisError):
result = np.nan
else:
result = getattr(values, meth)(axis)
result = _wrap_results(result, dtype, fill_value)
return _maybe_null_out(result, axis, mask, values.shape)
return reduction
nanmin = _nanminmax("min", fill_value_typ="+inf")
nanmax = _nanminmax("max", fill_value_typ="-inf")
@disallow("O")
def nanargmax(values, axis=None, skipna=True, mask=None):
"""
Parameters
----------
values : ndarray
axis: int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : int
The index of max value in specified axis or -1 in the NA case
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 2, 3, np.nan, 4])
>>> nanops.nanargmax(s)
4
"""
values, mask, dtype, _, _ = _get_values(
values, True, fill_value_typ="-inf", mask=mask
)
result = values.argmax(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
@disallow("O")
def nanargmin(values, axis=None, skipna=True, mask=None):
"""
Parameters
----------
values : ndarray
axis: int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : int
The index of min value in specified axis or -1 in the NA case
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 2, 3, np.nan, 4])
>>> nanops.nanargmin(s)
0
"""
values, mask, dtype, _, _ = _get_values(
values, True, fill_value_typ="+inf", mask=mask
)
result = values.argmin(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
@disallow("M8", "m8")
def nanskew(values, axis=None, skipna=True, mask=None):
""" Compute the sample skewness.
The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G1. The algorithm computes this coefficient directly
from the second and third central moment.
Parameters
----------
values : ndarray
axis: int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float64
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1,np.nan, 1, 2])
>>> nanops.nanskew(s)
1.7320508075688787
"""
values = com.values_from_object(values)
mask = _maybe_get_mask(values, skipna, mask)
if not is_float_dtype(values.dtype):
values = values.astype("f8")
count = _get_counts(values.shape, mask, axis)
else:
count = _get_counts(values.shape, mask, axis, dtype=values.dtype)
if skipna and mask is not None:
values = values.copy()
np.putmask(values, mask, 0)
mean = values.sum(axis, dtype=np.float64) / count
if axis is not None:
mean = np.expand_dims(mean, axis)
adjusted = values - mean
if skipna and mask is not None:
np.putmask(adjusted, mask, 0)
adjusted2 = adjusted ** 2
adjusted3 = adjusted2 * adjusted
m2 = adjusted2.sum(axis, dtype=np.float64)
m3 = adjusted3.sum(axis, dtype=np.float64)
# floating point error
#
# #18044 in _libs/windows.pyx calc_skew follow this behavior
# to fix the fperr to treat m2 <1e-14 as zero
m2 = _zero_out_fperr(m2)
m3 = _zero_out_fperr(m3)
with np.errstate(invalid="ignore", divide="ignore"):
result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2 ** 1.5)
dtype = values.dtype
if is_float_dtype(dtype):
result = result.astype(dtype)
if isinstance(result, np.ndarray):
result = np.where(m2 == 0, 0, result)
result[count < 3] = np.nan
return result
else:
result = 0 if m2 == 0 else result
if count < 3:
return np.nan
return result
@disallow("M8", "m8")
def nankurt(values, axis=None, skipna=True, mask=None):
"""
Compute the sample excess kurtosis
The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G2, computed directly from the second and fourth
central moment.
Parameters
----------
values : ndarray
axis: int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float64
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1,np.nan, 1, 3, 2])
>>> nanops.nankurt(s)
-1.2892561983471076
"""
values = com.values_from_object(values)
mask = _maybe_get_mask(values, skipna, mask)
if not is_float_dtype(values.dtype):
values = values.astype("f8")
count = _get_counts(values.shape, mask, axis)
else:
count = _get_counts(values.shape, mask, axis, dtype=values.dtype)
if skipna and mask is not None:
values = values.copy()
np.putmask(values, mask, 0)
mean = values.sum(axis, dtype=np.float64) / count
if axis is not None:
mean = np.expand_dims(mean, axis)
adjusted = values - mean
if skipna and mask is not None:
np.putmask(adjusted, mask, 0)
adjusted2 = adjusted ** 2
adjusted4 = adjusted2 ** 2
m2 = adjusted2.sum(axis, dtype=np.float64)
m4 = adjusted4.sum(axis, dtype=np.float64)
with np.errstate(invalid="ignore", divide="ignore"):
adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3))
numer = count * (count + 1) * (count - 1) * m4
denom = (count - 2) * (count - 3) * m2 ** 2
# floating point error
#
# #18044 in _libs/windows.pyx calc_kurt follow this behavior
# to fix the fperr to treat denom <1e-14 as zero
numer = _zero_out_fperr(numer)
denom = _zero_out_fperr(denom)
if not isinstance(denom, np.ndarray):
# if ``denom`` is a scalar, check these corner cases first before
# doing division
if count < 4:
return np.nan
if denom == 0:
return 0
with np.errstate(invalid="ignore", divide="ignore"):
result = numer / denom - adj
dtype = values.dtype
if is_float_dtype(dtype):
result = result.astype(dtype)
if isinstance(result, np.ndarray):
result = np.where(denom == 0, 0, result)
result[count < 4] = np.nan
return result
@disallow("M8", "m8")
def nanprod(values, axis=None, skipna=True, min_count=0, mask=None):
"""
Parameters
----------
values : ndarray[dtype]
axis: int, optional
skipna : bool, default True
min_count: int, default 0
mask : ndarray[bool], optional
nan-mask if known
    Returns
    -------
    result : dtype
        The product of all elements on a given axis (NaNs are treated as 1).
    Examples
    --------
    >>> import pandas.core.nanops as nanops
    >>> s = pd.Series([1, 2, 3, np.nan])
    >>> nanops.nanprod(s)
    6.0
    """
mask = _maybe_get_mask(values, skipna, mask)
if skipna and mask is not None:
values = values.copy()
values[mask] = 1
result = values.prod(axis)
return _maybe_null_out(result, axis, mask, values.shape, min_count=min_count)
def _maybe_arg_null_out(
result: np.ndarray, axis: Optional[int], mask: Optional[np.ndarray], skipna: bool
) -> Union[np.ndarray, int]:
# helper function for nanargmin/nanargmax
if mask is None:
return result
if axis is None or not getattr(result, "ndim", False):
if skipna:
if mask.all():
result = -1
else:
if mask.any():
result = -1
else:
if skipna:
na_mask = mask.all(axis)
else:
na_mask = mask.any(axis)
if na_mask.any():
result[na_mask] = -1
return result
def _get_counts(
values_shape: Tuple[int],
mask: Optional[np.ndarray],
axis: Optional[int],
dtype=float,
) -> Union[int, np.ndarray]:
""" Get the count of non-null values along an axis
Parameters
----------
values_shape : Tuple[int]
shape tuple from values ndarray, used if mask is None
mask : Optional[ndarray[bool]]
locations in values that should be considered missing
axis : Optional[int]
axis to count along
dtype : type, optional
type to use for count
Returns
-------
count : scalar or array
"""
dtype = _get_dtype(dtype)
if axis is None:
if mask is not None:
n = mask.size - mask.sum()
else:
n = np.prod(values_shape)
return dtype.type(n)
if mask is not None:
count = mask.shape[axis] - mask.sum(axis)
else:
count = values_shape[axis]
if is_scalar(count):
return dtype.type(count)
try:
return count.astype(dtype)
except AttributeError:
return np.array(count, dtype=dtype)
def _maybe_null_out(
result: np.ndarray,
axis: Optional[int],
mask: Optional[np.ndarray],
shape: Tuple,
min_count: int = 1,
) -> np.ndarray:
if mask is not None and axis is not None and getattr(result, "ndim", False):
null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0
if np.any(null_mask):
if is_numeric_dtype(result):
if np.iscomplexobj(result):
result = result.astype("c16")
else:
result = result.astype("f8")
result[null_mask] = np.nan
else:
# GH12941, use None to auto cast null
result[null_mask] = None
elif result is not tslibs.NaT:
if mask is not None:
null_mask = mask.size - mask.sum()
else:
null_mask = np.prod(shape)
if null_mask < min_count:
result = np.nan
return result
def _zero_out_fperr(arg):
# #18044 reference this behavior to fix rolling skew/kurt issue
if isinstance(arg, np.ndarray):
with np.errstate(invalid="ignore"):
return np.where(np.abs(arg) < 1e-14, 0, arg)
else:
return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg
@disallow("M8", "m8")
def nancorr(a, b, method="pearson", min_periods=None):
"""
a, b: ndarrays
"""
if len(a) != len(b):
raise AssertionError("Operands to nancorr must have same size")
if min_periods is None:
min_periods = 1
valid = notna(a) & notna(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
f = get_corr_func(method)
return f(a, b)
def get_corr_func(method):
if method in ["kendall", "spearman"]:
from scipy.stats import kendalltau, spearmanr
elif callable(method):
return method
def _pearson(a, b):
return np.corrcoef(a, b)[0, 1]
def _kendall(a, b):
rs = kendalltau(a, b)
if isinstance(rs, tuple):
return rs[0]
return rs
def _spearman(a, b):
return spearmanr(a, b)[0]
_cor_methods = {"pearson": _pearson, "kendall": _kendall, "spearman": _spearman}
return _cor_methods[method]
@disallow("M8", "m8")
def nancov(a, b, min_periods=None):
if len(a) != len(b):
raise AssertionError("Operands to nancov must have same size")
if min_periods is None:
min_periods = 1
valid = notna(a) & notna(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
return np.cov(a, b)[0, 1]
def _ensure_numeric(x):
if isinstance(x, np.ndarray):
if is_integer_dtype(x) or is_bool_dtype(x):
x = x.astype(np.float64)
elif is_object_dtype(x):
try:
x = x.astype(np.complex128)
except (TypeError, ValueError):
x = x.astype(np.float64)
else:
if not np.any(np.imag(x)):
x = x.real
elif not (is_float(x) or is_integer(x) or is_complex(x)):
try:
x = float(x)
except Exception:
try:
x = complex(x)
except Exception:
raise TypeError(
"Could not convert {value!s} to numeric".format(value=x)
)
return x
# NA-friendly array comparisons
def make_nancomp(op):
def f(x, y):
xmask = isna(x)
ymask = isna(y)
mask = xmask | ymask
with np.errstate(all="ignore"):
result = op(x, y)
if mask.any():
if is_bool_dtype(result):
result = result.astype("O")
np.putmask(result, mask, np.nan)
return result
return f
nangt = make_nancomp(operator.gt)
nange = make_nancomp(operator.ge)
nanlt = make_nancomp(operator.lt)
nanle = make_nancomp(operator.le)
naneq = make_nancomp(operator.eq)
nanne = make_nancomp(operator.ne)
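# Behaviour sketch for the comparisons above: where either operand is missing,
# the result keeps NaN instead of numpy's plain False, e.g.
#   nangt(np.array([1.0, np.nan]), np.array([0.0, 0.0]))
#   -> array([True, nan], dtype=object)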
def _nanpercentile_1d(values, mask, q, na_value, interpolation):
"""
    Wrapper for np.percentile that skips missing values, specialized to the
1-dimensional case.
Parameters
----------
values : array over which to find quantiles
mask : ndarray[bool]
locations in values that should be considered missing
q : scalar or array of quantile indices to find
na_value : scalar
value to return for empty or all-null values
interpolation : str
Returns
-------
quantiles : scalar or array
"""
# mask is Union[ExtensionArray, ndarray]
values = values[~mask]
if len(values) == 0:
if lib.is_scalar(q):
return na_value
else:
return np.array([na_value] * len(q), dtype=values.dtype)
return np.percentile(values, q, interpolation=interpolation)
def nanpercentile(values, q, axis, na_value, mask, ndim, interpolation):
"""
    Wrapper for np.percentile that skips missing values.
Parameters
----------
values : array over which to find quantiles
q : scalar or array of quantile indices to find
axis : {0, 1}
na_value : scalar
value to return for empty or all-null values
mask : ndarray[bool]
locations in values that should be considered missing
ndim : {1, 2}
interpolation : str
Returns
-------
quantiles : scalar or array
"""
if not lib.is_scalar(mask) and mask.any():
if ndim == 1:
return _nanpercentile_1d(
values, mask, q, na_value, interpolation=interpolation
)
else:
# for nonconsolidatable blocks mask is 1D, but values 2D
if mask.ndim < values.ndim:
mask = mask.reshape(values.shape)
if axis == 0:
values = values.T
mask = mask.T
result = [
_nanpercentile_1d(val, m, q, na_value, interpolation=interpolation)
for (val, m) in zip(list(values), list(mask))
]
result = np.array(result, dtype=values.dtype, copy=False).T
return result
else:
return np.percentile(values, q, axis=axis, interpolation=interpolation)
| apache-2.0 |
gundramleifert/exp_tf | models/lp/bdlstm_lp_v3.py | 1 | 13744 | '''
Author: Tobi and Gundram
'''
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops.rnn import bidirectional_rnn
from util.LoaderUtil import read_image_list, get_list_vals
from random import shuffle
from util.STR2CTC import get_charmap_lp, get_charmap_lp_inv
import os
import time
import numpy as np
import matplotlib.pyplot as plt
# Goes done to 10%
INPUT_PATH_TRAIN = './private/lists/lp_only_train.lst'
INPUT_PATH_VAL = './private/lists/lp_only_val.lst'
cm, nClasses = get_charmap_lp()
# Additional NaC Channel
nClasses += 1
nEpochs = 15
batchSize = 4
learningRate = 0.001
momentum = 0.9
# It is assumed that the TextLines are ALL saved with a consistent height of imgH
imgH = 48
# Depending on the size the image is cropped or zero padded
imgW = 256
channels = 1
nHiddenLSTM1 = 256
os.chdir("../..")
trainList = read_image_list(INPUT_PATH_TRAIN)
stepsPerEpocheTrain = len(trainList) / batchSize
valList = read_image_list(INPUT_PATH_VAL)
stepsPerEpocheVal = len(valList) / batchSize
def inference(images, seqLen):
with tf.variable_scope('conv1') as scope:
kernel = tf.Variable(tf.truncated_normal([6, 5, channels, 32], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(images, kernel, [1, 4, 3, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[32]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
# _activation_summary(conv1)
# norm1 = tf.nn.local_response_normalization(conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm1')
seqFloat = tf.to_float(seqLen)
seqL2 = tf.ceil(seqFloat * 0.33)
with tf.variable_scope('conv2') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(conv1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[64]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
# _activation_summary(conv2)
# norm2
# norm2 = tf.nn.local_response_normalization(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm2')
pool2 = tf.nn.max_pool(conv2, ksize=[1, 4, 2, 1], strides=[1, 4, 2, 1], padding='SAME', name='pool2')
seqL3 = tf.ceil(seqL2 * 0.5)
with tf.variable_scope('conv3') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 3, 64, 128], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[128]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv3 = tf.nn.relu(pre_activation, name=scope.name)
pool3 = tf.nn.max_pool(conv3, ksize=[1, 3, 1, 1], strides=[1, 3, 1, 1], padding='SAME', name='pool2')
# NO POOLING HERE -> CTC needs an appropriate length.
seqLenAfterConv = tf.to_int32(seqL3)
with tf.variable_scope('RNN_Prep') as scope:
# (#batch Y X Z) --> (X #batch Y Z)
rnnIn = tf.transpose(pool3, [2, 0, 1, 3])
# (X #batch Y Z) --> (X #batch Y*Z)
shape = rnnIn.get_shape()
steps = shape[0]
rnnIn = tf.reshape(rnnIn, tf.pack([shape[0], shape[1], -1]))
# (X #batch Y*Z) --> (X*#batch Y*Z)
shape = rnnIn.get_shape()
rnnIn = tf.reshape(rnnIn, tf.pack([-1, shape[2]]))
# (X*#batch Y*Z) --> list of X tensors of shape (#batch, Y*Z)
rnnIn = tf.split(0, steps, rnnIn)
with tf.variable_scope('BLSTM1') as scope:
forwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
backwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
outputs, _, _ = bidirectional_rnn(forwardH1, backwardH1, rnnIn, dtype=tf.float32)
fbH1rs = [tf.reshape(t, [batchSize, 2, nHiddenLSTM1]) for t in outputs]
# outH1 = [tf.reduce_sum(tf.mul(t, weightsOutH1), reduction_indices=1) + biasesOutH1 for t in fbH1rs]
outH1 = [tf.reduce_sum(t, reduction_indices=1) for t in fbH1rs]
with tf.variable_scope('LOGIT') as scope:
weightsClasses = tf.Variable(tf.truncated_normal([nHiddenLSTM1, nClasses],
stddev=np.sqrt(2.0 / nHiddenLSTM1)))
biasesClasses = tf.Variable(tf.zeros([nClasses]))
logitsFin = [tf.matmul(t, weightsClasses) + biasesClasses for t in outH1]
logits3d = tf.pack(logitsFin)
return logits3d, seqLenAfterConv
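# Shape walkthrough for the default settings above (batchSize=4, imgH=48,
# imgW=256, channels=1); a sketch to make the CTC sequence lengths explicit:
#   conv1 (stride 4x3, SAME)  -> (4, 12, 86, 32)
#   conv2 + pool2 (4x2)       -> (4, 3, 43, 64)
#   conv3 + pool3 (3x1)       -> (4, 1, 43, 128)
# so the RNN sees 43 time steps of 128 features per image, and
# seqLenAfterConv = ceil(ceil(seqLen * 0.33) * 0.5) approximates the same
# width reduction (conv stride 3 followed by pool stride 2) per sample.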
def loss(logits3d, tgt, seqLenAfterConv):
loss = tf.reduce_mean(ctc.ctc_loss(logits3d, tgt, seqLenAfterConv))
return loss
print('Defining graph')
graph = tf.Graph()
with graph.as_default():
####Graph input
inputX = tf.placeholder(tf.float32, shape=(batchSize, imgH, imgW, channels))
targetIxs = tf.placeholder(tf.int64)
targetVals = tf.placeholder(tf.int32)
targetShape = tf.placeholder(tf.int64)
targetY = tf.SparseTensor(targetIxs, targetVals, targetShape)
seqLengths = tf.placeholder(tf.int32, shape=(batchSize))
logits3d, seqAfterConv = inference(inputX, seqLengths)
loss = loss(logits3d, targetY, seqAfterConv)
optimizer = tf.train.MomentumOptimizer(learningRate, momentum).minimize(loss)
# pred = tf.to_int32(ctc.ctc_beam_search_decoder(logits3d, seqAfterConv, merge_repeated=False)[0][0])
pred = tf.to_int32(ctc.ctc_greedy_decoder(logits3d, seqAfterConv)[0][0])
edist = tf.edit_distance(pred, targetY, normalize=False)
tgtLens = tf.to_float(tf.size(targetY.values))
err = tf.reduce_sum(edist) / tgtLens
saver = tf.train.Saver()
with tf.Session(graph=graph) as session:
# writer = tf.train.SummaryWriter('./log', session.graph)
print('Initializing')
tf.global_variables_initializer().run()
# ckpt = tf.train.get_checkpoint_state("./private/models/lp2/")
# if ckpt and ckpt.model_checkpoint_path:
# saver.restore(session, ckpt.model_checkpoint_path)
# print(ckpt)
# workList = valList[:]
# errV = 0
# lossV = 0
# timeVS = time.time()
# cmInv = get_charmap_lp_inv()
# for bStep in range(stepsPerEpocheVal):
# bList, workList = workList[:batchSize], workList[batchSize:]
# batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
# imgW,
# mvn=True)
# feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
# targetShape: batchTargetShape, seqLengths: batchSeqLengths}
# lossB, aErr, p = session.run([loss, err, pred], feed_dict=feedDict)
# print(aErr)
# res = []
# for idx in p.values:
# res.append(cmInv[idx])
# print(res)
# # print(p)
# plt.imshow(batchInputs[0,:,:,0], cmap=plt.cm.gray)
# plt.show()
#
# lossV += lossB
# errV += aErr
# print('Val: CTC-loss ', lossV)
# errVal = errV / stepsPerEpocheVal
# print('Val: CER ', errVal)
# print('Val time ', time.time() - timeVS)
for epoch in range(nEpochs):
workList = trainList[:]
shuffle(workList)
print('Epoch', epoch + 1, '...')
lossT = 0
errT = 0
timeTS = time.time()
for bStep in range(stepsPerEpocheTrain):
bList, workList = workList[:batchSize], workList[batchSize:]
batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
imgW,
mvn=True)
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, seqLengths: batchSeqLengths}
_, lossB, aErr = session.run([optimizer, loss, err], feed_dict=feedDict)
# _, lossB, aErr, sET, sLT = session.run([optimizer, loss, err, err_train, loss_train], feed_dict=feedDict)
lossT += lossB
# writer.add_summary(sET, epoch * stepsPerEpocheTrain + bStep)
# writer.add_summary(sLT, epoch * stepsPerEpocheTrain + bStep)
errT += aErr
print('Train: CTC-loss ', lossT)
cerT = errT / stepsPerEpocheTrain
print('Train: CER ', cerT)
print('Train time ', time.time() - timeTS)
workList = valList[:]
errV = 0
lossV = 0
timeVS = time.time()
for bStep in range(stepsPerEpocheVal):
bList, workList = workList[:batchSize], workList[batchSize:]
batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
imgW,
mvn=True)
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, seqLengths: batchSeqLengths}
lossB, aErr = session.run([loss, err], feed_dict=feedDict)
# lossB, aErr, sE, sL = session.run([loss, err, err_val, loss_val], feed_dict=feedDict)
# writer.add_summary(sE, epoch*stepsPerEpocheVal + bStep)
# writer.add_summary(sL, epoch * stepsPerEpocheVal + bStep)
lossV += lossB
errV += aErr
print('Val: CTC-loss ', lossV)
errVal = errV / stepsPerEpocheVal
print('Val: CER ', errVal)
print('Val time ', time.time() - timeVS)
# Write a checkpoint.
checkpoint_file = os.path.join('./private/models/lp3/', 'checkpoint')
saver.save(session, checkpoint_file, global_step=epoch)
# Defining graph
# Initializing
# Epoch 1 ...
# Train: CTC-loss 127770.795485
# Train: CER 0.632449947914
# Train time 1623.64185095
# Val: CTC-loss 1690.94079254
# Val: CER 0.0842024714947
# Val time 53.6754989624
# Epoch 2 ...
# Train: CTC-loss 16142.664189
# Train: CER 0.0719488524786
# Train time 1643.34809399
# Val: CTC-loss 1180.64658372
# Val: CER 0.0566876666149
# Val time 52.8805279732
# Epoch 3 ...
# Train: CTC-loss 12782.7443373
# Train: CER 0.0572215012803
# Train time 1642.10430193
# Val: CTC-loss 1087.44890879
# Val: CER 0.0513329066634
# Val time 53.3245558739
# Epoch 4 ...
# Train: CTC-loss 11220.8490879
# Train: CER 0.0502287249668
# Train time 1634.754421
# Val: CTC-loss 1082.34532301
# Val: CER 0.0496852177829
# Val time 52.6013569832
# Epoch 5 ...
# Train: CTC-loss 10291.8197946
# Train: CER 0.0467629148429
# Train time 1625.04631186
# Val: CTC-loss 1040.13378663
# Val: CER 0.0486953979631
# Val time 52.304046154
# Epoch 6 ...
# Train: CTC-loss 9489.53495804
# Train: CER 0.0432804138749
# Train time 1622.64882994
# Val: CTC-loss 1023.34265649
# Val: CER 0.0468253398041
# Val time 52.2032320499
# Epoch 7 ...
# Train: CTC-loss 8878.37165775
# Train: CER 0.0410694975078
# Train time 1616.60257196
# Val: CTC-loss 998.865922881
# Val: CER 0.0455678806404
# Val time 51.6127068996
# Epoch 8 ...
# Train: CTC-loss 8349.18803357
# Train: CER 0.0387621530634
# Train time 1613.39381695
# Val: CTC-loss 1005.80148646
# Val: CER 0.0450465412537
# Val time 51.599864006
# Epoch 9 ...
# Train: CTC-loss 7863.9768993
# Train: CER 0.0373975759733
# Train time 2364.71975899
# Val: CTC-loss 1025.37011006
# Val: CER 0.0448487868408
# Val time 127.908433914
# Epoch 10 ...
# Train: CTC-loss 7474.0068357
# Train: CER 0.0360007262832
# Train time 5664.99592304
# Val: CTC-loss 1030.9719641
# Val: CER 0.045286204497
# Val time 240.020387888
# Epoch 11 ...
# Train: CTC-loss 7192.40476506
# Train: CER 0.0348674249148
# Train time 7002.88452506
# Val: CTC-loss 1045.4683604
# Val: CER 0.0458950943997
# Val time 232.987906933
# Epoch 12 ...
# Train: CTC-loss 6885.85843309
# Train: CER 0.0336379728044
# Train time 6940.85707903
# Val: CTC-loss 1063.69808645
# Val: CER 0.0471115465313
# Val time 226.829921961
# Epoch 13 ...
# Train: CTC-loss 6587.33486168
# Train: CER 0.0324386308837
# Train time 7016.62783194
# Val: CTC-loss 1115.10326525
# Val: CER 0.0477711562912
# Val time 233.850280046
# Epoch 14 ...
# Train: CTC-loss 6396.92553726
# Train: CER 0.0316869818918
# Train time 6944.38106823
# Val: CTC-loss 1161.44386697
# Val: CER 0.0474798077444
# Val time 237.52304101
# Epoch 15 ...
# Train: CTC-loss 6218.94439296
# Train: CER 0.0306587755172
# Train time 6963.23701501
# Val: CTC-loss 1119.81188818
# Val: CER 0.0477910614709
# Val time 235.797486067 | apache-2.0 |
kaichogami/scikit-learn | sklearn/utils/extmath.py | 7 | 25862 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# Giorgio Patrini
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse, csr_matrix
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array
from ..exceptions import NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
if not isinstance(X, csr_matrix):
X = csr_matrix(X)
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
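# Minimal check (illustrative only): for X = np.array([[3., 4.], [0., 0.]]),
# row_norms(X) -> array([5., 0.]) and row_norms(X, squared=True) -> array([25., 0.]),
# i.e. the same values as np.sqrt((X * X).sum(axis=1)) without the temporary.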
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Falling back to np.dot. '
'Data must be of same type of either '
'32 or 64 bit float for the BLAS function, gemm, to be '
'used for an efficient dot operation. ',
NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.exceptions import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
def randomized_range_finder(A, size, n_iter=2,
power_iteration_normalizer='auto',
random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix.
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
power_iteration_normalizer: 'auto' (default), 'QR', 'LU', 'none'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter`<=2 and switches to LU otherwise.
.. versionadded:: 0.18
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
# Generating normal random vectors with shape: (A.shape[1], size)
Q = random_state.normal(size=(A.shape[1], size))
# Deal with "auto" mode
if power_iteration_normalizer == 'auto':
if n_iter <= 2:
power_iteration_normalizer = 'none'
else:
power_iteration_normalizer = 'LU'
# Perform power iterations with Q to further 'imprint' the top
# singular vectors of A in Q
for i in range(n_iter):
if power_iteration_normalizer == 'none':
Q = safe_sparse_dot(A, Q)
Q = safe_sparse_dot(A.T, Q)
elif power_iteration_normalizer == 'LU':
Q, _ = linalg.lu(safe_sparse_dot(A, Q), permute_l=True)
Q, _ = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True)
elif power_iteration_normalizer == 'QR':
Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
Q, _ = linalg.qr(safe_sparse_dot(A.T, Q), mode='economic')
# Sample the range of A using by linear projection of Q
# Extract an orthonormal basis
Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=2,
power_iteration_normalizer='auto', transpose='auto',
flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values.
n_iter: int (default is 2)
Number of power iterations (can be used to deal with very noisy
problems).
.. versionchanged:: 0.18
power_iteration_normalizer: 'auto' (default), 'QR', 'LU', 'none'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter`<=2 and switches to LU otherwise.
.. versionadded:: 0.18
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
case.
.. versionchanged:: 0.18
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
* An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto':
transpose = n_samples < n_features
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter,
power_iteration_normalizer, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
if not transpose:
U, V = svd_flip(U, V)
else:
# In case of transpose u_based_decision=false
# to actually flip based on u and not v.
U, V = svd_flip(U, V, u_based_decision=False)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
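# Editor's note: illustrative usage sketch, not part of the original module.
# A random rank-5 matrix is reconstructed from its truncated randomized SVD;
# the helper name, sizes and seeds are chosen only for demonstration.
def _demo_randomized_svd():
    import numpy as np
    rng = np.random.RandomState(0)
    M = np.dot(rng.randn(80, 5), rng.randn(5, 60))  # exactly rank 5
    U, s, V = randomized_svd(M, n_components=5, n_iter=7, random_state=0)
    assert U.shape == (80, 5) and s.shape == (5,) and V.shape == (5, 60)
    # Because M has exact rank 5, the rank-5 reconstruction recovers it.
    assert np.allclose(np.dot(U * s, V), M)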
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex hermitian matrix to be pseudo-inverted
cond : float or None, default None
        Cutoff for 'small' eigenvalues.
        Eigenvalues smaller than cond * largest_eigenvalue are considered
        zero.
        If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
    B : array, shape (N, N)
        The pseudo-inverse of the matrix `a`.
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
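# Editor's note: illustrative usage sketch, not part of the original module.
# It shows that the sign convention leaves the factorization itself unchanged.
def _demo_svd_flip():
    import numpy as np
    from scipy import linalg
    rng = np.random.RandomState(0)
    X = rng.randn(6, 4)
    U, s, V = linalg.svd(X, full_matrices=False)
    U2, V2 = svd_flip(U.copy(), V.copy())
    # Reconstruction is unaffected because the signs cancel between U and V.
    assert np.allclose(np.dot(U2 * s, V2), X)
    # The largest-magnitude loading of each column of U2 is now non-negative.
    assert np.all(U2[np.argmax(np.abs(U2), axis=0), range(U2.shape[1])] >= 0)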
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N) or (M, )
Argument to the logistic function
out: array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
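# Editor's note: illustrative usage sketch, not part of the original module.
# It compares the stable implementation with the naive formula where the
# latter is still finite; the input values are made up for demonstration.
def _demo_log_logistic():
    import numpy as np
    x = np.array([-800.0, -1.0, 0.0, 1.0, 800.0])
    out = log_logistic(x)
    naive = np.log(1.0 / (1.0 + np.exp(-x[1:4])))  # safe only for small |x|
    assert np.allclose(out[1:4], naive)
    # The stable version stays finite even where exp(-x) would overflow.
    assert np.isfinite(out).all()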
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
copy: bool, optional
Copy X or not.
Returns
-------
out: array, shape (M, N)
Softmax function evaluated at every point in x
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
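# Editor's note: illustrative usage sketch, not part of the original module.
# The second row would overflow a naive exp() but is handled correctly here.
def _demo_softmax():
    import numpy as np
    X = np.array([[1.0, 2.0, 3.0],
                  [1000.0, 1000.0, 1000.0]])
    P = softmax(X)
    assert np.allclose(P.sum(axis=1), 1.0)   # rows are probability vectors
    assert np.allclose(P[1], 1.0 / 3.0)      # constant row -> uniform output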
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
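# Editor's note: illustrative usage sketch, not part of the original module.
def _demo_make_nonnegative():
    import numpy as np
    X = np.array([[-2.0, 1.0], [0.0, 3.0]])
    X2 = make_nonnegative(X, min_value=0)
    # The whole matrix is shifted by 2 so that its minimum becomes 0.
    assert X2.min() == 0
    assert np.allclose(X2, X + 2.0)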
def _incremental_mean_and_var(X, last_mean=.0, last_variance=None,
last_sample_count=0):
"""Calculate mean update and a Youngs and Cramer variance update.
last_mean and last_variance are statistics computed at the last step by the
    function. Both must be initialized to 0.0. In case no scaling is required
    last_variance can be None. The mean is always required and returned because
    it is necessary for the calculation of the variance. last_sample_count is
    the number of samples encountered until now.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
last_mean : array-like, shape: (n_features,)
last_variance : array-like, shape: (n_features,)
last_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
If None, only mean is computed
updated_sample_count : int
References
----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
    variance: analysis and recommendations, The American Statistician,
    Vol. 37, No. 3, pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
"""
# old = stats until now
# new = the current increment
# updated = the aggregated stats
last_sum = last_mean * last_sample_count
new_sum = X.sum(axis=0)
new_sample_count = X.shape[0]
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
new_unnormalized_variance = X.var(axis=0) * new_sample_count
if last_sample_count == 0: # Avoid division by 0
updated_unnormalized_variance = new_unnormalized_variance
else:
last_over_new_count = last_sample_count / new_sample_count
last_unnormalized_variance = last_variance * last_sample_count
updated_unnormalized_variance = (
last_unnormalized_variance +
new_unnormalized_variance +
last_over_new_count / updated_sample_count *
(last_sum / last_over_new_count - new_sum) ** 2)
updated_variance = updated_unnormalized_variance / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
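# Editor's note: illustrative usage sketch, not part of the original module.
# Two batches are folded in incrementally and compared with the statistics
# of the concatenated data; batch sizes and seeds are arbitrary.
def _demo_incremental_mean_and_var():
    import numpy as np
    rng = np.random.RandomState(0)
    X1, X2 = rng.randn(30, 4), rng.randn(20, 4)
    mean, var, n = _incremental_mean_and_var(X1, last_mean=0.0,
                                             last_variance=0.0,
                                             last_sample_count=0)
    mean, var, n = _incremental_mean_and_var(X2, mean, var, n)
    X = np.vstack([X1, X2])
    assert n == 50
    assert np.allclose(mean, X.mean(axis=0))
    assert np.allclose(var, X.var(axis=0))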
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
solo7773/visnormsc | pyNormsc/nodesPQ/pQ.py | 1 | 2570 |
import numpy as np
import pandas as pd
import copy
def pQ(data=None, frac=0.5, throw_sd=1, hard_outlier=500):
'''
    :param data: expression data frame with genes in rows and cells in columns. pQ can be applied to raw counts, FPKM or other expression quantification formats. Gene names should be supplied as row names and sample names as column names.
    :param frac: determines a threshold thr = Q1 - frac*IQR; cells with fewer than thr+1 detected genes are discarded.
    :param throw_sd: a non-zero value discards genes with (near-)zero variance after normalization, i.e. genes with no entry above the pseudo-count floor; default is 1.
    :param hard_outlier: a hard lower bound for the minimum number of detected genes, default 500.
    :return: normalized data as a data frame retaining the original row and column names.
'''
# get threshold
thr = np.floor(th(data, frac, hard_outlier))
FPKMbackup = copy.deepcopy(data)
# finding cells which have thr+1 genes
if (thr != 0):
bu = FPKMbackup.apply(lambda x: (x > 0).sum(), axis=0) > thr
FPKMreduced = FPKMbackup.loc[:, bu]
# cells thrown away
print("No. of discarded cells:", len(bu) - sum(bu))
else:
FPKMreduced = FPKMbackup
# minimum number expressed genes in remaining cells
det_no = min(FPKMreduced.apply(lambda x: (x > 0).sum(), axis=0))
# Q normalization
X = quantileNormalize(FPKMreduced)
    # Find the value of the (thr + 1)-th ranked gene in the first cell
key = X.iloc[:,0].sort_values(ascending=False).values[int(thr+1)]
# level tails
X[X<=key] = key
# throw zero deviation genes
if throw_sd != 0:
B = X.std(axis=1, skipna=True) > 1e-6
X = X.loc[B,:]
print('Dimensions of supplied expression matrix:\n', 'Genes =', data.shape[0], '; Cells =', data.shape[1])
    print('Dimensions of processed expression matrix:\n', 'Genes =', X.shape[0], '; Cells =', X.shape[1])
return X
def quantileNormalize(df_input):
# rows genes, columns cells
df = df_input.copy()
rankMedian = df.stack().groupby(df.rank(method='first').stack().astype(int)).median()
df2 = df.rank(method='min').stack().astype(int).map(rankMedian).unstack()
return df2
def th(d=None, frac=0.5, hard_out=None):
    # number of detected genes per cell
    expr = d.apply(lambda x: (x > 0).sum(), axis=0)
    # threshold = Q1 - frac * IQR of the detected-gene counts
    q1, q3 = expr.quantile(0.25), expr.quantile(0.75)
    th = np.floor(q1 - abs(q3 - q1) * frac)
    # apply the hard lower bound only when one is given
    if hard_out is not None and th < hard_out:
        th = hard_out
return th
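# Editor's note: the block below is an illustrative usage sketch, not part of
# the original module. The negative-binomial count matrix, gene/cell names and
# the lowered hard_outlier value are all invented so the toy example runs on a
# small matrix; real single-cell data would keep the default hard_outlier=500.
if __name__ == '__main__':
    np.random.seed(0)
    counts = np.random.negative_binomial(1, 0.7, size=(300, 20))  # sparse toy counts
    data = pd.DataFrame(counts,
                        index=['gene%d' % i for i in range(300)],
                        columns=['cell%d' % j for j in range(20)])
    norm = pQ(data, frac=0.5, throw_sd=1, hard_outlier=50)
    print('normalized matrix shape:', norm.shape)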
| gpl-3.0 |
CZCV/s-dilation-caffe | examples/finetune_flickr_style/assemble_data.py | 38 | 3636 | #!/usr/bin/env python
"""
Form a subset of the Flickr Style data, download images to dirname, and write
Caffe ImagesDataLayer training file.
"""
import os
import urllib
import hashlib
import argparse
import numpy as np
import pandas as pd
from skimage import io
import multiprocessing
# Flickr returns a special image if the request is unavailable.
MISSING_IMAGE_SHA1 = '6a92790b1c2a301c6e7ddef645dca1f53ea97ac2'
example_dirname = os.path.abspath(os.path.dirname(__file__))
caffe_dirname = os.path.abspath(os.path.join(example_dirname, '../..'))
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
def download_image(args_tuple):
"For use with multiprocessing map. Returns filename on fail."
try:
url, filename = args_tuple
if not os.path.exists(filename):
urllib.urlretrieve(url, filename)
with open(filename) as f:
assert hashlib.sha1(f.read()).hexdigest() != MISSING_IMAGE_SHA1
test_read_image = io.imread(filename)
return True
except KeyboardInterrupt:
raise Exception() # multiprocessing doesn't catch keyboard exceptions
except:
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Download a subset of Flickr Style to a directory')
parser.add_argument(
'-s', '--seed', type=int, default=0,
help="random seed")
parser.add_argument(
'-i', '--images', type=int, default=-1,
help="number of images to use (-1 for all [default])",
)
parser.add_argument(
'-w', '--workers', type=int, default=-1,
help="num workers used to download images. -x uses (all - x) cores [-1 default]."
)
parser.add_argument(
'-l', '--labels', type=int, default=0,
help="if set to a positive value, only sample images from the first number of labels."
)
args = parser.parse_args()
np.random.seed(args.seed)
# Read data, shuffle order, and subsample.
csv_filename = os.path.join(example_dirname, 'flickr_style.csv.gz')
df = pd.read_csv(csv_filename, index_col=0, compression='gzip')
df = df.iloc[np.random.permutation(df.shape[0])]
if args.labels > 0:
df = df.loc[df['label'] < args.labels]
if args.images > 0 and args.images < df.shape[0]:
df = df.iloc[:args.images]
# Make directory for images and get local filenames.
if training_dirname is None:
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
images_dirname = os.path.join(training_dirname, 'images')
if not os.path.exists(images_dirname):
os.makedirs(images_dirname)
df['image_filename'] = [
os.path.join(images_dirname, _.split('/')[-1]) for _ in df['image_url']
]
# Download images.
num_workers = args.workers
if num_workers <= 0:
num_workers = multiprocessing.cpu_count() + num_workers
print('Downloading {} images with {} workers...'.format(
df.shape[0], num_workers))
pool = multiprocessing.Pool(processes=num_workers)
map_args = zip(df['image_url'], df['image_filename'])
results = pool.map(download_image, map_args)
# Only keep rows with valid images, and write out training file lists.
df = df[results]
for split in ['train', 'test']:
split_df = df[df['_split'] == split]
filename = os.path.join(training_dirname, '{}.txt'.format(split))
split_df[['image_filename', 'label']].to_csv(
filename, sep=' ', header=None, index=None)
print('Writing train/val for {} successfully downloaded images.'.format(
df.shape[0]))
| agpl-3.0 |