repo_name | path | copies | size | content | license
---|---|---|---|---|---
pulinagrawal/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/collections.py
|
69
|
39876
|
"""
Classes for the efficient drawing of large collections of objects that
share most properties, e.g. a large number of line segments or
polygons.
The classes are not meant to be as flexible as their single element
counterparts (e.g. you may not be able to select all line styles) but
they are meant to be fast for common use cases (e.g. a bunch of solid
line segments)
"""
import copy, math, warnings
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.cbook as cbook
import matplotlib.colors as _colors # avoid conflict with kwarg
import matplotlib.cm as cm
import matplotlib.transforms as transforms
import matplotlib.artist as artist
import matplotlib.backend_bases as backend_bases
import matplotlib.path as mpath
import matplotlib.mlab as mlab
class Collection(artist.Artist, cm.ScalarMappable):
"""
Base class for Collections. Must be subclassed to be usable.
All properties in a collection must be sequences or scalars;
if scalars, they will be converted to sequences. The
property of the ith element of the collection is::
prop[i % len(props)]
Keyword arguments and default values:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets).
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional. If
the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not None
(ie a call to set_array has been made), at draw time a call to
scalar mappable will be made to set the face colors.
"""
_offsets = np.array([], np.float_)
_transOffset = transforms.IdentityTransform()
_transforms = []
zorder = 1
def __init__(self,
edgecolors=None,
facecolors=None,
linewidths=None,
linestyles='solid',
antialiaseds = None,
offsets = None,
transOffset = None,
norm = None, # optional for ScalarMappable
cmap = None, # ditto
pickradius = 5.0,
urls = None,
**kwargs
):
"""
Create a Collection
%(Collection)s
"""
artist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
self.set_edgecolor(edgecolors)
self.set_facecolor(facecolors)
self.set_linewidth(linewidths)
self.set_linestyle(linestyles)
self.set_antialiased(antialiaseds)
self.set_urls(urls)
self._uniform_offsets = None
self._offsets = np.array([], np.float_)
if offsets is not None:
offsets = np.asarray(offsets)
if len(offsets.shape) == 1:
offsets = offsets[np.newaxis,:] # Make it Nx2.
if transOffset is not None:
self._offsets = offsets
self._transOffset = transOffset
else:
self._uniform_offsets = offsets
self._pickradius = pickradius
self.update(kwargs)
def _get_value(self, val):
try: return (float(val), )
except TypeError:
if cbook.iterable(val) and len(val):
try: float(val[0])
except TypeError: pass # raise below
else: return val
raise TypeError('val must be a float or nonzero sequence of floats')
def _get_bool(self, val):
try: return (bool(val), )
except TypeError:
if cbook.iterable(val) and len(val):
try: bool(val[0])
except TypeError: pass # raise below
else: return val
raise TypeError('val must be a bool or nonzero sequence of them')
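# For example, _get_value(2) returns (2.0,) and _get_value([1, 2, 3]) returns
# the sequence unchanged; likewise _get_bool(True) returns (True,).  Scalars
# are wrapped so every property can be treated as a sequence downstream.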
def get_paths(self):
raise NotImplementedError
def get_transforms(self):
return self._transforms
def get_datalim(self, transData):
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
paths = self.get_paths()
if not transform.is_affine:
paths = [transform.transform_path_non_affine(p) for p in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
offsets = np.asarray(offsets, np.float_)
result = mpath.get_path_collection_extents(
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset.frozen())
result = result.inverse_transformed(transData)
return result
def get_window_extent(self, renderer):
bbox = self.get_datalim(transforms.IdentityTransform())
#TODO:check to ensure that this does not fail for
#cases other than scatter plot legend
return bbox
def _prepare_points(self):
"""Point prep for drawing and hit testing"""
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
paths = self.get_paths()
if self.have_units():
paths = []
for path in self.get_paths():
vertices = path.vertices
xs, ys = vertices[:, 0], vertices[:, 1]
xs = self.convert_xunits(xs)
ys = self.convert_yunits(ys)
paths.append(mpath.Path(zip(xs, ys), path.codes))
if len(self._offsets):
xs = self.convert_xunits(self._offsets[:, 0])
ys = self.convert_yunits(self._offsets[:, 1])
offsets = zip(xs, ys)
offsets = np.asarray(offsets, np.float_)
if not transform.is_affine:
paths = [transform.transform_path_non_affine(path) for path in paths]
transform = transform.get_affine()
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
return transform, transOffset, offsets, paths
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__class__.__name__)
self.update_scalarmappable()
clippath, clippath_trans = self.get_transformed_clip_path_and_affine()
if clippath_trans is not None:
clippath_trans = clippath_trans.frozen()
transform, transOffset, offsets, paths = self._prepare_points()
renderer.draw_path_collection(
transform.frozen(), self.clipbox, clippath, clippath_trans,
paths, self.get_transforms(),
offsets, transOffset,
self.get_facecolor(), self.get_edgecolor(), self._linewidths,
self._linestyles, self._antialiaseds, self._urls)
renderer.close_group(self.__class__.__name__)
def contains(self, mouseevent):
"""
Test whether the mouse event occurred in the collection.
Returns True | False, ``dict(ind=itemlist)``, where every
item in itemlist contains the event.
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not self.get_visible(): return False,{}
transform, transOffset, offsets, paths = self._prepare_points()
ind = mpath.point_in_path_collection(
mouseevent.x, mouseevent.y, self._pickradius,
transform.frozen(), paths, self.get_transforms(),
offsets, transOffset, len(self._facecolors)>0)
return len(ind)>0,dict(ind=ind)
def set_pickradius(self, pickradius): self._pickradius = pickradius
def get_pickradius(self): return self._pickradius
def set_urls(self, urls):
if urls is None:
self._urls = [None,]
else:
self._urls = urls
def get_urls(self): return self._urls
def set_offsets(self, offsets):
"""
Set the offsets for the collection. *offsets* can be a scalar
or a sequence.
ACCEPTS: float or sequence of floats
"""
offsets = np.asarray(offsets, np.float_)
if len(offsets.shape) == 1:
offsets = offsets[np.newaxis,:] # Make it Nx2.
#This decision is based on how they are initialized above
if self._uniform_offsets is None:
self._offsets = offsets
else:
self._uniform_offsets = offsets
def get_offsets(self):
"""
Return the offsets for the collection.
"""
#This decision is based on how they are initialized above in __init__()
if self._uniform_offsets is None:
return self._offsets
else:
return self._uniform_offsets
def set_linewidth(self, lw):
"""
Set the linewidth(s) for the collection. *lw* can be a scalar
or a sequence; if it is a sequence the patches will cycle
through the sequence
ACCEPTS: float or sequence of floats
"""
if lw is None: lw = mpl.rcParams['patch.linewidth']
self._linewidths = self._get_value(lw)
def set_linewidths(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_lw(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_linestyle(self, ls):
"""
Set the linestyle(s) for the collection.
ACCEPTS: ['solid' | 'dashed' | 'dashdot' | 'dotted' |
(offset, on-off-dash-seq) ]
"""
try:
dashd = backend_bases.GraphicsContextBase.dashd
if cbook.is_string_like(ls):
if ls in dashd:
dashes = [dashd[ls]]
elif ls in cbook.ls_mapper:
dashes = [dashd[cbook.ls_mapper[ls]]]
else:
raise ValueError()
elif cbook.iterable(ls):
try:
dashes = []
for x in ls:
if cbook.is_string_like(x):
if x in dashd:
dashes.append(dashd[x])
elif x in cbook.ls_mapper:
dashes.append(dashd[cbook.ls_mapper[x]])
else:
raise ValueError()
elif cbook.iterable(x) and len(x) == 2:
dashes.append(x)
else:
raise ValueError()
except ValueError:
if len(ls)==2:
dashes = ls
else:
raise ValueError()
else:
raise ValueError()
except ValueError:
raise ValueError('Do not know how to convert %s to dashes'%ls)
self._linestyles = dashes
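# Accepted forms, for example: 'dashed' (a named style), '--' (mapped through
# cbook.ls_mapper), or an explicit dash tuple such as (0, (6, 2)) meaning
# 6 points on, 2 points off with zero offset; a sequence mixing these forms
# is also accepted, one entry per element.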
def set_linestyles(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_dashes(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_antialiased(self, aa):
"""
Set the antialiasing state for rendering.
ACCEPTS: Boolean or sequence of booleans
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiaseds = self._get_bool(aa)
def set_antialiaseds(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_facecolor(self, c):
"""
Set the facecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
if c is None: c = mpl.rcParams['patch.facecolor']
self._facecolors_original = c
self._facecolors = _colors.colorConverter.to_rgba_array(c, self._alpha)
def set_facecolors(self, c):
"""alias for set_facecolor"""
return self.set_facecolor(c)
def get_facecolor(self):
return self._facecolors
get_facecolors = get_facecolor
def get_edgecolor(self):
if self._edgecolors == 'face':
return self.get_facecolors()
else:
return self._edgecolors
get_edgecolors = get_edgecolor
def set_edgecolor(self, c):
"""
Set the edgecolor(s) of the collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence.
If *c* is 'face', the edge color will always be the same as
the face color.
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
if c == 'face':
self._edgecolors = 'face'
self._edgecolors_original = 'face'
else:
if c is None: c = mpl.rcParams['patch.edgecolor']
self._edgecolors_original = c
self._edgecolors = _colors.colorConverter.to_rgba_array(c, self._alpha)
def set_edgecolors(self, c):
"""alias for set_edgecolor"""
return self.set_edgecolor(c)
def set_alpha(self, alpha):
"""
Set the alpha transparencies of the collection. *alpha* must be
a float.
ACCEPTS: float
"""
try: float(alpha)
except TypeError: raise TypeError('alpha must be a float')
else:
artist.Artist.set_alpha(self, alpha)
try:
self._facecolors = _colors.colorConverter.to_rgba_array(
self._facecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
try:
if self._edgecolors_original != 'face':
self._edgecolors = _colors.colorConverter.to_rgba_array(
self._edgecolors_original, self._alpha)
except (AttributeError, TypeError, IndexError):
pass
def get_linewidths(self):
return self._linewidths
get_linewidth = get_linewidths
def get_linestyles(self):
return self._linestyles
get_dashes = get_linestyle = get_linestyles
def update_scalarmappable(self):
"""
If the scalar mappable array is not None, update colors
from scalar data
"""
if self._A is None: return
if self._A.ndim > 1:
raise ValueError('Collections can only map rank 1 arrays')
if len(self._facecolors):
self._facecolors = self.to_rgba(self._A, self._alpha)
else:
self._edgecolors = self.to_rgba(self._A, self._alpha)
def update_from(self, other):
'copy properties from other to self'
artist.Artist.update_from(self, other)
self._antialiaseds = other._antialiaseds
self._edgecolors_original = other._edgecolors_original
self._edgecolors = other._edgecolors
self._facecolors_original = other._facecolors_original
self._facecolors = other._facecolors
self._linewidths = other._linewidths
self._linestyles = other._linestyles
self._pickradius = other._pickradius
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object defn
artist.kwdocd['Collection'] = """\
Valid Collection keyword arguments:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets)
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
"""
class QuadMesh(Collection):
"""
Class for the efficient drawing of a quadrilateral mesh.
A quadrilateral mesh consists of a grid of vertices. The
dimensions of this array are (*meshWidth* + 1, *meshHeight* +
1). Each vertex in the mesh has a different set of "mesh
coordinates" representing its position in the topology of the
mesh. For any values (*m*, *n*) such that 0 <= *m* <= *meshWidth*
and 0 <= *n* <= *meshHeight*, the vertices at mesh coordinates
(*m*, *n*), (*m*, *n* + 1), (*m* + 1, *n* + 1), and (*m* + 1, *n*)
form one of the quadrilaterals in the mesh. There are thus
(*meshWidth* * *meshHeight*) quadrilaterals in the mesh. The mesh
need not be regular and the polygons need not be convex.
A quadrilateral mesh is represented by a (2 x ((*meshWidth* + 1) *
(*meshHeight* + 1))) numpy array *coordinates*, where each row is
the *x* and *y* coordinates of one of the vertices. To define the
function that maps from a data point to its corresponding color,
use the :meth:`set_cmap` method. Each of these arrays is indexed in
row-major order by the mesh coordinates of the vertex (or the mesh
coordinates of the lower left vertex, in the case of the
colors).
For example, the first entry in *coordinates* is the
coordinates of the vertex at mesh coordinates (0, 0), then the one
at (0, 1), then at (0, 2) .. (0, meshWidth), (1, 0), (1, 1), and
so on.
"""
def __init__(self, meshWidth, meshHeight, coordinates, showedges, antialiased=True):
Collection.__init__(self)
self._meshWidth = meshWidth
self._meshHeight = meshHeight
self._coordinates = coordinates
self._showedges = showedges
self._antialiased = antialiased
self._paths = None
self._bbox = transforms.Bbox.unit()
self._bbox.update_from_data_xy(coordinates.reshape(
((meshWidth + 1) * (meshHeight + 1), 2)))
# By converting to floats now, we can avoid that on every draw.
self._coordinates = self._coordinates.reshape((meshHeight + 1, meshWidth + 1, 2))
self._coordinates = np.array(self._coordinates, np.float_)
def get_paths(self, dataTrans=None):
if self._paths is None:
self._paths = self.convert_mesh_to_paths(
self._meshWidth, self._meshHeight, self._coordinates)
return self._paths
#@staticmethod
def convert_mesh_to_paths(meshWidth, meshHeight, coordinates):
"""
Converts a given mesh into a sequence of
:class:`matplotlib.path.Path` objects for easier rendering by
backends that do not directly support quadmeshes.
This function is primarily of use to backend implementers.
"""
Path = mpath.Path
if ma.isMaskedArray(coordinates):
c = coordinates.data
else:
c = coordinates
points = np.concatenate((
c[0:-1, 0:-1],
c[0:-1, 1: ],
c[1: , 1: ],
c[1: , 0:-1],
c[0:-1, 0:-1]
), axis=2)
points = points.reshape((meshWidth * meshHeight, 5, 2))
return [Path(x) for x in points]
convert_mesh_to_paths = staticmethod(convert_mesh_to_paths)
def get_datalim(self, transData):
return self._bbox
def draw(self, renderer):
if not self.get_visible(): return
renderer.open_group(self.__class__.__name__)
transform = self.get_transform()
transOffset = self._transOffset
offsets = self._offsets
if self.have_units():
if len(self._offsets):
xs = self.convert_xunits(self._offsets[:, 0])
ys = self.convert_yunits(self._offsets[:, 1])
offsets = zip(xs, ys)
offsets = np.asarray(offsets, np.float_)
if self.check_update('array'):
self.update_scalarmappable()
clippath, clippath_trans = self.get_transformed_clip_path_and_affine()
if clippath_trans is not None:
clippath_trans = clippath_trans.frozen()
if not transform.is_affine:
coordinates = self._coordinates.reshape(
(self._coordinates.shape[0] *
self._coordinates.shape[1],
2))
coordinates = transform.transform(coordinates)
coordinates = coordinates.reshape(self._coordinates.shape)
transform = transforms.IdentityTransform()
else:
coordinates = self._coordinates
if not transOffset.is_affine:
offsets = transOffset.transform_non_affine(offsets)
transOffset = transOffset.get_affine()
renderer.draw_quad_mesh(
transform.frozen(), self.clipbox, clippath, clippath_trans,
self._meshWidth, self._meshHeight, coordinates,
offsets, transOffset, self.get_facecolor(), self._antialiased,
self._showedges)
renderer.close_group(self.__class__.__name__)
class PolyCollection(Collection):
def __init__(self, verts, sizes = None, closed = True, **kwargs):
"""
*verts* is a sequence of ( *verts0*, *verts1*, ...) where
*verts_i* is a sequence of *xy* tuples of vertices, or an
equivalent :mod:`numpy` array of shape (*nv*, 2).
*sizes* is *None* (default) or a sequence of floats that
scale the corresponding *verts_i*. The scaling is applied
before the Artist master transform; if the latter is an identity
transform, then the overall scaling is such that if
*verts_i* specify a unit square, then *sizes_i* is the area
of that square in points^2.
If len(*sizes*) < *nv*, the additional values will be
taken cyclically from the array.
*closed*, when *True*, will explicitly close the polygon.
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self.set_verts(verts, closed)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def set_verts(self, verts, closed=True):
'''This allows one to delay initialization of the vertices.'''
if closed:
self._paths = []
for xy in verts:
if np.ma.isMaskedArray(xy):
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.ma.concatenate([xy, [xy[0]]])
else:
xy = np.asarray(xy)
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.concatenate([xy, [xy[0]]])
self._paths.append(mpath.Path(xy))
else:
self._paths = [mpath.Path(xy) for xy in verts]
def get_paths(self):
return self._paths
def draw(self, renderer):
if self._sizes is not None:
self._transforms = [
transforms.Affine2D().scale(
(np.sqrt(x) * self.figure.dpi / 72.0))
for x in self._sizes]
return Collection.draw(self, renderer)
class BrokenBarHCollection(PolyCollection):
"""
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
"""
def __init__(self, xranges, yrange, **kwargs):
"""
*xranges*
sequence of (*xmin*, *xwidth*)
*yrange*
*ymin*, *ywidth*
%(Collection)s
"""
ymin, ywidth = yrange
ymax = ymin + ywidth
verts = [ [(xmin, ymin), (xmin, ymax), (xmin+xwidth, ymax), (xmin+xwidth, ymin), (xmin, ymin)] for xmin, xwidth in xranges]
PolyCollection.__init__(self, verts, **kwargs)
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
@staticmethod
def span_where(x, ymin, ymax, where, **kwargs):
"""
Create a BrokenBarHCollection to plot horizontal bars from
over the regions in *x* where *where* is True. The bars range
on the y-axis from *ymin* to *ymax*
A :class:`BrokenBarHCollection` is returned.
*kwargs* are passed on to the collection
"""
xranges = []
for ind0, ind1 in mlab.contiguous_regions(where):
xslice = x[ind0:ind1]
if not len(xslice):
continue
xranges.append((xslice[0], xslice[-1]-xslice[0]))
collection = BrokenBarHCollection(xranges, [ymin, ymax-ymin], **kwargs)
return collection
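# Example: BrokenBarHCollection.span_where(x, 0., 1., y > 0), for equal-length
# arrays x and y, builds bars spanning y from 0 to 1 over every contiguous run
# of x where y is positive.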
class RegularPolyCollection(Collection):
"""Draw a collection of regular polygons with *numsides*."""
_path_generator = mpath.Path.unit_regular_polygon
def __init__(self,
numsides,
rotation = 0 ,
sizes = (1,),
**kwargs):
"""
*numsides*
the number of sides of the polygon
*rotation*
the rotation of the polygon in radians
*sizes*
gives the area of the circle circumscribing the
regular polygon in points^2
%(Collection)s
Example: see :file:`examples/dynamic_collection.py` for
complete example::
offsets = np.random.rand(20,2)
facecolors = [cm.jet(x) for x in np.random.rand(20)]
black = (0,0,0,1)
collection = RegularPolyCollection(
numsides=5, # a pentagon
rotation=0, sizes=(50,),
facecolors = facecolors,
edgecolors = (black,),
linewidths = (1,),
offsets = offsets,
transOffset = ax.transData,
)
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self._numsides = numsides
self._paths = [self._path_generator(numsides)]
self._rotation = rotation
self.set_transform(transforms.IdentityTransform())
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def draw(self, renderer):
self._transforms = [
transforms.Affine2D().rotate(-self._rotation).scale(
(np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
for x in self._sizes]
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
def get_numsides(self):
return self._numsides
def get_rotation(self):
return self._rotation
def get_sizes(self):
return self._sizes
class StarPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular stars with *numsides* points."""
_path_generator = mpath.Path.unit_regular_star
class AsteriskPolygonCollection(RegularPolyCollection):
"""
Draw a collection of regular asterisks with *numsides* points."""
_path_generator = mpath.Path.unit_regular_asterisk
class LineCollection(Collection):
"""
All parameters must be sequences or scalars; if scalars, they will
be converted to sequences. The property of the ith line
segment is::
prop[i % len(props)]
i.e., the properties cycle if the ``len`` of props is less than the
number of segments.
"""
zorder = 2
def __init__(self, segments, # Can be None.
linewidths = None,
colors = None,
antialiaseds = None,
linestyles = 'solid',
offsets = None,
transOffset = None,
norm = None,
cmap = None,
pickradius = 5,
**kwargs
):
"""
*segments*
a sequence of (*line0*, *line1*, *line2*), where::
linen = (x0, y0), (x1, y1), ... (xm, ym)
or the equivalent numpy array with two columns. Each line
can be a different length.
*colors*
must be a sequence of RGBA tuples (e.g., arbitrary color
strings are not allowed).
*antialiaseds*
must be a sequence of ones or zeros
*linestyles* [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
a string or dash tuple. The dash tuple is::
(offset, onoffseq),
where *onoffseq* is an even length tuple of on and off ink
in points.
If *linewidths*, *colors*, or *antialiaseds* is None, they
default to their rcParams setting, in sequence form.
If *offsets* and *transOffset* are not None, then
*offsets* are transformed by *transOffset* and applied after
the segments have been transformed to display coordinates.
If *offsets* is not None but *transOffset* is None, then the
*offsets* are added to the segments before any transformation.
In this case, a single offset can be specified as::
offsets=(xo,yo)
and this value will be added cumulatively to each successive
segment, so as to produce a set of successively offset curves.
*norm*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*cmap*
None (optional for :class:`matplotlib.cm.ScalarMappable`)
*pickradius* is the tolerance for mouse clicks picking a line.
The default is 5 pt.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` matrix
:attr:`~matplotlib.cm.ScalarMappable._A` is not None (ie a call to
:meth:`~matplotlib.cm.ScalarMappable.set_array` has been made), at
draw time a call to scalar mappable will be made to set the colors.
"""
if colors is None: colors = mpl.rcParams['lines.color']
if linewidths is None: linewidths = (mpl.rcParams['lines.linewidth'],)
if antialiaseds is None: antialiaseds = (mpl.rcParams['lines.antialiased'],)
self.set_linestyles(linestyles)
colors = _colors.colorConverter.to_rgba_array(colors)
Collection.__init__(
self,
edgecolors=colors,
linewidths=linewidths,
linestyles=linestyles,
antialiaseds=antialiaseds,
offsets=offsets,
transOffset=transOffset,
norm=norm,
cmap=cmap,
pickradius=pickradius,
**kwargs)
self.set_facecolors([])
self.set_segments(segments)
def get_paths(self):
return self._paths
def set_segments(self, segments):
if segments is None: return
_segments = []
for seg in segments:
if not np.ma.isMaskedArray(seg):
seg = np.asarray(seg, np.float_)
_segments.append(seg)
if self._uniform_offsets is not None:
_segments = self._add_offsets(_segments)
self._paths = [mpath.Path(seg) for seg in _segments]
set_verts = set_segments # for compatibility with PolyCollection
def _add_offsets(self, segs):
offsets = self._uniform_offsets
Nsegs = len(segs)
Noffs = offsets.shape[0]
if Noffs == 1:
for i in range(Nsegs):
segs[i] = segs[i] + i * offsets
else:
for i in range(Nsegs):
io = i%Noffs
segs[i] = segs[i] + offsets[io:io+1]
return segs
def set_color(self, c):
"""
Set the color(s) of the line collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
self._edgecolors = _colors.colorConverter.to_rgba_array(c)
def color(self, c):
"""
Set the color(s) of the line collection. *c* can be a
matplotlib color arg (all patches have same color), or a
sequence of rgba tuples; if it is a sequence the patches will
cycle through the sequence
ACCEPTS: matplotlib color arg or sequence of rgba tuples
"""
warnings.warn('LineCollection.color deprecated; use set_color instead')
return self.set_color(c)
def get_color(self):
return self._edgecolors
get_colors = get_color # for compatibility with old versions
class CircleCollection(Collection):
"""
A collection of circles, drawn using splines.
"""
def __init__(self, sizes, **kwargs):
"""
*sizes*
Gives the area of the circle in points^2
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._sizes = sizes
self.set_transform(transforms.IdentityTransform())
self._paths = [mpath.Path.unit_circle()]
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def draw(self, renderer):
# sizes is the area of the circle circumscribing the polygon
# in points^2
self._transforms = [
transforms.Affine2D().scale(
(np.sqrt(x) * self.figure.dpi / 72.0) / np.sqrt(np.pi))
for x in self._sizes]
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
class EllipseCollection(Collection):
"""
A collection of ellipses, drawn using splines.
"""
def __init__(self, widths, heights, angles, units='points', **kwargs):
"""
*widths*: sequence
half-lengths of first axes (e.g., semi-major axis lengths)
*heights*: sequence
half-lengths of second axes
*angles*: sequence
angles of first axes, degrees CCW from the X-axis
*units*: ['points' | 'inches' | 'dots' | 'width' | 'height' | 'x' | 'y']
units in which majors and minors are given; 'width' and 'height'
refer to the dimensions of the axes, while 'x' and 'y'
refer to the *offsets* data units.
Additional kwargs inherited from the base :class:`Collection`:
%(Collection)s
"""
Collection.__init__(self,**kwargs)
self._widths = np.asarray(widths).ravel()
self._heights = np.asarray(heights).ravel()
self._angles = np.asarray(angles).ravel() *(np.pi/180.0)
self._units = units
self.set_transform(transforms.IdentityTransform())
self._transforms = []
self._paths = [mpath.Path.unit_circle()]
self._initialized = False
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def _init(self):
def on_dpi_change(fig):
self._transforms = []
self.figure.callbacks.connect('dpi_changed', on_dpi_change)
self._initialized = True
def set_transforms(self):
if not self._initialized:
self._init()
self._transforms = []
ax = self.axes
fig = self.figure
if self._units in ('x', 'y'):
if self._units == 'x':
dx0 = ax.viewLim.width
dx1 = ax.bbox.width
else:
dx0 = ax.viewLim.height
dx1 = ax.bbox.height
sc = dx1/dx0
else:
if self._units == 'inches':
sc = fig.dpi
elif self._units == 'points':
sc = fig.dpi / 72.0
elif self._units == 'width':
sc = ax.bbox.width
elif self._units == 'height':
sc = ax.bbox.height
elif self._units == 'dots':
sc = 1.0
else:
raise ValueError('unrecognized units: %s' % self._units)
_affine = transforms.Affine2D
for x, y, a in zip(self._widths, self._heights, self._angles):
trans = _affine().scale(x * sc, y * sc).rotate(a)
self._transforms.append(trans)
def draw(self, renderer):
if True: ###not self._transforms:
self.set_transforms()
return Collection.draw(self, renderer)
def get_paths(self):
return self._paths
class PatchCollection(Collection):
"""
A generic collection of patches.
This makes it easier to assign a color map to a heterogeneous
collection of patches.
This also may improve plotting speed, since PatchCollection will
draw faster than a large number of patches.
"""
def __init__(self, patches, match_original=False, **kwargs):
"""
*patches*
a sequence of Patch objects. This list may include
a heterogeneous assortment of different patch types.
*match_original*
If True, use the colors and linewidths of the original
patches. If False, new colors may be assigned by
providing the standard collection arguments, facecolor,
edgecolor, linewidths, norm or cmap.
If any of *edgecolors*, *facecolors*, *linewidths*,
*antialiaseds* are None, they default to their
:data:`matplotlib.rcParams` patch setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional.
If the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not
None (ie a call to set_array has been made), at draw time a
call to scalar mappable will be made to set the face colors.
"""
if match_original:
def determine_facecolor(patch):
if patch.fill:
return patch.get_facecolor()
return [0, 0, 0, 0]
facecolors = [determine_facecolor(p) for p in patches]
edgecolors = [p.get_edgecolor() for p in patches]
linewidths = [p.get_linewidth() for p in patches]
antialiaseds = [p.get_antialiased() for p in patches]
Collection.__init__(
self,
edgecolors=edgecolors,
facecolors=facecolors,
linewidths=linewidths,
linestyles='solid',
antialiaseds = antialiaseds)
else:
Collection.__init__(self, **kwargs)
paths = [p.get_transform().transform_path(p.get_path())
for p in patches]
self._paths = paths
def get_paths(self):
return self._paths
artist.kwdocd['Collection'] = patchstr = artist.kwdoc(Collection)
for k in ('QuadMesh', 'PolyCollection', 'BrokenBarHCollection', 'RegularPolyCollection',
'StarPolygonCollection', 'PatchCollection', 'CircleCollection'):
artist.kwdocd[k] = patchstr
artist.kwdocd['LineCollection'] = artist.kwdoc(LineCollection)
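# A minimal usage sketch (illustrative only; the plotting boilerplate below is
# an assumption, not part of this module): build a LineCollection from two
# segments with cycling colors/linewidths, as described in the Collection
# docstring, and attach it to an Axes.
def _example_line_collection():
    import matplotlib.pyplot as plt
    segments = [[(0., 0.), (1., 1.)], [(0., 1.), (1., 0.)]]
    lc = LineCollection(segments,
                        colors=[(1., 0., 0., 1.), (0., 0., 1., 1.)],
                        linewidths=(1., 2.))
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.add_collection(lc)
    ax.set_xlim(0., 1.)
    ax.set_ylim(0., 1.)
    plt.show()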
|
agpl-3.0
|
elmadjian/pcs5735
|
linear_regression.py
|
1
|
3359
|
import sys, re
import matplotlib.pyplot as plt
import numpy as np
theta_list = []
x_list = []
y_list = []
def main():
if len(sys.argv) != 2:
print("modo de usar: <este_programa> <arquivo_csv>")
sys.exit()
csv_file = sys.argv[1]
with open(csv_file, "r") as arquivo:
classes = arquivo.readline().split(",")
theta_list = [0.0 for i in range(len(classes))]
for line in arquivo:
values = line.split(",")
curr_x = [float(i) for i in values]
curr_x[-1] = 1.0
x_list.append(curr_x)
y_list.append(float(values[-1]))
#print("x_list:", x_list, "\n\ny_list:", y_list, "\n\ntheta_list:", theta_list)
#batch_gradient_descent(theta_list, x_list, y_list, 0.0005, 0.0000001)
#stochastic_gradient_descent(theta_list, x_list, y_list, 0.0005, 0.00001)
theta_list = normal_equations(x_list, y_list)
print(theta_list)
plot(theta_list, x_list, y_list)
#--------------------------------
def J(theta_list, x_list, y_list):
sigma = 0
for i in range(len(x_list)):
sigma += (h_theta(theta_list, x_list[i]) - y_list[i])**2
return sigma / 2
#--------------------------------
def h_theta(theta_list, x_list_i):
return np.dot(theta_list, x_list_i)
#--------------------------------
def batch_gradient_descent(theta_list, x_list, y_list, alpha, epsilon):
J_prev = 0
J_curr = J(theta_list, x_list, y_list)
count = 0
while (abs(J_curr - J_prev) > epsilon):
#print(count)
count+=1
if count > 10000:
print("too much iterations")
break
for j in range(len(theta_list)):
sigma = 0
for i in range(len(x_list)):
h = h_theta(theta_list, x_list[i])
sigma += (h - y_list[i]) * x_list[i][j]
#print("h>>", h)
theta_list[j] = theta_list[j] - alpha * sigma
J_prev = J_curr
J_curr = J(theta_list, x_list, y_list)
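# Each sweep above applies, one coordinate at a time,
#   theta_j := theta_j - alpha * sum_i (h_theta(x_i) - y_i) * x_i[j]
# and repeats until the change in the cost J drops below epsilon (or the
# 10000-iteration safety cap is reached).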
#--------------------------------
def stochastic_gradient_descent(theta_list, x_list, y_list, alpha, epsilon):
J_prev = 0
J_curr = J(theta_list, x_list, y_list)
count = 0
while (abs(J_curr - J_prev) > epsilon):
#print(count)
count+=1
if count > 10000:
print("too much iterations")
break
for j in range(len(theta_list)):
for i in range(len(x_list)):
diff = (h_theta(theta_list, x_list[i]) - y_list[i])
theta_list[j] = theta_list[j] - alpha * diff * x_list[i][j]
J_prev = J_curr
J_curr = J(theta_list, x_list, y_list)
#--------------------------------
def normal_equations(x_list, y_list):
X = np.array(x_list)
y = np.array(y_list)
return np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), y)
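# Closed-form least-squares solution: theta = (X^T X)^(-1) X^T y.
# A numerically safer equivalent (assuming X^T X is invertible, as here) is
# np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y)) or np.linalg.lstsq(X, y)[0].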
#--------------------------------
def plot(theta_list, x_list, y_list):
new_x_list = [i[0] for i in x_list]
plt.plot(new_x_list, y_list, 'ro')
#x_list.sort()
#for x in x_list:
# print("x:", x, "thetaTx:", np.dot(theta_list, x))
plt.plot(new_x_list, [np.dot((theta_list[0], theta_list[1]), (i[0], i[1])) for i in x_list])
plt.title("Regressão sobre os dados em 'height.csv'")
plt.xlabel("Idade")
plt.ylabel("Altura")
plt.show()
if __name__=="__main__":
main()
|
mpl-2.0
|
jimgoo/zipline-fork
|
zipline/examples/dual_moving_average.py
|
1
|
4437
|
#!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dual Moving Average Crossover algorithm.
This algorithm buys apple once its short moving average crosses
its long moving average (indicating upwards momentum) and sells
its shares once the averages cross again (indicating downwards
momentum).
"""
from zipline.api import order_target, record, symbol, history, add_history
def initialize(context):
# Register 2 histories that track daily prices,
# one with a 100 window and one with a 300 day window
add_history(100, '1d', 'price')
add_history(300, '1d', 'price')
context.sym = symbol('AAPL')
context.i = 0
def handle_data(context, data):
# Skip first 300 days to get full windows
context.i += 1
if context.i < 300:
return
# Compute averages
# history() has to be called with the same params
# from above and returns a pandas dataframe.
short_mavg = history(100, '1d', 'price').mean()
long_mavg = history(300, '1d', 'price').mean()
# Trading logic
if short_mavg[context.sym] > long_mavg[context.sym]:
# order_target orders as many shares as needed to
# achieve the desired number of shares.
order_target(context.sym, 100)
elif short_mavg[context.sym] < long_mavg[context.sym]:
order_target(context.sym, 0)
# Save values for later inspection
record(AAPL=data[context.sym].price,
short_mavg=short_mavg[context.sym],
long_mavg=long_mavg[context.sym])
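# Rough sketch of the same signal on a plain pandas price series (`prices` is
# a hypothetical pandas Series, not part of this algorithm's API):
#   short_mavg = prices.rolling(100).mean()
#   long_mavg = prices.rolling(300).mean()
#   hold 100 shares while short_mavg > long_mavg, 0 shares once it crosses back.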
# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze2(context=None, results=None):
import matplotlib.pyplot as plt
import logbook
logbook.StderrHandler().push_application()
log = logbook.Logger('Algorithm')
fig = plt.figure()
ax1 = fig.add_subplot(211)
results.portfolio_value.plot(ax=ax1)
ax1.set_ylabel('Portfolio value (USD)')
ax2 = fig.add_subplot(212)
ax2.set_ylabel('Price (USD)')
# If data has been record()ed, then plot it.
# Otherwise, log the fact that no data has been recorded.
if ('AAPL' in results and 'short_mavg' in results and
'long_mavg' in results):
results['AAPL'].plot(ax=ax2)
results[['short_mavg', 'long_mavg']].plot(ax=ax2)
trans = results.ix[[t != [] for t in results.transactions]]
buys = trans.ix[[t[0]['amount'] > 0 for t in
trans.transactions]]
sells = trans.ix[
[t[0]['amount'] < 0 for t in trans.transactions]]
ax2.plot(buys.index, results.short_mavg.ix[buys.index],
'^', markersize=10, color='m')
ax2.plot(sells.index, results.short_mavg.ix[sells.index],
'v', markersize=10, color='k')
plt.legend(loc=0)
else:
msg = 'AAPL, short_mavg & long_mavg data not captured using record().'
ax2.annotate(msg, xy=(0.1, 0.5))
log.info(msg)
plt.show()
def analyze(context=None, results=None):
print(results.head())
# Note: this if-block should be removed if running
# this algorithm on quantopian.com
if __name__ == '__main__':
from datetime import datetime
import pytz
from zipline.algorithm import TradingAlgorithm
from zipline.utils.factory import load_from_yahoo
# Set the simulation start and end dates.
start = datetime(2009, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2013, 1, 1, 0, 0, 0, 0, pytz.utc)
print start
print end
# Load price data from yahoo.
data = load_from_yahoo(stocks=['AAPL'], indexes={}, start=start,
end=end)
# Create and run the algorithm.
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data,
identifiers=['AAPL'])
results = algo.run(data)
# Plot the portfolio and asset data.
analyze(results=results)
|
apache-2.0
|
legacysurvey/pipeline
|
py/obiwan/decals_sim_plots.py
|
2
|
32664
|
#!/usr/bin/env python
"""Analyze the output of decals_simulations.
EXAMPLE
=======
8 500 star chunks for brick 2523p355 are here
/project/projectdirs/desi/image_sims/2523p355
you can analyze them like this:
export DECALS_SIM_DIR=/project/projectdirs/desi/image_sims
python legacyanalysis/decals_sim_plots.py -b 2523p355 -o STAR -out your/relative/output/path
out is optional, default is brickname/objtype
Missing object and annotated coadd plots
========================================
python legacyanalysis/decals_sim_plots.py ... --extra_plots
default is to NOT make them because chunks > 50
"""
from __future__ import division, print_function
import matplotlib
matplotlib.use('Agg') # display backend
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import matplotlib.image as mpimg
import os
import sys
import pdb
import logging
import argparse
import glob
import numpy as np
from astropy.io import fits
from astropy.table import vstack, Table
from astropy import units
from astropy.coordinates import SkyCoord
# import seaborn as sns
from PIL import Image, ImageDraw
import photutils
from theValidator.catalogues import CatalogueFuncs
def flux2mag(nanoflux):
'''converts flux in tractor nanomaggie units to AB mag'''
return 22.5-2.5*np.log10(nanoflux)
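# e.g. flux2mag(1.0) == 22.5 (one nanomaggie corresponds to 22.5 AB mag by
# definition), and every factor-of-10 drop in flux adds 2.5 mag.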
def basic_cut(tractor):
'''return boolean indices for which to keep and throw out'''
flux= tractor['decam_flux'][:,[1,2,4]]
grz_anymask= tractor['decam_anymask'][:,[1,2,4]]
grz_nobs= tractor['decam_nobs'][:,[1,2,4]]
b_good= np.all((flux[:,0] > 0, flux[:,1] > 0, flux[:,2] > 0, \
grz_anymask[:,0] == 0,grz_anymask[:,1] ==0,grz_anymask[:,2] == 0),axis=0)
#grz_nobs[:,0] > 1,grz_nobs[:,1] > 1,grz_nobs[:,2] > 1,\
b_bad= b_good == False
return b_good,b_bad
def bright_dmag_cut(matched_simcat,matched_tractor):
'''return:
1) median dmag of bright sources
2) indices of sources that are bright AND |dmag[band]| -|median_dmag[band]| > 0.005 in ANY band
3) indices of sources that are bright AND |dmag[band]| -|median_dmag[band]| <= 0.005 in ALL bands'''
bright= dict(g=20.,r=19.,z=18.)
b_good,junk= basic_cut(matched_tractor)
# Store median dmag of bright sources
med,b_bright={},{}
for band,iband in zip(['g','r','z'],[1,2,4]):
# Cut to bright and good sources
inputflux = matched_simcat[band+'flux']
inputmag = 22.5-2.5*np.log10(inputflux)
b_bright[band]= inputmag < bright[band]
b_bright[band]= np.all((b_bright[band],b_good),axis=0)
#i_bright= np.where(b_bright)[0]
#print('type(i_bright)= ',type(i_bright),"i_bright=",i_bright)
# Compute median for each band
inputflux = matched_simcat[band+'flux'][b_bright[band]]
tractorflux = matched_tractor['decam_flux'][:, iband][b_bright[band]]
mag_diff= -2.5*np.log10(tractorflux/inputflux)
med[band]= np.percentile(mag_diff,q=50)
# Boolean mask for each band
b={}
for band,iband in zip(['g','r','z'],[1,2,4]):
inputflux = matched_simcat[band+'flux']
tractorflux = matched_tractor['decam_flux'][:, iband]
mag_diff= -2.5*np.log10(tractorflux/inputflux)
b[band]= np.abs(mag_diff) - abs(med[band]) > 0.001
# total boolean mask
b_bright= np.any((b_bright['g'],b_bright['r'],b_bright['z']),axis=0)
b_large_dmag= np.all((b_bright,b_good,b['g'],b['r'],b['z']),axis=0)
b_small_dmag= np.all((b_bright,b_good,b['g']==False,b['r']==False,b['z']==False),axis=0)
return med,np.where(b_large_dmag)[0],np.where(b_small_dmag)[0]
def plot_cutouts_by_index(simcat,index, brickname,lobjtype,chunksuffix, \
indir=None, img_name='simscoadd',qafile='test.png'):
hw = 30 # half-width [pixels]
rad = 14
ncols = 5
nrows = 5
nthumb = ncols*nrows
dims = (ncols*hw*2,nrows*hw*2)
mosaic = Image.new('RGB',dims)
xpos, ypos = np.meshgrid(np.arange(0, dims[0], hw*2, dtype='int'),
np.arange(0, dims[1], hw*2, dtype='int'))
im = Image.open( os.path.join(indir, 'qa-{}-{}-{}-{:02d}.jpg'.format(brickname, lobjtype, img_name, int(chunksuffix))) )
sz = im.size
iobj = 0
for ic in range(ncols):
if iobj >= len(index) or iobj >= ncols*nrows: break
for ir in range(nrows):
if iobj >= len(index) or iobj >= ncols*nrows: break
xx = int(simcat['X'][index[iobj]])
yy = int(sz[1]-simcat['Y'][index[iobj]])
crop = (xx-hw, yy-hw, xx+hw, yy+hw)
box = (xpos[ir, ic], ypos[ir, ic])
thumb = im.crop(crop)
mosaic.paste(thumb, box)
iobj+= 1
# Add a border and circle the missing source.
draw = ImageDraw.Draw(mosaic)
sz = mosaic.size
for ic in range(ncols):
for ir in range(nrows):
draw.rectangle([(xpos[ir, ic], ypos[ir, ic]),
(xpos[ir, ic]+hw*2, ypos[ir, ic]+hw*2)])
xx = xpos[ir, ic] + hw
yy = ypos[ir, ic] + hw
draw.ellipse((xx-rad, sz[1]-yy-rad, xx+rad, sz[1]-yy+rad), outline='yellow')
mosaic.save(qafile)
def plot_annotated_coadds(simcat, brickname, lobjtype, chunksuffix,\
indir=None,img_name='simscoadd',qafile='test.png'):
rad = 7/0.262
#imfile = os.path.join(cdir, 'qa-{}-{}-{}-{}.jpg'.format(brickname, lobjtype, suffix, chunksuffix))
# HARDCODED fix this!!!!!
imfile = os.path.join(indir, 'qa-{}-{}-{}-{:02d}.jpg'.format(brickname, lobjtype, img_name, int(chunksuffix)))
im = Image.open(imfile)
sz = im.size
draw = ImageDraw.Draw(im)
[draw.ellipse((cat['X']-rad, sz[1]-cat['Y']-rad, cat['X']+rad,
sz[1]-cat['Y']+rad), outline='yellow') for cat in simcat]
im.save(qafile)
def bin_up(data_bin_by,data_for_percentile, bin_minmax=(18.,26.),nbins=20):
'''bins "data_for_percentile" into "nbins" using "data_bin_by" to decide how indices are assigned to bins
returns bin center,N,q25,50,75 for each bin
'''
bin_edges= np.linspace(bin_minmax[0],bin_minmax[1],num= nbins+1)
vals={}
for key in ['q50','q25','q75','n']: vals[key]=np.zeros(nbins)+np.nan
vals['binc']= (bin_edges[1:]+bin_edges[:-1])/2.
for i,low,hi in zip(range(nbins), bin_edges[:-1],bin_edges[1:]):
keep= np.all((low < data_bin_by,data_bin_by <= hi),axis=0)
if np.where(keep)[0].size > 0:
vals['n'][i]= np.where(keep)[0].size
vals['q25'][i]= np.percentile(data_for_percentile[keep],q=25)
vals['q50'][i]= np.percentile(data_for_percentile[keep],q=50)
vals['q75'][i]= np.percentile(data_for_percentile[keep],q=75)
else:
vals['n'][i]=0
return vals
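# Example: bin_up(mag, snr, bin_minmax=(18., 26.), nbins=20), for any pair of
# equal-length arrays mag and snr, splits 18-26 into 20 equal-width bins and,
# for the entries landing in each bin, records the count ('n') and the
# 25th/50th/75th percentiles of snr ('q25', 'q50', 'q75'); empty bins get
# n = 0 and NaN percentiles.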
def plot_injected_mags(allsimcat, log,qafile='test.png'):
gr_sim = -2.5*np.log10(allsimcat['gflux']/allsimcat['rflux'])
rz_sim = -2.5*np.log10(allsimcat['rflux']/allsimcat['zflux'])
grrange = (-0.2, 2.0)
rzrange = (-0.4, 2.5)
fig, ax = plt.subplots(2,1,figsize=(6,8))
ax[0].hist(22.5-2.5*np.log10(allsimcat['rflux']),bins=20,align='mid')
ax[1].scatter(rz_sim,gr_sim,
s=10,edgecolor='b',c='none',lw=1.)
for i,x_lab,y_lab in zip(range(2),['r AB','r-z'],['N','g-r']):
xlab=ax[i].set_xlabel(x_lab)
ylab=ax[i].set_ylabel(y_lab)
ax[1].set_xlim(rzrange)
ax[1].set_ylim(grrange)
fig.subplots_adjust(wspace=0.25)
log.info('Writing {}'.format(qafile))
plt.savefig(qafile,bbox_extra_artists=[xlab,ylab], bbox_inches='tight')
plt.close()
def plot_good_bad_ugly(allsimcat,bigsimcat,bigsimcat_missing, nmagbin,rminmax, b_good,b_bad, log,qafile='test.png'):
#rmaghist, magbins = np.histogram(allsimcat['r'], bins=nmagbin, range=rminmax)
bigsimcat_R= flux2mag(bigsimcat['rflux'])
bigsimcat_miss_R= flux2mag(bigsimcat_missing['rflux'])
found=dict(good={},bad={},missed={})
for index,name in zip([b_good,b_bad],['good','bad']):
# bin on true r mag of matched objects, count objects in each bin
found[name]= bin_up(bigsimcat_R[index],bigsimcat_R[index], bin_minmax=rminmax,nbins=nmagbin) # bin_edges=magbins)
name='missed'
found[name]= bin_up(bigsimcat_miss_R,bigsimcat_miss_R, bin_minmax=rminmax,nbins=nmagbin) #bin_edges=magbins)
fig, ax = plt.subplots(1, figsize=(8,6))
for name,color in zip(['good','bad','missed'],['k','b','r']):
ax.step(found[name]['binc'],found[name]['n'], c=color,lw=2,label=name)
xlab=ax.set_xlabel('Input r AB')
ylab=ax.set_ylabel('Number Recovered')
leg=ax.legend(loc=(0.,1.01),ncol=3)
#fig.subplots_adjust(bottom=0.15)
log.info('Writing {}'.format(qafile))
plt.savefig(qafile, bbox_extra_artists=[leg,xlab,ylab], bbox_inches='tight')
plt.close()
def plot_tractor_minus_answer(bigsimcat,bigtractor, b_good,rminmax, log,qafile='test.png'):
fig, ax = plt.subplots(3, sharex=True, figsize=(6,8))
col = ['b', 'k', 'c', 'm', 'y', 0.8]
rmag = bigsimcat['rflux']
for thisax, thiscolor, band, indx in zip(ax, col, ('g','r','z'), (1, 2, 4)):
inputflux = bigsimcat[band+'flux']
tractorflux = bigtractor['decam_flux'][:, indx]
tractorivar = bigtractor['decam_flux_ivar'][:, indx]
#import pickle
#fout=open('test.pickle','w')
#pickle.dump((tractorflux,inputflux,b_good),fout)
#fout.close()
#print('exiting early')
#sys.exit()
inputmag = 22.5-2.5*np.log10(inputflux[b_good])
mag_diff= -2.5*np.log10(tractorflux[b_good]/inputflux[b_good])
thisax.scatter(inputmag, mag_diff,
s=10,edgecolor=thiscolor,c='none',lw=1.)
thisax.set_ylim(-0.1,0.1)
thisax.set_xlim(inputmag.min()-0.1, inputmag.max()+0.1)
thisax.axhline(y=0.0,lw=2,ls='solid',color='gray')
#arr= np.ma.masked_array(mag_diff, mask= np.isfinite(mag_diff) == False)
#med= np.median(arr)
#print('arr=',arr)
med,junk1,junk2= bright_dmag_cut(bigsimcat,bigtractor)
thisax.axhline(y=med[band],lw=2,ls='dashed',color='red',label='Median=%.3f' % med[band])
thisax.legend(loc='upper left',fontsize='x-small')
#thisax.text(0.05,0.05, band.lower(), horizontalalignment='left',
#verticalalignment='bottom',transform=thisax.transAxes,
#fontsize=16)
ax[0].set_ylabel('$\Delta$g')
ax[1].set_ylabel('$\Delta$r (Tractor - Input)')
ylab=ax[2].set_ylabel('$\Delta$z')
xlab=ax[2].set_xlabel('Input magnitude (AB mag)')
fig.subplots_adjust(left=0.18,hspace=0.1)
log.info('Writing {}'.format(qafile))
plt.savefig(qafile,bbox_extra_artists=[xlab,ylab], bbox_inches='tight')
plt.close()
def plot_chi(bigsimcat,bigtractor, b_good,rminmax, log,qafile='test.png'):
col = ['b', 'k', 'c', 'm', 'y', 0.8]
fig, ax = plt.subplots(3, sharex=True, figsize=(6,8))
rmag = flux2mag(bigsimcat['rflux'])
for thisax, thiscolor, band, indx in zip(ax, col, ('g', 'r', 'z'), (1, 2, 4)):
simflux = bigsimcat[band+'flux']
tractorflux = bigtractor['decam_flux'][:, indx]
tractorivar = bigtractor['decam_flux_ivar'][:, indx]
#thisax.scatter(rmag[bcut], -2.5*np.log10(tractorflux[bcut]/simflux[bcut]),
# s=10,edgecolor=newcol,c='none',lw=1.,label=label)
thisax.scatter(rmag[b_good], (tractorflux[b_good] - simflux[b_good])*np.sqrt(tractorivar[b_good]),
s=10,edgecolor=thiscolor,c='none',lw=1.)
#thisax.set_ylim(-0.7,0.7)
thisax.set_ylim(-4,4)
thisax.set_xlim(rminmax + [-0.1, 0.0])
thisax.axhline(y=0.0,lw=2,ls='solid',color='gray')
#thisax.text(0.05,0.05, band.lower(), horizontalalignment='left',
#verticalalignment='bottom',transform=thisax.transAxes,
#fontsize=16)
for i,b in enumerate(['g','r','z']):
ylab=ax[i].set_ylabel(r'%s: $(F_{tractor} - F)/\sigma_{tractor}$' % b)
#ax[0].set_ylabel('$\Delta$g')
#ax[1].set_ylabel('$\Delta$r (Tractor minus Input)')
#ax[2].set_ylabel('$\Delta$z')
xlab=ax[2].set_xlabel('Input r magnitude (AB mag)')
fig.subplots_adjust(left=0.18,hspace=0.1)
log.info('Writing {}'.format(qafile))
plt.savefig(qafile,bbox_extra_artists=[xlab,ylab], bbox_inches='tight')
plt.close()
def plot_color_tractor_minus_answer(bigtractor,bigsimcat, rminmax, brickname,lobjtype, log,qafile='test.png'):
gr_tra = -2.5*np.log10(bigtractor['decam_flux'][:, 1]/bigtractor['decam_flux'][:, 2])
rz_tra = -2.5*np.log10(bigtractor['decam_flux'][:, 2]/bigtractor['decam_flux'][:, 4])
gr_sim = -2.5*np.log10(bigsimcat['gflux']/bigsimcat['rflux'])
rz_sim = -2.5*np.log10(bigsimcat['rflux']/bigsimcat['zflux'])
rmag = flux2mag(bigsimcat['rflux'])
col = ['b', 'k', 'c', 'm', 'y', 0.8]
fig, ax = plt.subplots(2,sharex=True,figsize=(6,8))
ax[0].scatter(rmag, gr_tra-gr_sim, color=col[0], s=10)
ax[1].scatter(rmag, rz_tra-rz_sim, color=col[1], s=10)
[thisax.set_ylim(-0.7,0.7) for thisax in ax]
[thisax.set_xlim(rminmax + [-0.1, 0.0]) for thisax in ax]
[thisax.axhline(y=0.0, lw=2, ls='solid', color='gray') for thisax in ax]
ax[0].set_ylabel('$\Delta$(g - r) (Tractor minus Input)')
ax[1].set_ylabel('$\Delta$(r - z) (Tractor minus Input)')
ax[1].set_xlabel('Input r magnitude (AB mag)')
fig.subplots_adjust(left=0.18,hspace=0.1)
log.info('Writing {}'.format(qafile))
plt.savefig(qafile)
plt.close()
def plot_fraction_recovered(allsimcat,bigsimcat, nmagbin,rminmax, brickname, lobjtype, log,qafile='test.png'):
allsimcat_R= flux2mag(allsimcat['rflux'])
bigsimcat_R= flux2mag(bigsimcat['rflux'])
rmaghist, magbins = np.histogram(allsimcat_R, bins=nmagbin, range=rminmax)
cmagbins = (magbins[:-1] + magbins[1:]) / 2.0
ymatch, binsmatch = np.histogram(bigsimcat_R, bins=nmagbin, range=rminmax)
fig, ax = plt.subplots(1, figsize=(8,6))
ax.step(cmagbins, 1.0*ymatch/rmaghist, c='k',lw=3,label='All objects')
#ax.step(cmagbins, 1.0*ymatchgood/rmaghist, lw=3, ls='dashed', label='|$\Delta$m|<0.3')
ax.axhline(y=1.0,lw=2,ls='dashed',color='k')
ax.set_xlabel('Input r magnitude (AB mag)')
ax.set_ylabel('Fraction of {}s Recovered'.format(lobjtype))
ax.set_ylim([0.0, 1.1])
ax.legend(loc='lower left')
fig.subplots_adjust(bottom=0.15)
log.info('Writing {}'.format(qafile))
plt.savefig(qafile)
plt.close()
def plot_sn_recovered(allsimcat,bigsimcat,bigtractor, brickname, lobjtype, log,qafile='test.png'):
allsimcat_R= flux2mag(allsimcat['rflux'])
# min,max mag of all bands
grrange = (-0.2, 2.0)
rzrange = (-0.4, 2.5)
rmin,rmax= allsimcat_R.min(), allsimcat_R.max()
mag_min= np.min((rmin,rmin+grrange[0],rmin-rzrange[1]))
mag_max= np.max((rmax,rmax+grrange[1],rmax-rzrange[0]))
s2n=dict(g={},r={},z={})
for band,ith in zip(['g','r','z'],[1,2,4]):
mag= 22.5-2.5*np.log10(bigsimcat[band+'flux'])
# HARDCODED mag range
s2n[band]= bin_up(mag, bigtractor['decam_flux'][:,ith]*np.sqrt(bigtractor['decam_flux_ivar'][:,ith]), \
bin_minmax=(18,26),nbins=20)
fig, ax = plt.subplots(1, figsize=(8,6))
xlab=ax.set_xlabel('Input magnitude (AB)')
ylab=ax.set_ylabel(r'Median S/N = $F/\sigma$',fontweight='bold',fontsize='large')
title= ax.set_title('S/N of Recovered Objects')
for band,color in zip(['g','r','z'],['g','r','b']):
ax.plot(s2n[band]['binc'], s2n[band]['q50'],c=color,ls='-',lw=2,label=band)
#ax.fill_between(s2n[band]['cbin'],s2n[band]['q25'],s2n[band]['q75'],color=color,alpha=0.25)
ax.axhline(y=5.,lw=2,ls='dashed',color='k',label='S/N = 5')
ax.set_yscale('log')
leg=ax.legend(loc=3)
fig.subplots_adjust(bottom=0.15)
log.info('Writing {}'.format(qafile))
plt.savefig(qafile,bbox_extra_artists=[leg,xlab,ylab,title], bbox_inches='tight',dpi=150)
plt.close()
def plot_recovered_types(bigsimcat,bigtractor, nmagbin,rminmax, objtype,log,qafile='test.png'):
bigsimcat_R= flux2mag(bigsimcat['rflux'])
fig = plt.figure(figsize=(8, 6))
ax = fig.gca()
rmaghist, magbins = np.histogram(bigsimcat_R, bins=nmagbin, range=rminmax)
cmagbins = (magbins[:-1] + magbins[1:]) / 2.0
tractortype = np.char.strip(bigtractor['type'].data)
for otype in ['PSF', 'SIMP', 'EXP', 'DEV', 'COMP']:
these = np.where(tractortype == otype)[0]
if len(these)>0:
yobj, binsobj = np.histogram(bigsimcat_R[these], bins=nmagbin, range=rminmax)
#plt.step(cmagbins,1.0*yobj,lw=3,alpha=0.5,label=otype)
plt.step(cmagbins,1.0*yobj/rmaghist,lw=3,alpha=0.5,label=otype)
plt.axhline(y=1.0,lw=2,ls='dashed',color='gray')
plt.xlabel('Input r magnitude (AB mag)')
#plt.ylabel('Number of Objects')
plt.ylabel('Fraction of {}s classified'.format(objtype))
plt.ylim([0.0,1.1])
plt.legend(loc='center left', bbox_to_anchor=(0.08,0.5))
fig.subplots_adjust(bottom=0.15)
log.info('Writing {}'.format(qafile))
plt.savefig(qafile)
plt.close()
def create_confusion_matrix(answer_type,predict_type, types=['PSF','SIMP','EXP','DEV','COMP'],slim=True):
'''compares classifications of matched objects, returns 2D array which is conf matrix and xylabels
return 5x5 confusion matrix and column/row names
answer_type,predict_type -- arrays of same length with reference and prediction types'''
for typ in set(answer_type): assert(typ in types)
for typ in set(predict_type): assert(typ in types)
# if a type was not in answer (training) list then don't put in cm
if slim: ans_types= set(answer_type)
# put in cm regardless
else: ans_types= set(types)
cm=np.zeros((len(ans_types),len(types)))-1
for i_ans,ans_type in enumerate(ans_types):
ind= np.where(answer_type == ans_type)[0]
for i_pred,pred_type in enumerate(types):
n_pred= np.where(predict_type[ind] == pred_type)[0].size
if ind.size > 0: cm[i_ans,i_pred]= float(n_pred)/ind.size # ind.size is constant for loop over pred_types
else: cm[i_ans,i_pred]= np.nan
if slim: return cm,ans_types,types #size ans_types != types
else: return cm,types
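# Example: if answer_type is all 'PSF' and predict_type is half 'PSF', half
# 'EXP', the (slim) matrix is the single row [0.5, 0., 0.5, 0., 0.] ordered as
# ['PSF', 'SIMP', 'EXP', 'DEV', 'COMP']: each row holds the fraction of true
# objects of that type assigned to each predicted type.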
def plot_confusion_matrix(cm,answer_names,all_names, log, qafile='test.png'):
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues, vmin=0,vmax=1)
cbar=plt.colorbar()
plt.xticks(range(len(all_names)), all_names)
plt.yticks(range(len(answer_names)), answer_names)
ylab=plt.ylabel('True')
xlab=plt.xlabel('Predicted (tractor)')
for row in range(len(answer_names)):
for col in range(len(all_names)):
if np.isnan(cm[row,col]):
plt.text(col,row,'n/a',va='center',ha='center')
elif cm[row,col] > 0.5:
plt.text(col,row,'%.2f' % cm[row,col],va='center',ha='center',color='yellow')
else:
plt.text(col,row,'%.2f' % cm[row,col],va='center',ha='center',color='black')
log.info('Writing {}'.format(qafile))
plt.savefig(qafile, bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
def plot_cm_stack(cm_stack,stack_names,all_names, log, qafile='test.png'):
'''cm_stack -- list of single row confusion matrices
stack_names -- list of same len as cm_stack, names for each row of cm_stack'''
# combine list into single cm
cm=np.zeros((len(cm_stack),len(all_names)))+np.nan
for i in range(cm.shape[0]): cm[i,:]= cm_stack[i]
# make usual cm, but labels repositioned
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
cbar=plt.colorbar()
plt.xticks(range(len(all_names)), all_names)
plt.yticks(range(len(stack_names)), stack_names)
ylab=plt.ylabel('True = PSF')
xlab=plt.xlabel('Predicted (tractor)')
for row in range(len(stack_names)):
for col in range(len(all_names)):
if np.isnan(cm[row,col]):
plt.text(col,row,'n/a',va='center',ha='center')
elif cm[row,col] > 0.5:
plt.text(col,row,'%.2f' % cm[row,col],va='center',ha='center',color='yellow')
else:
plt.text(col,row,'%.2f' % cm[row,col],va='center',ha='center',color='black')
#if np.isnan(cm[row,col]):
# plt.text(col,row,'n/a',va='center',ha='center')
#else: plt.text(col,row,'%.2f' % cm[row,col],va='center',ha='center')
log.info('Writing {}'.format(qafile))
plt.savefig(qafile, bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
def make_stacked_cm(bigsimcat,bigtractor, b_good, log,qafile='test.png'):
bigsimcat_R= flux2mag(bigsimcat['rflux'])
types= ['PSF ', 'SIMP', 'EXP ', 'DEV ', 'COMP']
cm_stack,stack_names=[],[]
rbins= np.array([18.,20.,22.,23.,24.])
for rmin,rmax in zip(rbins[:-1],rbins[1:]):
# master cut
br_cut= np.all((bigsimcat_R > rmin,bigsimcat_R <= rmax, b_good),axis=0)
stack_names+= ["%d < r <= %d" % (int(rmin),int(rmax))]
cm,ans_names,all_names= create_confusion_matrix(np.array(['PSF ']*bigtractor['ra'].data[br_cut].shape[0]),
bigtractor['type'].data[br_cut], \
types=types)
cm_stack+= [cm]
plot_cm_stack(cm_stack, stack_names,all_names, log, qafile=qafile)
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='DECaLS simulations.')
parser.add_argument('-b', '--brick', type=str, default='2428p117', metavar='',
help='process this brick (required input)')
parser.add_argument('-o', '--objtype', type=str, choices=['star','qso','elg','lrg'], default='star', metavar='',
help='object type')
parser.add_argument('-out', '--output_dir', type=str, default=None, metavar='',
help='relative path to output directory')
parser.add_argument('-extra', '--extra_plots', action='store_true',
                        help='make missing, annotated, and coadded plots; skip this when there are many chunks')
parser.add_argument('-v', '--verbose', action='store_true',
help='toggle on verbose output')
args = parser.parse_args()
if args.brick is None:
parser.print_help()
sys.exit(1)
if args.extra_plots: extra_plots= True
else: extra_plots= False
print('extra_plots=',extra_plots)
# Set the debugging level
if args.verbose:
lvl = logging.DEBUG
else:
lvl = logging.INFO
logging.basicConfig(format='%(message)s', level=lvl, stream=sys.stdout)
    log = logging.getLogger(__name__)
brickname = args.brick
objtype = args.objtype.upper()
lobjtype = objtype.lower()
log.info('Analyzing objtype {} on brick {}'.format(objtype, brickname))
if 'DECALS_SIM_DIR' in os.environ:
decals_sim_dir = os.getenv('DECALS_SIM_DIR')
else:
decals_sim_dir = '.'
input_dir= os.path.join(decals_sim_dir,lobjtype,brickname[:3],brickname,'rowstart0')
#if args.output_dir is None: output_dir= os.path.join(decals_sim_dir,lobjtype,brickname[:3],brickname,'qaplots_'+lobjtype)
output_dir= os.path.join(os.path.dirname(input_dir),'qaplots_'+lobjtype)
#else: output_dir= args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Plotting preferences
#sns.set(style='white',font_scale=1.6,palette='dark')#,font='fantasy')
#col = sns.color_palette('dark')
col = ['b', 'k', 'c', 'm', 'y', 0.8]
# Read metadata catalog.
metafile = os.path.join(input_dir, 'metacat-{}-{}-rowstart0.fits'.format(lobjtype,brickname))
log.info('Reading {}'.format(metafile))
meta = fits.getdata(metafile, 1)
# We need this for our histograms below
magbinsz = 0.2
rminmax = np.array([15.,24.]) #np.squeeze(meta['RMAG_RANGE'])
    nmagbin = int((rminmax[1]-rminmax[0])/magbinsz)
# Work in chunks.
allsimcat = []
bigsimcat = []
    bigsimcat_missing = []
bigtractor = []
chunk_dirs= glob.glob(os.path.join(os.path.dirname(input_dir),'rowstart*'))
nchunk= len(chunk_dirs)
    if nchunk == 0: raise ValueError('no rowstart* chunk directories found in {}'.format(os.path.dirname(input_dir)))
    # Loop over the chunk dirs 000,001,...,999 (note: as written, only the first chunk is processed)
for ichunk,cdir in enumerate([chunk_dirs[0]]):
chunksuffix = os.path.basename(cdir) #'{:02d}'.format(ichunk)
log.info('Working on chunk {:02d}/{:02d}'.format(ichunk+1, nchunk))
# Read the simulated object catalog
simcatfile = os.path.join(cdir, 'simcat-{}-{}-{}.fits'.format(lobjtype,brickname,chunksuffix))
log.info('Reading {}'.format(simcatfile))
simcat = Table(fits.getdata(simcatfile, 1))
# Read Tractor catalog
tractorfile = os.path.join(cdir, 'tractor-{}-{}-{}.fits'.format(lobjtype,brickname,chunksuffix))
log.info('Reading {}'.format(tractorfile))
tractor = Table(fits.getdata(tractorfile, 1))
# Match
cat1 = SkyCoord(ra=tractor['ra']*units.degree, dec=tractor['dec']*units.degree)
cat2 = SkyCoord(ra=simcat['ra']*units.degree, dec=simcat['dec']*units.degree)
m2, d2d, d3d = cat1.match_to_catalog_3d(cat2)
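        # m2[i] is the index into simcat of the closest simulated source to
        # tractor source i; keep only pairs separated by <= 1 arcsec on the sky.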
b= np.array(d2d) <= 1./3600
m2= np.array(m2)[b]
m1= np.arange(len(tractor))[b]
print('matched %d/%d' % (len(m2),len(simcat['ra'])))
missing = np.delete(np.arange(len(simcat)), m2, axis=0)
log.info('Missing {}/{} sources'.format(len(missing), len(simcat)))
#good = np.where((np.abs(tractor['decam_flux'][m1,2]/simcat['rflux'][m2]-1)<0.3)*1)
# Build matching catalogs for the plots below.
if len(bigsimcat) == 0:
bigsimcat = simcat[m2]
bigtractor = tractor[m1]
bigsimcat_missing = simcat[missing]
else:
bigsimcat = vstack((bigsimcat, simcat[m2]))
bigtractor = vstack((bigtractor, tractor[m1]))
bigsimcat_missing = vstack((bigsimcat_missing, simcat[missing]))
if len(allsimcat) == 0:
allsimcat = simcat
else:
allsimcat = vstack((allsimcat, simcat))
# Get cutouts of the bright matched sources with small/large delta mag
if extra_plots:
for img_name in ['image','resid','simscoadd']:
# Indices of large and small dmag
junk,i_large_dmag,i_small_dmag= bright_dmag_cut(simcat[m2],tractor[m1])
# Large dmag cutouts
qafile = os.path.join(output_dir, 'qa-{}-{}-{}-bright-large-dmag-{:02d}.png'.format(\
brickname, lobjtype, img_name, int(chunksuffix)))
plot_cutouts_by_index(simcat,i_large_dmag, brickname,lobjtype,chunksuffix, \
indir=cdir,img_name=img_name,qafile=qafile)
log.info('Wrote {}'.format(qafile))
# Small dmag cutouts
qafile = os.path.join(output_dir, 'qa-{}-{}-{}-bright-small-dmag-{:02d}.png'.format(\
brickname, lobjtype, img_name, int(chunksuffix)))
plot_cutouts_by_index(simcat,i_small_dmag, brickname,lobjtype,chunksuffix, \
indir=cdir,img_name=img_name,qafile=qafile)
log.info('Wrote {}'.format(qafile))
# Get cutouts of the missing sources in each chunk (if any)
if len(missing) > 0 and extra_plots:
simcat_R= flux2mag(simcat['rflux'])
for img_name in ['image']: #,'simscoadd']:
qafile = os.path.join(output_dir, 'qa-{}-{}-{}-missing-{:02d}.png'.format(\
brickname, lobjtype, img_name, int(chunksuffix)))
miss = missing[np.argsort(simcat_R[missing])]
plot_cutouts_by_index(simcat,miss, brickname,lobjtype,chunksuffix, \
indir=cdir,img_name=img_name,qafile=qafile)
log.info('Wrote {}'.format(qafile))
# Annotate the coadd image and residual files so the simulated sources
# are labeled.
if extra_plots:
for img_name in ('simscoadd','image', 'resid'):
qafile = os.path.join(output_dir, 'qa-{}-{}-{}-{:02d}-annot.png'.format(\
brickname, lobjtype,img_name, int(chunksuffix)))
plot_annotated_coadds(simcat, brickname, lobjtype, chunksuffix, \
indir=cdir,img_name=img_name,qafile=qafile)
log.info('Wrote {}'.format(qafile))
# now operate on concatenated catalogues from multiple chunks
# Grab flags
b_good,b_bad= basic_cut(bigtractor)
# mags and colors of ALL injected sources
plot_injected_mags(allsimcat, log, qafile=\
os.path.join(output_dir, 'qa-{}-{}-injected-mags.png'.format(brickname, lobjtype)))
# number of detected sources that are bad, good and number of undetected, binned by r mag
plot_good_bad_ugly(allsimcat,bigsimcat,bigsimcat_missing, nmagbin,rminmax, b_good,b_bad, log, qafile=\
os.path.join(output_dir, 'qa-{}-{}-N-good-bad-missed.png'.format(brickname, lobjtype)))
# Flux residuals vs input magnitude
plot_tractor_minus_answer(bigsimcat,bigtractor, b_good,rminmax, log, qafile=\
os.path.join(output_dir, 'qa-{}-{}-good-flux.png'.format(brickname, lobjtype)))
# chi plots: Flux residual / estimated Flux error
plot_chi(bigsimcat,bigtractor, b_good,rminmax, log, qafile=\
os.path.join(output_dir, 'qa-{}-{}-chi-good.png'.format(brickname, lobjtype)))
# Color residuals
plot_color_tractor_minus_answer(bigtractor,bigsimcat, rminmax, brickname,lobjtype, log, qafile =\
os.path.join(output_dir, 'qa-{}-{}-color.png'.format(brickname, lobjtype)))
# Fraction of recovered sources
plot_fraction_recovered(allsimcat,bigsimcat, nmagbin,rminmax, brickname, lobjtype, log, qafile =\
os.path.join(output_dir, 'qa-{}-{}-frac.png'.format(brickname, lobjtype)))
# S/N of recovered sources (S/N band vs. AB mag band)
plot_sn_recovered(allsimcat,bigsimcat,bigtractor, brickname, lobjtype, log, qafile =\
os.path.join(output_dir, 'qa-{}-{}-SN.png'.format(brickname, lobjtype)))
# Distribution of object types for matching sources.
plot_recovered_types(bigsimcat,bigtractor, nmagbin,rminmax, objtype,log, qafile =\
os.path.join(output_dir, 'qa-{}-{}-type.png'.format(brickname, lobjtype)))
# Confusion matrix for distribution of object types
# Basic cm, use slim=False
types= ['PSF ', 'SIMP', 'EXP ', 'DEV ', 'COMP']
cm,all_names= create_confusion_matrix(np.array(['PSF ']*bigtractor['ra'].data[b_good].shape[0]),
bigtractor['type'].data[b_good], \
types=types,slim=False)
qafile = os.path.join(output_dir, 'qa-{}-{}-{}-confusion.png'.format(brickname, lobjtype,'good'))
plot_confusion_matrix(cm,all_names,all_names, log,qafile)
# Now a stacked confusion matrix
# Compute a row for each r mag range and stack rows
make_stacked_cm(bigsimcat,bigtractor, b_good, log,qafile =\
os.path.join(output_dir, 'qa-{}-{}-good-confusion-stack.png'.format(brickname, lobjtype)))
'''
# Morphology plots
if objtype=='ELG':
fig = plt.figure(figsize=(8,4))
plt.subplot(1,3,1)
plt.plot(rmag,deltam,'s',markersize=3)
plt.axhline(y=0.0,lw=2,ls='solid',color='gray')
plt.xlim(rminmax)
plt.xlabel('r (AB mag)')
plt.subplot(1,3,2)
plt.plot(bigsimcat['R50_1'],deltam,'s',markersize=3)
plt.axhline(y=0.0,lw=2,ls='solid',color='gray')
plt.xlabel('$r_{50}$ (arcsec)')
plt.subplot(1,3,3)
plt.plot(bigsimcat['BA_1'],deltam,'s',markersize=3)
plt.axhline(y=0.0,lw=2,ls='solid',color='gray')
plt.xlabel('b/a')
plt.xlim([0.2,1.0])
fig.subplots_adjust(bottom=0.18)
qafile = os.path.join(output_dir,'qa-'+brickname+'-'+lobjtype+'-morph.png')
log.info('Writing {}'.format(qafile))
plt.savefig(qafile)
'''
if __name__ == "__main__":
main()
|
gpl-2.0
|
GuessWhoSamFoo/pandas
|
pandas/tests/indexes/datetimes/test_misc.py
|
2
|
13926
|
import calendar
import locale
import unicodedata
import numpy as np
import pytest
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, compat, date_range, datetime, offsets)
import pandas.util.testing as tm
class TestTimeSeries(object):
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
tm.assert_numpy_array_equal(idx.values, expected.values)
def test_range_edges(self):
# GH#13672
idx = pd.date_range(start=Timestamp('1970-01-01 00:00:00.000000001'),
end=Timestamp('1970-01-01 00:00:00.000000004'),
freq='N')
exp = DatetimeIndex(['1970-01-01 00:00:00.000000001',
'1970-01-01 00:00:00.000000002',
'1970-01-01 00:00:00.000000003',
'1970-01-01 00:00:00.000000004'])
tm.assert_index_equal(idx, exp)
idx = pd.date_range(start=Timestamp('1970-01-01 00:00:00.000000004'),
end=Timestamp('1970-01-01 00:00:00.000000001'),
freq='N')
exp = DatetimeIndex([])
tm.assert_index_equal(idx, exp)
idx = pd.date_range(start=Timestamp('1970-01-01 00:00:00.000000001'),
end=Timestamp('1970-01-01 00:00:00.000000001'),
freq='N')
exp = DatetimeIndex(['1970-01-01 00:00:00.000000001'])
tm.assert_index_equal(idx, exp)
idx = pd.date_range(start=Timestamp('1970-01-01 00:00:00.000001'),
end=Timestamp('1970-01-01 00:00:00.000004'),
freq='U')
exp = DatetimeIndex(['1970-01-01 00:00:00.000001',
'1970-01-01 00:00:00.000002',
'1970-01-01 00:00:00.000003',
'1970-01-01 00:00:00.000004'])
tm.assert_index_equal(idx, exp)
idx = pd.date_range(start=Timestamp('1970-01-01 00:00:00.001'),
end=Timestamp('1970-01-01 00:00:00.004'),
freq='L')
exp = DatetimeIndex(['1970-01-01 00:00:00.001',
'1970-01-01 00:00:00.002',
'1970-01-01 00:00:00.003',
'1970-01-01 00:00:00.004'])
tm.assert_index_equal(idx, exp)
idx = pd.date_range(start=Timestamp('1970-01-01 00:00:01'),
end=Timestamp('1970-01-01 00:00:04'), freq='S')
exp = DatetimeIndex(['1970-01-01 00:00:01', '1970-01-01 00:00:02',
'1970-01-01 00:00:03', '1970-01-01 00:00:04'])
tm.assert_index_equal(idx, exp)
idx = pd.date_range(start=Timestamp('1970-01-01 00:01'),
end=Timestamp('1970-01-01 00:04'), freq='T')
exp = DatetimeIndex(['1970-01-01 00:01', '1970-01-01 00:02',
'1970-01-01 00:03', '1970-01-01 00:04'])
tm.assert_index_equal(idx, exp)
idx = pd.date_range(start=Timestamp('1970-01-01 01:00'),
end=Timestamp('1970-01-01 04:00'), freq='H')
exp = DatetimeIndex(['1970-01-01 01:00', '1970-01-01 02:00',
'1970-01-01 03:00', '1970-01-01 04:00'])
tm.assert_index_equal(idx, exp)
idx = pd.date_range(start=Timestamp('1970-01-01'),
end=Timestamp('1970-01-04'), freq='D')
exp = DatetimeIndex(['1970-01-01', '1970-01-02',
'1970-01-03', '1970-01-04'])
tm.assert_index_equal(idx, exp)
class TestDatetime64(object):
def test_datetimeindex_accessors(self):
dti_naive = pd.date_range(freq='D', start=datetime(1998, 1, 1),
periods=365)
# GH#13303
dti_tz = pd.date_range(freq='D', start=datetime(1998, 1, 1),
periods=365, tz='US/Eastern')
for dti in [dti_naive, dti_tz]:
assert dti.year[0] == 1998
assert dti.month[0] == 1
assert dti.day[0] == 1
assert dti.hour[0] == 0
assert dti.minute[0] == 0
assert dti.second[0] == 0
assert dti.microsecond[0] == 0
assert dti.dayofweek[0] == 3
assert dti.dayofyear[0] == 1
assert dti.dayofyear[120] == 121
assert dti.weekofyear[0] == 1
assert dti.weekofyear[120] == 18
assert dti.quarter[0] == 1
assert dti.quarter[120] == 2
assert dti.days_in_month[0] == 31
assert dti.days_in_month[90] == 30
assert dti.is_month_start[0]
assert not dti.is_month_start[1]
assert dti.is_month_start[31]
assert dti.is_quarter_start[0]
assert dti.is_quarter_start[90]
assert dti.is_year_start[0]
assert not dti.is_year_start[364]
assert not dti.is_month_end[0]
assert dti.is_month_end[30]
assert not dti.is_month_end[31]
assert dti.is_month_end[364]
assert not dti.is_quarter_end[0]
assert not dti.is_quarter_end[30]
assert dti.is_quarter_end[89]
assert dti.is_quarter_end[364]
assert not dti.is_year_end[0]
assert dti.is_year_end[364]
assert len(dti.year) == 365
assert len(dti.month) == 365
assert len(dti.day) == 365
assert len(dti.hour) == 365
assert len(dti.minute) == 365
assert len(dti.second) == 365
assert len(dti.microsecond) == 365
assert len(dti.dayofweek) == 365
assert len(dti.dayofyear) == 365
assert len(dti.weekofyear) == 365
assert len(dti.quarter) == 365
assert len(dti.is_month_start) == 365
assert len(dti.is_month_end) == 365
assert len(dti.is_quarter_start) == 365
assert len(dti.is_quarter_end) == 365
assert len(dti.is_year_start) == 365
assert len(dti.is_year_end) == 365
assert len(dti.weekday_name) == 365
dti.name = 'name'
# non boolean accessors -> return Index
for accessor in DatetimeIndex._field_ops:
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, Index)
assert res.name == 'name'
# boolean accessors -> return array
for accessor in DatetimeIndex._bool_ops:
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, np.ndarray)
# test boolean indexing
res = dti[dti.is_quarter_start]
exp = dti[[0, 90, 181, 273]]
tm.assert_index_equal(res, exp)
res = dti[dti.is_leap_year]
exp = DatetimeIndex([], freq='D', tz=dti.tz, name='name')
tm.assert_index_equal(res, exp)
dti = pd.date_range(freq='BQ-FEB', start=datetime(1998, 1, 1),
periods=4)
assert sum(dti.is_quarter_start) == 0
assert sum(dti.is_quarter_end) == 4
assert sum(dti.is_year_start) == 0
assert sum(dti.is_year_end) == 1
        # Ensure is_start/is_end accessors raise ValueError for CustomBusinessDay
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
msg = "Custom business days is not supported by is_month_start"
with pytest.raises(ValueError, match=msg):
dti.is_month_start
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
assert dti.is_month_start[0] == 1
tests = [
(Timestamp('2013-06-01', freq='M').is_month_start, 1),
(Timestamp('2013-06-01', freq='BM').is_month_start, 0),
(Timestamp('2013-06-03', freq='M').is_month_start, 0),
(Timestamp('2013-06-03', freq='BM').is_month_start, 1),
(Timestamp('2013-02-28', freq='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', freq='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', freq='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', freq='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', freq='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', freq='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', freq='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', freq='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', freq='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', freq='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', freq='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', freq='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', freq='BQ').is_month_end, 0),
(Timestamp('2013-06-30', freq='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', freq='BQ').is_year_end, 0),
(Timestamp('2013-06-28', freq='BQ').is_month_end, 1),
(Timestamp('2013-06-28', freq='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', freq='BQ').is_year_end, 0),
(Timestamp('2013-06-30', freq='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', freq='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', freq='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', freq='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', freq='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', freq='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', freq='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', freq='AS-NOV').is_year_end, 1),
(Timestamp('2012-02-01').days_in_month, 29),
(Timestamp('2013-02-01').days_in_month, 28)]
for ts, value in tests:
assert ts == value
# GH 6538: Check that DatetimeIndex and its TimeStamp elements
# return the same weekofyear accessor close to new year w/ tz
dates = ["2013/12/29", "2013/12/30", "2013/12/31"]
dates = DatetimeIndex(dates, tz="Europe/Brussels")
expected = [52, 1, 1]
assert dates.weekofyear.tolist() == expected
assert [d.weekofyear for d in dates] == expected
# GH 12806
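    # Run the locale-dependent name checks once with the default (English) names
    # and once for every locale available on the test machine.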
@pytest.mark.parametrize('time_locale', [
None] if tm.get_locales() is None else [None] + tm.get_locales())
def test_datetime_name_accessors(self, time_locale):
# Test Monday -> Sunday and January -> December, in that sequence
if time_locale is None:
            # If time_locale is None, day_name and month_name should
            # return the English names
expected_days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday']
expected_months = ['January', 'February', 'March', 'April', 'May',
'June', 'July', 'August', 'September',
'October', 'November', 'December']
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_days = calendar.day_name[:]
expected_months = calendar.month_name[1:]
# GH#11128
dti = pd.date_range(freq='D', start=datetime(1998, 1, 1),
periods=365)
english_days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday']
for day, name, eng_name in zip(range(4, 11),
expected_days,
english_days):
name = name.capitalize()
assert dti.weekday_name[day] == eng_name
assert dti.day_name(locale=time_locale)[day] == name
ts = Timestamp(datetime(2016, 4, day))
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert ts.weekday_name == eng_name
assert ts.day_name(locale=time_locale) == name
dti = dti.append(DatetimeIndex([pd.NaT]))
assert np.isnan(dti.day_name(locale=time_locale)[-1])
ts = Timestamp(pd.NaT)
assert np.isnan(ts.day_name(locale=time_locale))
# GH#12805
dti = pd.date_range(freq='M', start='2012', end='2013')
result = dti.month_name(locale=time_locale)
expected = Index([month.capitalize() for month in expected_months])
# work around different normalization schemes
# https://github.com/pandas-dev/pandas/issues/22342
if not compat.PY2:
result = result.str.normalize("NFD")
expected = expected.str.normalize("NFD")
tm.assert_index_equal(result, expected)
for date, expected in zip(dti, expected_months):
result = date.month_name(locale=time_locale)
expected = expected.capitalize()
if not compat.PY2:
result = unicodedata.normalize("NFD", result)
                        expected = unicodedata.normalize("NFD", expected)
assert result == expected
dti = dti.append(DatetimeIndex([pd.NaT]))
assert np.isnan(dti.month_name(locale=time_locale)[-1])
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
tm.assert_index_equal(dti.nanosecond,
pd.Index(np.arange(10, dtype=np.int64)))
|
bsd-3-clause
|
Lawrence-Liu/scikit-learn
|
examples/linear_model/plot_logistic_l1_l2_sparsity.py
|
384
|
2601
|
"""
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
|
bsd-3-clause
|
dimkal/mne-python
|
examples/inverse/plot_lcmv_beamformer_volume.py
|
18
|
3046
|
"""
===================================================================
Compute LCMV inverse solution on evoked data in volume source space
===================================================================
Compute LCMV inverse solution on an auditory evoked dataset in a volume source
space. It stores the solution in a nifti file for visualisation e.g. with
Freeview.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.io import Raw
from mne.beamformer import lcmv
from nilearn.plotting import plot_stat_map
from nilearn.image import index_img
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-vol-7-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
###############################################################################
# Get epochs
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = Raw(raw_fname)
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # 2 bad channels
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads', selection=left_temporal_channels)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()
forward = mne.read_forward_solution(fname_fwd)
# Read regularized noise covariance and compute regularized data covariance
noise_cov = mne.read_cov(fname_cov)
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
method='shrunk')
# Run free orientation (vector) beamformer. Source orientation can be
# restricted by setting pick_ori to 'max-power' (or 'normal' but only when
# using a surface-based source space)
stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01, pick_ori=None)
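# Note: reg=0.01 adds a small amount of regularization (diagonal loading) to the
# data covariance, which stabilizes the beamformer weights when the covariance
# estimate is noisy or rank-deficient.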
# Save result in stc files
stc.save('lcmv-vol')
stc.crop(0.0, 0.2)
# Save result in a 4D nifti file
img = mne.save_stc_as_volume('lcmv_inverse.nii.gz', stc,
forward['src'], mri_resolution=False)
t1_fname = data_path + '/subjects/sample/mri/T1.mgz'
# Plotting with nilearn ######################################################
plot_stat_map(index_img(img, 61), t1_fname, threshold=0.8,
title='LCMV (t=%.1f s.)' % stc.times[61])
# plot source time courses with the maximum peak amplitudes
plt.figure()
plt.plot(stc.times, stc.data[np.argsort(np.max(stc.data, axis=1))[-40:]].T)
plt.xlabel('Time (s)')
plt.ylabel('LCMV value')
plt.show()
|
bsd-3-clause
|
rajat1994/scikit-learn
|
examples/svm/plot_iris.py
|
225
|
3252
|
"""
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
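# (A smaller step gives a smoother-looking decision surface below, at the cost
# of a denser mesh and slower prediction.)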
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
|
bsd-3-clause
|
draperjames/bokeh
|
bokeh/models/tests/test_sources.py
|
2
|
9144
|
from __future__ import absolute_import
import unittest
from unittest import skipIf
import warnings
import numpy as np
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
from bokeh.models.sources import DataSource, ColumnDataSource
from bokeh.util.serialization import transform_column_source_data
class TestColumnDataSource(unittest.TestCase):
def test_basic(self):
ds = ColumnDataSource()
self.assertTrue(isinstance(ds, DataSource))
def test_init_dict_arg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
def test_init_dict_data_kwarg(self):
data = dict(a=[1], b=[2])
ds = ColumnDataSource(data=data)
self.assertEquals(ds.data, data)
self.assertEquals(set(ds.column_names), set(data.keys()))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_arg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertEquals(list(df[key]), data[key])
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
@skipIf(not is_pandas, "pandas not installed")
def test_init_pandas_data_kwarg(self):
data = dict(a=[1, 2], b=[2, 3])
df = pd.DataFrame(data)
ds = ColumnDataSource(data=df)
self.assertTrue(set(df.columns).issubset(set(ds.column_names)))
for key in data.keys():
self.assertEquals(list(df[key]), data[key])
self.assertEqual(set(ds.column_names) - set(df.columns), set(["index"]))
def test_add_with_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], name="foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6], name="bar")
self.assertEquals(name, "bar")
def test_add_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3])
self.assertEquals(name, "Series 0")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_add_with_and_without_name(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
self.assertEquals(name, "foo")
name = ds.add([4,5,6])
self.assertEquals(name, "Series 1")
def test_remove_exists(self):
ds = ColumnDataSource()
name = ds.add([1,2,3], "foo")
assert name
ds.remove("foo")
self.assertEquals(ds.column_names, [])
def test_remove_exists2(self):
with warnings.catch_warnings(record=True) as w:
ds = ColumnDataSource()
ds.remove("foo")
self.assertEquals(ds.column_names, [])
self.assertEquals(len(w), 1)
self.assertEquals(w[0].category, UserWarning)
self.assertEquals(str(w[0].message), "Unable to find column 'foo' in data source")
def test_stream_bad_data(self):
ds = ColumnDataSource(data=dict(a=[10], b=[20]))
with self.assertRaises(ValueError) as cm:
ds.stream(dict())
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (missing: a, b)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10]))
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (missing: b)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], b=[10], x=[10]))
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (extra: x)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], x=[10]))
self.assertEqual(str(cm.exception), "Must stream updates to all existing columns (missing: b, extra: x)")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], b=[10, 20]))
self.assertEqual(str(cm.exception), "All streaming column updates must be the same length")
with self.assertRaises(ValueError) as cm:
ds.stream(dict(a=[10], b=np.ones((1,1))))
self.assertTrue(
str(cm.exception).startswith("stream(...) only supports 1d sequences, got ndarray with size (")
)
def test_stream_good_data(self):
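        # Replace the low-level _stream hook with a mock so we can check exactly
        # what ColumnDataSource.stream forwards: (document, source, new_data,
        # rollover, setter).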
ds = ColumnDataSource(data=dict(a=[10], b=[20]))
ds._document = "doc"
stuff = {}
mock_setter = object()
def mock(*args, **kw):
stuff['args'] = args
stuff['kw'] = kw
ds.data._stream = mock
ds.stream(dict(a=[11, 12], b=[21, 22]), "foo", mock_setter)
self.assertEqual(stuff['args'], ("doc", ds, dict(a=[11, 12], b=[21, 22]), "foo", mock_setter))
self.assertEqual(stuff['kw'], {})
def test_patch_bad_data(self):
ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
with self.assertRaises(ValueError) as cm:
ds.patch(dict(a=[(3, 100)]))
self.assertEqual(str(cm.exception), "Out-of bounds index (3) in patch for column: a")
with self.assertRaises(ValueError) as cm:
ds.patch(dict(c=[(0, 100)]))
self.assertEqual(str(cm.exception), "Can only patch existing columns (extra: c)")
with self.assertRaises(ValueError) as cm:
ds.patch(dict(a=[(0,100)], c=[(0, 100)], d=[(0, 100)]))
self.assertEqual(str(cm.exception), "Can only patch existing columns (extra: c, d)")
def test_patch_good_data(self):
ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
ds._document = "doc"
stuff = {}
mock_setter = object()
def mock(*args, **kw):
stuff['args'] = args
stuff['kw'] = kw
ds.data._patch = mock
ds.patch(dict(a=[(0,100), (1,101)], b=[(0,200)]), mock_setter)
self.assertEqual(stuff['args'], ("doc", ds, dict(a=[(0,100), (1,101)], b=[(0,200)]), mock_setter))
self.assertEqual(stuff['kw'], {})
def test_data_column_lengths(self):
# TODO: use this when soft=False
#
#with self.assertRaises(ValueError):
# ColumnDataSource(data=dict(a=[10, 11], b=[20, 21, 22]))
#
#ds = ColumnDataSource()
#with self.assertRaises(ValueError):
# ds.data = dict(a=[10, 11], b=[20, 21, 22])
#
#ds = ColumnDataSource(data=dict(a=[10, 11]))
#with self.assertRaises(ValueError):
# ds.data["b"] = [20, 21, 22]
#
#ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
#with self.assertRaises(ValueError):
# ds.data.update(dict(a=[10, 11, 12]))
with warnings.catch_warnings(record=True) as warns:
ColumnDataSource(data=dict(a=[10, 11], b=[20, 21, 22]))
self.assertEquals(len(warns), 1)
self.assertEquals(str(warns[0].message), "ColumnDataSource's columns must be of the same length")
ds = ColumnDataSource()
with warnings.catch_warnings(record=True) as warns:
ds.data = dict(a=[10, 11], b=[20, 21, 22])
self.assertEquals(len(warns), 1)
self.assertEquals(str(warns[0].message), "ColumnDataSource's columns must be of the same length")
ds = ColumnDataSource(data=dict(a=[10, 11]))
with warnings.catch_warnings(record=True) as warns:
ds.data["b"] = [20, 21, 22]
self.assertEquals(len(warns), 1)
self.assertEquals(str(warns[0].message), "ColumnDataSource's columns must be of the same length")
ds = ColumnDataSource(data=dict(a=[10, 11], b=[20, 21]))
with warnings.catch_warnings(record=True) as warns:
ds.data.update(dict(a=[10, 11, 12]))
self.assertEquals(len(warns), 1)
self.assertEquals(str(warns[0].message), "ColumnDataSource's columns must be of the same length")
def test_set_data_from_json_list(self):
ds = ColumnDataSource()
data = {"foo": [1, 2, 3]}
ds.set_from_json('data', data)
self.assertEquals(ds.data, data)
def test_set_data_from_json_base64(self):
ds = ColumnDataSource()
data = {"foo": np.arange(3)}
json = transform_column_source_data(data)
ds.set_from_json('data', json)
self.assertTrue(np.array_equal(ds.data["foo"], data["foo"]))
def test_set_data_from_json_nested_base64(self):
ds = ColumnDataSource()
data = {"foo": [[np.arange(3)]]}
json = transform_column_source_data(data)
ds.set_from_json('data', json)
self.assertTrue(np.array_equal(ds.data["foo"], data["foo"]))
def test_set_data_from_json_nested_base64_and_list(self):
ds = ColumnDataSource()
data = {"foo": [np.arange(3), [1, 2, 3]]}
json = transform_column_source_data(data)
ds.set_from_json('data', json)
self.assertTrue(np.array_equal(ds.data["foo"], data["foo"]))
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
|
ueshin/apache-spark
|
python/pyspark/sql/pandas/group_ops.py
|
23
|
14683
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark.rdd import PythonEvalType
from pyspark.sql.column import Column
from pyspark.sql.dataframe import DataFrame
class PandasGroupedOpsMixin(object):
"""
    Mix-in for pandas grouped operations. Currently, only :class:`GroupedData`
can use this class.
"""
def apply(self, udf):
"""
It is an alias of :meth:`pyspark.sql.GroupedData.applyInPandas`; however, it takes a
:meth:`pyspark.sql.functions.pandas_udf` whereas
:meth:`pyspark.sql.GroupedData.applyInPandas` takes a Python native function.
.. versionadded:: 2.3.0
Parameters
----------
udf : :func:`pyspark.sql.functions.pandas_udf`
a grouped map user-defined function returned by
:func:`pyspark.sql.functions.pandas_udf`.
Notes
-----
It is preferred to use :meth:`pyspark.sql.GroupedData.applyInPandas` over this
API. This API will be deprecated in the future releases.
Examples
--------
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
See Also
--------
pyspark.sql.functions.pandas_udf
"""
        # Columns are special because hasattr always returns True
if isinstance(udf, Column) or not hasattr(udf, 'func') \
or udf.evalType != PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
raise ValueError("Invalid udf: the udf argument must be a pandas_udf of type "
"GROUPED_MAP.")
warnings.warn(
"It is preferred to use 'applyInPandas' over this "
"API. This API will be deprecated in the future releases. See SPARK-28264 for "
"more details.", UserWarning)
return self.applyInPandas(udf.func, schema=udf.returnType)
def applyInPandas(self, func, schema):
"""
Maps each group of the current :class:`DataFrame` using a pandas udf and returns the result
as a `DataFrame`.
The function should take a `pandas.DataFrame` and return another
`pandas.DataFrame`. For each group, all columns are passed together as a `pandas.DataFrame`
to the user-function and the returned `pandas.DataFrame` are combined as a
:class:`DataFrame`.
The `schema` should be a :class:`StructType` describing the schema of the returned
`pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
the field names in the defined schema if specified as strings, or match the
field data types by position if not strings, e.g. integer indices.
The length of the returned `pandas.DataFrame` can be arbitrary.
.. versionadded:: 3.0.0
Parameters
----------
func : function
a Python native function that takes a `pandas.DataFrame`, and outputs a
`pandas.DataFrame`.
schema : :class:`pyspark.sql.types.DataType` or str
the return type of the `func` in PySpark. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
Examples
--------
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import pandas_udf, ceil
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").applyInPandas(
... normalize, schema="id long, v double").show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
Alternatively, the user can pass a function that takes two arguments.
In this case, the grouping key(s) will be passed as the first argument and the data will
be passed as the second argument. The grouping key(s) will be passed as a tuple of numpy
data types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in
as a `pandas.DataFrame` containing all columns from the original Spark DataFrame.
This is useful when the user does not want to hardcode grouping key(s) in the function.
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> def mean_func(key, pdf):
... # key is a tuple of one numpy.int64, which is the value
... # of 'id' for the current group
... return pd.DataFrame([key + (pdf.v.mean(),)])
>>> df.groupby('id').applyInPandas(
... mean_func, schema="id long, v double").show() # doctest: +SKIP
+---+---+
| id| v|
+---+---+
| 1|1.5|
| 2|6.0|
+---+---+
>>> def sum_func(key, pdf):
... # key is a tuple of two numpy.int64s, which is the values
... # of 'id' and 'ceil(df.v / 2)' for the current group
... return pd.DataFrame([key + (pdf.v.sum(),)])
>>> df.groupby(df.id, ceil(df.v / 2)).applyInPandas(
... sum_func, schema="id long, `ceil(v / 2)` long, v double").show() # doctest: +SKIP
+---+-----------+----+
| id|ceil(v / 2)| v|
+---+-----------+----+
| 2| 5|10.0|
| 1| 1| 3.0|
| 2| 3| 5.0|
| 2| 2| 3.0|
+---+-----------+----+
Notes
-----
This function requires a full shuffle. All the data of a group will be loaded
into memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
If returning a new `pandas.DataFrame` constructed with a dictionary, it is
recommended to explicitly index the columns by name to ensure the positions are correct,
or alternatively use an `OrderedDict`.
For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
`pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
This API is experimental.
See Also
--------
pyspark.sql.functions.pandas_udf
"""
from pyspark.sql import GroupedData
from pyspark.sql.functions import pandas_udf, PandasUDFType
assert isinstance(self, GroupedData)
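        # Wrap the user's plain Python function as a GROUPED_MAP pandas UDF and
        # apply it over all columns; the JVM side then invokes it once per group.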
udf = pandas_udf(
func, returnType=schema, functionType=PandasUDFType.GROUPED_MAP)
df = self._df
udf_column = udf(*[df[col] for col in df.columns])
jdf = self._jgd.flatMapGroupsInPandas(udf_column._jc.expr())
return DataFrame(jdf, self.sql_ctx)
def cogroup(self, other):
"""
Cogroups this group with another group so that we can run cogrouped operations.
.. versionadded:: 3.0.0
See :class:`PandasCogroupedOps` for the operations that can be run.
"""
from pyspark.sql import GroupedData
assert isinstance(self, GroupedData)
return PandasCogroupedOps(self, other)
class PandasCogroupedOps(object):
"""
A logical grouping of two :class:`GroupedData`,
created by :func:`GroupedData.cogroup`.
.. versionadded:: 3.0.0
Notes
-----
This API is experimental.
"""
def __init__(self, gd1, gd2):
self._gd1 = gd1
self._gd2 = gd2
self.sql_ctx = gd1.sql_ctx
def applyInPandas(self, func, schema):
"""
Applies a function to each cogroup using pandas and returns the result
as a `DataFrame`.
The function should take two `pandas.DataFrame`\\s and return another
`pandas.DataFrame`. For each side of the cogroup, all columns are passed together as a
`pandas.DataFrame` to the user-function and the returned `pandas.DataFrame` are combined as
a :class:`DataFrame`.
The `schema` should be a :class:`StructType` describing the schema of the returned
`pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
the field names in the defined schema if specified as strings, or match the
field data types by position if not strings, e.g. integer indices.
The length of the returned `pandas.DataFrame` can be arbitrary.
.. versionadded:: 3.0.0
Parameters
----------
func : function
a Python native function that takes two `pandas.DataFrame`\\s, and
outputs a `pandas.DataFrame`, or that takes one tuple (grouping keys) and two
pandas ``DataFrame``\\s, and outputs a pandas ``DataFrame``.
schema : :class:`pyspark.sql.types.DataType` or str
the return type of the `func` in PySpark. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
Examples
--------
>>> from pyspark.sql.functions import pandas_udf
>>> df1 = spark.createDataFrame(
... [(20000101, 1, 1.0), (20000101, 2, 2.0), (20000102, 1, 3.0), (20000102, 2, 4.0)],
... ("time", "id", "v1"))
>>> df2 = spark.createDataFrame(
... [(20000101, 1, "x"), (20000101, 2, "y")],
... ("time", "id", "v2"))
>>> def asof_join(l, r):
... return pd.merge_asof(l, r, on="time", by="id")
>>> df1.groupby("id").cogroup(df2.groupby("id")).applyInPandas(
... asof_join, schema="time int, id int, v1 double, v2 string"
... ).show() # doctest: +SKIP
+--------+---+---+---+
| time| id| v1| v2|
+--------+---+---+---+
|20000101| 1|1.0| x|
|20000102| 1|3.0| x|
|20000101| 2|2.0| y|
|20000102| 2|4.0| y|
+--------+---+---+---+
Alternatively, the user can define a function that takes three arguments. In this case,
the grouping key(s) will be passed as the first argument and the data will be passed as the
second and third arguments. The grouping key(s) will be passed as a tuple of numpy data
types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in as two
`pandas.DataFrame` containing all columns from the original Spark DataFrames.
>>> def asof_join(k, l, r):
... if k == (1,):
... return pd.merge_asof(l, r, on="time", by="id")
... else:
... return pd.DataFrame(columns=['time', 'id', 'v1', 'v2'])
>>> df1.groupby("id").cogroup(df2.groupby("id")).applyInPandas(
... asof_join, "time int, id int, v1 double, v2 string").show() # doctest: +SKIP
+--------+---+---+---+
| time| id| v1| v2|
+--------+---+---+---+
|20000101| 1|1.0| x|
|20000102| 1|3.0| x|
+--------+---+---+---+
Notes
-----
This function requires a full shuffle. All the data of a cogroup will be loaded
into memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
If returning a new `pandas.DataFrame` constructed with a dictionary, it is
recommended to explicitly index the columns by name to ensure the positions are correct,
or alternatively use an `OrderedDict`.
For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
`pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
This API is experimental.
See Also
--------
pyspark.sql.functions.pandas_udf
"""
from pyspark.sql.pandas.functions import pandas_udf
udf = pandas_udf(
func, returnType=schema, functionType=PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF)
all_cols = self._extract_cols(self._gd1) + self._extract_cols(self._gd2)
udf_column = udf(*all_cols)
jdf = self._gd1._jgd.flatMapCoGroupsInPandas(self._gd2._jgd, udf_column._jc.expr())
return DataFrame(jdf, self.sql_ctx)
@staticmethod
def _extract_cols(gd):
df = gd._df
return [df[col] for col in df.columns]
def _test():
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.pandas.group_ops
globs = pyspark.sql.pandas.group_ops.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.pandas.group tests")\
.getOrCreate()
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(
pyspark.sql.pandas.group_ops, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
iskold/fasta-util
|
bin/downsampler.py
|
1
|
7081
|
#!/usr/bin/python -u
# https://github.com/iskold
# Created Tue Jun 28 21:21:01 CEST 2016
import argparse,sys,os,re,math,time #standard libs I frequently use
from datetime import datetime as dt
parser = argparse.ArgumentParser(prog="downsample.py",usage="python3 downsample.py -f -t <target-read-count> -i <fastq-file> -o <fastq-file.output>",epilog="Written by https://github.com/iskold, on Tue Jun 28 21:21:01 CEST 2016",description="Description: Randomly downsamples a fastq-file to a specific count. NOTE! Does not normalize. If you want to normalize, use BBmap, and the tool called BBNorm.sh. You should downsample if you want to simulate lower sequencing depth. For example if you want to do gene counts matrices based on reads across samples with different depths.")
parser.add_argument("-i",metavar="fastq", help="Input file",required="True",nargs="*")
parser.add_argument("-o",metavar="fasta", help="Output file. Default: STDOUT. If paired end data, STDOUT will be disabled and output will automatically have 'DS' added to the input file name.")
parser.add_argument("-t",metavar="number",help="Target read count",required="True",type=int,nargs=1)
parser.add_argument("-a", help="Low-memory mode (uses basically no memory, but results in the target read counts +/- a few reads. Default: Off.",action="store_true")
parser.add_argument("-v",help="Verbose. Prints helpful progress message to STDERR. Default is off.",action="store_true")
args = parser.parse_args()
if args.v:
def eprint(*args,**kwargs):
for arg in args:
print(arg, file=sys.stderr, **kwargs)
sys.stdout.flush()
else:
    def eprint(*args,**kwargs):
pass
#eprint = lambda *a: None
if args.o and len(args.i) == 1:
def oprint(*args,**kwargs):
if args[0] == "tmp2":
pass
else:
writefastqs(args[0],args[1])
# writefastqs(args,kwargs)
elif not args.o and len(args.i) == 1:
def oprint(*args,**kwargs):
if args[0] == "tmp":
pass
else:
print("{0}".format(args[1].rstrip()))
# "{0}args[1],args[3]".rstrip(),end="******")
else:
def oprint(*args,**kwargs):
writefastqs(args[0],args[1],args[2],args[3])
#writefastqs(args,kwargs)
#if args.o:
# def oprint(*args,**kwargs):
# print(args[1])
# for arg in args:
# print(arg,**kwargs)
#else:
# def oprint(*args,**kwargs):
# for arg in args:
if len(args.i) == 1: #syntax is writefastqs(fid1,entry1,fid2,entry2)
fastq1 = args.i[0]
if not args.o:
fastq1o = "tmp2"
# fastq1o = ".".join(args.i[0].split(".")[0:-1])+".DS.fq"
else:
fastq1o = args.o
fastq2 = "tmp1"
fastq2o = "tmp2"
def writefastqs(*args,**kwargs):
sup = args[0].write(args[1])
elif len(args.i) == 2:
fastq1 = args.i[0]
fastq1o = ".".join(args.i[0].strip(".gz").strip(".gzip").split(".")[0:-1])+".DS.fq"
fastq1o = "./"+fastq1o.split("/")[-1]
fastq2 = args.i[1]
fastq2o = ".".join(args.i[1].strip(".gz").strip(".gzip").split(".")[0:-1])+".DS.fq"
fastq2o = "./"+fastq2o.split("/")[-1]
def writefastqs(*args,**kwargs):
sup = args[0].write(args[1])
sup = args[2].write(args[3])
else:
sys.exit("Number of input arguments ({0}) not understood".format(len(args.i)))
eprint("#Provided arguments: {0}".format(args))
start = time.time()
d_ = dt.today()
timestarted = d_.strftime("%Y-%m-%d %H:%M")
eprint("#Started at: {0}".format(timestarted))
eprint("\n#Loading modules...",end="")
import numpy as np
import matplotlib.pyplot as plt
import random
import gzip
eprint("Done!")
sys.stdout.flush()
n = args.t[0]
eprint("#Counting fastq entries...",end="")
def fastqItr(myfile):
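    # Yields one 4-line FASTQ record (header, sequence, '+', quality) at a time.
    # The placeholder name "tmp1" stands in for a missing mate file and yields
    # empty strings forever so single-end input can reuse the paired-end path.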
if myfile == "tmp1":
while 1:
yield ""
ext = myfile.split(".")[-1]
if ext == "gz" or ext == "gzip":
with gzip.open(myfile,"rt") as fid:
entry = ""
line = fid.readline()
while line:
entry += line
for i in range(3):
entry += fid.readline()
yield entry
entry = ""
line = fid.readline()
else:
with open(myfile) as fid:
entry = ""
line = fid.readline()
while line:
entry += line
for i in range(3):
entry += fid.readline()
yield entry
entry = ""
line = fid.readline()
def fastqItr_generator(fid): #Had to split this up because of gzip, and "with"-statement messing up
entry = ""
line = fid.readline()
while line:
entry += line
for i in range(3):
entry += fid.readline()
yield entry
entry = ""
line = fid.readline()
c = 0
for entry in fastqItr(fastq1):
c+=1
eprint("Done! (Entries: {0})".format(c))
if n > c:
sys.exit("#ERROR!!!! Target number ({0}) is higher than the number of fastq-entries ({1})!".format(n,c))
#ratio = int(round(100*n/(c+0.0)))
ratio = n/(c+0.0)
#clist = [1 for x in range(ratio)]+[0 for x in range(100-ratio)]
#random.shuffle(clist)
eprint("#Building random list...",end="")
clist = []
if not args.a: # make a list of numbers
clist = set()
while len(clist) != n:
clist.add(random.randint(0,c-1))
clist = sorted(clist)
eprint("Done!")
else:
eprint("Skipped!")
eprint("#Choosing fastq entries...",end="")
#with open("random.list") as fid:
def up2date(f,x,cc,c):
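    # Advance generator f until the running record counter cc has passed the
    # requested index x (or the end of the data), returning that record together
    # with the updated counter.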
# print(x,cc,c,file=sys.stderr)
while cc <= x and cc != c+1:
cc += 1
entry = next(f)
# print(x,cc,c,file=sys.stderr)
return entry,cc
if not clist:
with open(fastq1o,"w") as fo1, open(fastq2o,"w") as fo2:
i = 0
c2 = 0
k = 0
new = list()
f1 = fastqItr(fastq1)
f2 = fastqItr(fastq2)
for line in f1:
if random.random() <= ratio:
i += 1
entry,c2 = up2date(f2,k,c2,c)
oprint(fo1,line,fo2,entry)
k += 1
else:
with open(fastq1o,"w") as fo1, open(fastq2o,"w") as fo2:
# eprint(clist,len(clist))
i = len(clist)
c1 = 0
c2 = 0
k = 0
new = list()
f1 = fastqItr(fastq1)
f2 = fastqItr(fastq2)
for x in clist:
entry,c1 = up2date(f1,x,c1,c)
entry2,c2 = up2date(f2,x,c2,c)
oprint(fo1,entry,fo2,entry2)
eprint("Done!\n")
if os.path.isfile("tmp1"):
os.remove("tmp1")
if os.path.isfile("tmp2"):
os.remove("tmp2")
timeused = (time.time() - start) / 60.0
#eprint("Time used: "+str(round(timeused)) + " min ("+str(round(timeused/60,1))+" hours)\n",args.quiet)
timefinish = dt.today().strftime("%Y-%m-%d %H:%M")
eprint("#Finished at: {0}".format(timefinish))
eprint("#Time used: {0} min ({1} hours)".format(round(timeused,2),round(timeused/60.0,1)))
sys.exit() #Legacy code below this line. Mostly used to visualize the randomness of the algorithm
if 1:
i = 0
new = list()
for line in fid:
#if random.choice(clist) == 1:
if random.random() <= ratio:
i += 1
new.append(line.rstrip())
y = [int(i) for i in new]
x = range(n)
x = [i+1 for i in x]
yy = list()
for i in range(c):
if i in y:
yy.append(1)
else:
yy.append(0)
zz1 = list()
for i in range(c):
if i in zz:
zz1.append(1)
else:
zz1.append(0)
plt.plot(x[0:1000], yy[0:1000])
plt.plot(x[0:1000], zz1[0:1000])
plt.show()
timeused = (time.time() - start) / 60.0
#eprint("Time used: "+str(round(timeused)) + " min ("+str(round(timeused/60,1))+" hours)\n",args.quiet)
timefinish = d_.strftime("%Y-%m-%d_%H:%M")
eprint("#Finished at: {0}".format(timefinish))
eprint("#Time used: {0} min ({1} hours)".format(timeused,round(timeused/60.0,1)))
|
gpl-3.0
|
DuraCopter/ardupilot
|
libraries/AP_Math/tools/geodesic_grid/plot.py
|
110
|
2876
|
# Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import icosahedron as ico
import grid
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(-2, 2)
ax.set_ylim3d(-2, 2)
ax.set_zlim3d(-2, 2)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
added_polygons = set()
added_sections = set()
def polygons(polygons):
for p in polygons:
polygon(p)
def polygon(polygon):
added_polygons.add(polygon)
def section(s):
added_sections.add(s)
def sections(sections):
for s in sections:
section(s)
def show(subtriangles=False):
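    # Render everything queued via polygon()/polygons() and section()/sections():
    # whole icosahedron faces are drawn in light grey and labelled with their
    # index near the face centre, while sections are coloured by sub-triangle index.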
polygons = []
facecolors = []
triangles_indexes = set()
subtriangle_facecolors = (
'#CCCCCC',
'#CCE5FF',
'#E5FFCC',
'#FFCCCC',
)
if added_sections:
subtriangles = True
for p in added_polygons:
try:
i = ico.triangles.index(p)
except ValueError:
polygons.append(p)
continue
if subtriangles:
sections(range(i * 4, i * 4 + 4))
else:
triangles_indexes.add(i)
polygons.append(p)
facecolors.append('#DDDDDD')
for s in added_sections:
triangles_indexes.add(int(s / 4))
subtriangle_index = s % 4
polygons.append(grid.section_triangle(s))
facecolors.append(subtriangle_facecolors[subtriangle_index])
ax.add_collection3d(Poly3DCollection(
polygons,
facecolors=facecolors,
edgecolors="#777777",
))
for i in triangles_indexes:
t = ico.triangles[i]
mx = my = mz = 0
for x, y, z in t:
mx += x
my += y
mz += z
ax.text(mx / 2.6, my / 2.6, mz / 2.6, i, color='#444444')
if subtriangles:
ax.legend(
handles=tuple(
mpatches.Patch(color=c, label='Sub-triangle #%d' % i)
for i, c in enumerate(subtriangle_facecolors)
),
)
plt.show()
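# Illustrative usage sketch (not part of the upstream tool): with this module
# imported as `plot` next to icosahedron.py, the whole grid can be drawn with
# its numbered sub-triangles.
#
#     import icosahedron as ico
#     import plot
#     plot.polygons(ico.triangles)
#     plot.show(subtriangles=True)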
|
gpl-3.0
|
jplourenco/bokeh
|
bokeh/compat/mpl.py
|
32
|
2834
|
"Supporting objects and functions to convert Matplotlib objects into Bokeh."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from warnings import warn
import matplotlib.pyplot as plt
from .bokeh_exporter import BokehExporter
from .bokeh_renderer import BokehRenderer
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def to_bokeh(fig=None, name=None, server=None, notebook=None, pd_obj=True, xkcd=False):
""" Uses bokeh to display a Matplotlib Figure.
    You can store a bokeh plot in a standalone HTML file, as a document in
    a Bokeh plot server, or embed it directly into an IPython Notebook
    output cell.
Parameters
----------
fig: matplotlib.figure.Figure
The figure to display. If None or not specified, then the current figure
will be used.
name: str (default=None)
If this option is provided, then the Bokeh figure will be saved into
this HTML file, and then a web browser will be used to display it.
server: str (default=None)
        Fully specified URL of the bokeh plot server. The default plot
        server URL is "http://localhost:5006"; you may also simply pass
        "default".
    notebook: bool (default=None)
        Return an output value from this function which represents an HTML
        object that the IPython notebook can display. You can also use it
        with a bokeh plot server by just specifying the URL.
pd_obj: bool (default=True)
        The implementation assumes you are plotting using pandas. You have
        the option to turn it off (False) to plot the datetime x-axis with
        other non-pandas interfaces.
xkcd: bool (default=False)
        If this option is True, then the Bokeh figure will be saved with an
        xkcd style.
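    Examples
    --------
    A minimal, illustrative sketch (output_file and show are the bokeh.io
    helpers that this function's warnings point to; adjust the file name as
    needed)::
        import matplotlib.pyplot as plt
        from bokeh.io import output_file, show
        plt.plot([1, 2, 3], [4, 2, 5])
        output_file("mpl_figure.html")
        show(to_bokeh(plt.gcf()))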
"""
if name is not None:
warn("Use standard output_file(...) from bokeh.io")
if server is not None:
warn("Use standard output_server(...) from bokeh.io")
if notebook is not None:
warn("Use standard output_notebook() from bokeh.io")
if fig is None:
fig = plt.gcf()
renderer = BokehRenderer(pd_obj, xkcd)
exporter = BokehExporter(renderer)
exporter.run(fig)
return renderer.fig
|
bsd-3-clause
|
yonglehou/scikit-learn
|
examples/ensemble/plot_partial_dependence.py
|
249
|
4456
|
"""
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AveOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; nor does the average
number of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print()
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print()
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
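# partial_dependence returns the averaged predictions over the flattened grid;
# reshape them to match the meshgrid before drawing the surface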
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
|
bsd-3-clause
|
jaganadhg/sdcnd_p1_lld
|
thirdeye/pipeline.py
|
1
|
1306
|
#!/usr/bin/env python
"""
Author : Jaganadh Gopinadhan
e-mail : [email protected]
Licence : MIT
Third Eye V1.0 Udacity Self Driving Car Nano Degree Project 1
Finding Lane Lines on the Road
"""
import matplotlib.image as mpimg
import numpy as np
from moviepy.editor import VideoFileClip
from pputil import FrameTransformer
from lineutil import LineDrawBase, plot_img
class Pipeline(object):
"""
Basic pipeline for Lane Line detection
TODO : Improve with more features
"""
def __init__(self):
self.rho = 1
self.theta = np.pi/180
self.threshold = 15
self.min_line_len = 25
self.max_line_gap = 250
self.preprocessor = FrameTransformer()
self.annotator = LineDrawBase()
def fit_frame(self,image):
"""
Preprocess and draw image
"""
roi = self.preprocessor.transform(image)
annotated = self.annotator.draw(image,roi,self.rho,self.theta,\
self.threshold, self.min_line_len,self.max_line_gap)
return annotated
def fit_vid(self,vidfile):
"""
Process video file
"""
vf = VideoFileClip(vidfile)
white_clip = vf.fl_image(self.fit_frame)
return white_clip
if __name__ == "__main__":
print
|
mit
|
deepesch/scikit-learn
|
sklearn/tests/test_cross_validation.py
|
19
|
44125
|
"""Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
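# Module-level fixtures shared by the tests below: a small dense/sparse design
# matrix, sparse fit parameters and a target with two samples per class.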
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of the train and test splits covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact be computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non
# shuffling variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test that the StratifiedShuffleSplit indices are drawn with an
    # equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1./3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] // 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    # check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
|
bsd-3-clause
|
Crompulence/cpl-library
|
test/lammps/single/no_wall/constant_force/falling.py
|
1
|
1218
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import argrelextrema
def analytical_gravity(z0, v0, t0, t, g=9.81):
return z0 + v0*(t-t0) - 0.5*g*(t-t0)**2
def read_data(logfile='./log.lammps',
datafile='./thermo_output.txt'):
#Get data from file
with open(logfile) as f:
for l in f.readlines():
if l.find("timestep") != -1:
dt = float(l.strip('timestep'))
break
#print("timestep = ", dt)
data = np.genfromtxt(datafile)
t = data[:,0]*dt
z = data[:,1]
v = data[:,2]
f = data[:,3]
return t, z, v, f
def unwrap(z):
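    # z is recorded in a periodic box of height Lz, so the coordinate jumps
    # back up whenever the particle crosses the lower boundary; accumulate the
    # per-step displacements (treating upward jumps as wrap-arounds) to recover
    # a continuous falling trajectory.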
Lz = 2.5e-3
znew = np.zeros(z.shape[0])
znew[0] = z[0]
for i in range(1,z.shape[0]):
dz = z[i] - z[i-1]
if dz > 0.:
dz -= Lz
znew[i] = znew[i-1] + dz
return znew
def check_falling_error_vs_gravity(D=3.5e-4, g=9.81):
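    # Compare the unwrapped simulation trajectory against free fall under
    # gravity from the same initial position and velocity.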
t, z, v, f = read_data()
#Error vs gravity
za = analytical_gravity(z[0], v[0], t[0], t, g)
error = unwrap(z) - za
return np.array(error)
if __name__ == "__main__":
error = check_falling_error_vs_gravity()
plt.plot(error)
plt.show()
|
gpl-3.0
|
AndresYague/Snuppat
|
output/figuresAndTables/plotHsLsObs.py
|
1
|
2407
|
import sys
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def main():
if len(sys.argv) < 2:
print("Usage: python {} profile1 <profile2 ...>".format(sys.argv[0]))
return 1
colors = ["b:", "g--", "y-"]
labels = ["$\omega = 0.10$", "$\omega = 0.12$", "$\omega = 0.14$"]
ii = 0
for arch in sys.argv[1:]:
# Read event
fread = open(arch, "r")
# Get xx, yy
xxArr = []; yyArr = []
for line in fread:
if "#" in line:
continue
xx, yy = map(float, line.split())
xxArr.append(xx)
yyArr.append(yy)
plt.plot(xxArr, yyArr, colors[ii], label = labels[ii], lw = 2)
ii += 1
# Observations values
# (AW Cyg, S Sct, SS Vir, SZ Sgr, U Hya, V460 Cyg, Z Psc)
# Obs values: Rb, Sr, Y, Zr, Ba, La, Ce, Nd, Sm
xxObs = [37, 38, 39, 40, 56, 57, 58, 60, 62]
yyErrs = [0.25, 0.20, 0.20, 0.20, 0.3, 0.40, 0.45, 0.40, 0.40]
abiStars = [
[0.2, 0.4, 0.3, 0.5, 0, 0.3, "-", 0.5, "-"],
[0.5, 0.8, 0.7, 0.5, 0.2, 0.1, "-", 0.2, "-"],
["-", 0.0, 0.5, 0.4, 0.3, 0.4, "-", 0.3, "-"],
[0.1, 0.4, 0.9, 0.8, 0.8, 0.9, "-", 0.9, 0.6],
[0.5, 0.8, 1.3, 1.1, 1.1, 0.9, 0.6, 1.0, 0.7],
[0.4, 0.5, 0.7, 0.8, 0.8, 0.7, "-", 0.8, 0.4],
[0.6, 0.9, 1.0, 1.0, 1.0, 1.1, 0.6, 0.9, 0.8]
]
# Plot observations with errors
gray = (0.75, 0.75, 0.75)
star_ii = 0
for star in abiStars:
        # Combine the tabulated abundances and their errors into the indices plotted below
hsFe = sum(star[1:4])/3.; hsErr = sum(yyErrs[1:4])/3.
lsFe = sum(star[4:6])/2.; lsErr = sum(yyErrs[4:6])/2.
hsLs = hsFe - lsFe; hsLsErr = hsErr + lsErr
sFe = sum(star[1:6])/5.; sErr = sum(yyErrs[1:6])/5.
# Set style
if star_ii == 2 or star_ii == 3:
col = "sr"
elif star_ii == 0 or star_ii == 5:
col = "vb"
else:
col = "^k"
# Now plot it with error bars
plt.errorbar(sFe, hsLs, fmt = col, xerr = sErr, yerr = hsLsErr)
star_ii += 1
plt.xlabel("[s/Fe]", size = 12)
plt.ylabel("[hs/ls]", size = 12)
plt.legend(loc = 0, ncol = 2, prop = {"size": 12})
plt.show()
if __name__ == "__main__":
main()
|
mit
|
BavoGoosens/Gaiter
|
main.py
|
1
|
13115
|
import sys
from data_utils.framer import *
from data_utils.data_loader import *
from sklearn.feature_selection import SelectKBest
from sklearn.svm import LinearSVC
from sklearn.feature_selection import f_classif
from sklearn.ensemble import ExtraTreesClassifier
from personal_classifier.support_vector_machine import SupportVectorMachine
from feature_extraction.feature_extractor import add_pearson
from feature_extraction.time_domain_feature_extractor import *
from feature_extraction.frequency_domain_feature_extractor import *
from feature_extraction.wavelet_feature_extractor import *
from personal_classifier.ada_boost import *
from personal_classifier.logistic_regression import *
from personal_classifier.random_forrests import *
from personal_classifier.support_vector_machine import *
from walking_classifier.k_means import *
from walking_classifier.k_means_mini_batch import *
from walking_classifier.mean_shift import *
from walking_classifier.db_scan import *
from monitor.timer import Timer
from scipy.sparse import csr_matrix
import monitor.time_complexity_monitor as moni
from validator import *
import os.path
import random
import numpy as np
import math
def main(argv):
# Welcome message
global walking_classifier
print ""
print "Welcome to Gaiter..."
print "First, Gaiter will load training data."
# Frame all the data
frame_size = 128
frame_overlap = 64
# Check for previous data
previous_data = False
if os.path.isfile('trainingdata.npy'):
# Ask user for using previous data
previous_data_answer = raw_input("Gaiter has discovered there is some data available from a previous session. "
"This data is already framed and all features are already calculated. Do you "
"want to use this data? (y/n) ")
if previous_data_answer == 'y' or previous_data_answer == 'Y':
print "Using the data from a previous session."
previous_data = True
print ""
# Check if user wants load new data
if not previous_data:
# Ask user for path to directory of training data
data_path = raw_input("Please enter the path to the directory of the training data (end your path with '/' and default=data/train/): ")
# Load all the data
print "Loading data from '"+data_path+"'."
print "..."
print ""
data_loader = DataLoader(data_path)
raw_data_list = data_loader.get_raw_data()
print str(len(raw_data_list)) + " files found."
print "Framing all the data."
print "..."
print ""
framer = Framer(frame_size, frame_overlap)
for raw_data in raw_data_list:
framer.frame(raw_data)
framed_raw_data_list = framer.get_framed_raw_data_list()
length = 0
for frd in framed_raw_data_list:
length += len(frd.get_frames())
print "Some files were not large enough for framing... "+str(len(framed_raw_data_list))+\
" files are divided into "+str(length)+" frames."
print ""
split = raw_input("What is k for your k-fold cross-validation? This determines "
"the train/test split (default=5): ")
split = int(split)
split = 1 - (1 / float(split))
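        # A k-fold choice of k keeps (k-1)/k of the framed files for training;
        # the remainder is held out below as the test set.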
nb = int(len(framed_raw_data_list) * split)
seed = raw_input("enter seed: ")
random.seed(int(seed))
random.shuffle(framed_raw_data_list)
train_raw_data_list = framed_raw_data_list[:nb]
test_raw_data_list = framed_raw_data_list[nb:]
use_derivative = raw_input("Would you like to add the features calculated "
"from the first order derivative?(default = True): ")
use_derivative = use_derivative.lower() == str(True).lower()
# Extract features
time_feature_extractor = TimeDomainFeatureExtractor(use_derivative)
freq_feature_extractor = FrequencyDomainFeatureExtractor(use_derivative)
wav_feature_extractor = WaveletFeatureExtractor(use_derivative)
print "Extracting features for training-frames. This may take a while."
print "..."
print ""
bumpy_data_set = list()
raw_data_count = 0
for framed_raw_data in train_raw_data_list:
print str(round(raw_data_count/float(len(train_raw_data_list))*100, 2))+" %"
for frame in framed_raw_data.get_frames():
featured_frame = time_feature_extractor.extract_features(frame)
featured_frame = freq_feature_extractor.extract_features(featured_frame)
featured_frame = wav_feature_extractor.extract_features(featured_frame)
featured_frame = add_pearson(featured_frame, False)
bumpy_data_set.append(featured_frame)
raw_data_count = raw_data_count + 1
print "100 %"
print ""
print "Training features are calculated. Writing all data to hard disk for later use."
print "..."
print ""
flat_train_data_set = flatten(bumpy_data_set)
train_data_set = np.array(flat_train_data_set)
train_labels = np.array(extract_labels(bumpy_data_set))
np.save('trainingdata', train_data_set)
np.save('traininglabels', train_labels)
print "Extracting features for testing-frames. This may take a while."
print "..."
print ""
bumpy_data_set = list()
raw_data_count = 0
for framed_raw_data in test_raw_data_list:
print str(round(raw_data_count/float(len(test_raw_data_list))*100, 2))+" %"
for frame in framed_raw_data.get_frames():
featured_frame = time_feature_extractor.extract_features(frame)
featured_frame = freq_feature_extractor.extract_features(featured_frame)
featured_frame = wav_feature_extractor.extract_features(featured_frame)
featured_frame = add_pearson(featured_frame, False)
bumpy_data_set.append(featured_frame)
raw_data_count = raw_data_count + 1
print "100 %"
print ""
print "Training features are calculated. Writing all data to hard disk for later use."
print "..."
print ""
random.seed(0)
random.shuffle(bumpy_data_set)
flat_test_data_set = flatten(bumpy_data_set)
test_data_set = np.array(flat_test_data_set)
test_labels = np.array(extract_labels(bumpy_data_set))
np.save('testdata', test_data_set)
np.save('testlabels', test_labels)
print "All data is written to hard drive."
print ""
print ("The training data set's dimension is " + str(train_data_set.shape))
print ("The testing data set's dimension is " + str(test_data_set.shape))
else:
print "Loading data from previous session."
print "..."
train_data_set = np.load("trainingdata.npy")
train_labels = np.load("traininglabels.npy")
test_data_set = np.load("testdata.npy")
test_labels = np.load("testlabels.npy")
print "All data from previous session loaded."
print ""
print ("The training data set's dimension is " + str(train_data_set.shape))
print ("The testing data set's dimension is " + str(test_data_set.shape))
print ""
print "Gaiter will now train the walking classifier."
print "List of walking classifiers:"
print "1) K-means"
print "2) K-means mini batch"
print "3) Mean-shift"
print ""
walking_classifier_nb = raw_input("Please enter the number of the classifier you want to use: ")
print "Training walking classifier."
print "..."
print ""
if walking_classifier_nb == "1":
walking_classifier = KMeans(train_data_set, train_labels)
walking_classifier.train(2)
if walking_classifier_nb == "2":
walking_classifier = KMeansMiniBatch(train_data_set, train_labels)
walking_classifier.train(2)
if walking_classifier_nb == "3":
walking_classifier = MeanShift(train_data_set, train_labels)
walking_classifier.train()
walking_data_set, walking_labels = walking_classifier.get_walking_frames()
print "The walking classifier is trained."
print ""
print "Gaiter will now train and test the personal classifier."
print "List of personal classifiers:"
print "1) ADA boost"
print "2) Random forrest"
print "3) Support vector machine"
print "4) Logistic regression"
print ""
personal_classifier_nb = raw_input("Please enter the number of the classifier you want to use: ")
use_derivative = walking_data_set.shape[1] > 125
print "List of feature selectors:"
print "1) Select K best"
print "2) Tree-based feature selection"
print "3) L1-based feature selection"
print "4) None"
print ""
sel = raw_input("Please enter the number of the feature selector you want to use: ")
if sel == "1":
sel = raw_input("Please enter the number of the features you want to use: ")
selector = SelectKBest(f_classif, k=int(sel)).fit(walking_data_set, walking_labels)
if sel == "2":
clf = ExtraTreesClassifier()
selector = clf.fit(walking_data_set, walking_labels)
if sel == "3":
selector = LinearSVC(C=0.01, penalty="l1", dual=False).fit(walking_data_set, walking_labels)
if sel == "4":
selector = None
if selector is not None:
walking_data_set = selector.transform(walking_data_set)
print "Training personal classifier."
print "..."
print ""
if personal_classifier_nb == "1":
pc = AdaBoost(walking_data_set, walking_labels)
pc.train()
if personal_classifier_nb == "2":
pc = RandomForrest(walking_data_set, walking_labels)
pc.train()
if personal_classifier_nb == "3":
pc = SupportVectorMachine(walking_data_set, walking_labels)
pc.train()
if personal_classifier_nb == "4":
pc = LogisticRegression(walking_data_set, walking_labels)
pc.train()
wk = WalkingClassifier(test_data_set, test_labels)
wk.set_classifier(walking_classifier.get_classifier())
test_data_set, test_labels = wk.classify()
if selector is not None:
test_data_set = selector.transform(test_data_set)
val = Validator()
val.calculate_confusion_matrix(pc, test_data_set, test_labels)
    data_path = raw_input("Please enter the path to the directory of the test data (end the "
                          "path with '/'; default is 'data/test/'): ")
print "Loading test data from '"+data_path+"'."
print "..."
print ""
data_loader = DataLoader(data_path)
raw_data_list = data_loader.get_raw_data()
print str(len(raw_data_list)) + " files found."
print "Framing all the test data."
print "..."
print ""
# Frame all the data
unlabeled_framer = Framer(frame_size, frame_overlap)
for raw_data in raw_data_list:
unlabeled_framer.frame(raw_data)
framed_raw_data_list = unlabeled_framer.get_framed_raw_data_list()
length = 0
for frd in framed_raw_data_list:
length += len(frd.get_frames())
print "Some files were not large enough for framing... "+str(len(framed_raw_data_list))+\
" files are divided into "+str(length)+" frames."
print ""
print "Extracting features for unlabeled frames. This may take a while."
print "..."
print ""
time_feature_extractor = TimeDomainFeatureExtractor(use_derivative)
freq_feature_extractor = FrequencyDomainFeatureExtractor(use_derivative)
wav_feature_extractor = WaveletFeatureExtractor(use_derivative)
bumpy_data_set = defaultdict(list)
raw_data_count = 0
for framed_raw_data in framed_raw_data_list:
print str(round(raw_data_count/float(len(framed_raw_data_list))*100, 2))+" %"
entry = framed_raw_data.get_frames()[0].get_path()
for frame in framed_raw_data.get_frames():
featured_frame = time_feature_extractor.extract_features(frame)
featured_frame = freq_feature_extractor.extract_features(featured_frame)
featured_frame = wav_feature_extractor.extract_features(featured_frame)
featured_frame = add_pearson(featured_frame, False)
bumpy_data_set[entry].append(featured_frame)
raw_data_count = raw_data_count + 1
print "100 %"
print ""
print "Unlabeled features are calculated."
print "..."
print ""
labels = pc.label_data(bumpy_data_set, selector)
print(labels)
def flatten(featured_frame_list):
flat_list = list()
for f_frame in featured_frame_list:
features = f_frame.get_flat_features()
flat_list.append(features)
return flat_list
def extract_labels(featured_frame_list):
classes = list()
for featured_frame in featured_frame_list:
l = featured_frame.get_label()
classes.append(l)
return classes
if __name__ == '__main__':
main(sys.argv[1:])
|
mit
|
niwatoribaka/yeast-counter
|
yeast.py
|
1
|
16889
|
import cv2
import wx
import os
import sys # @UnusedImport
import commands
from matplotlib import pyplot as plt # @UnusedImport
from pylab import * # @UnusedWildImport
from GUI.settings_wizard import wizard as settings_wizard # @UnresolvedImport
from prepare_dp import auto_prepare
# Testing mode should be used with data points generated by
# generate_test.py
TESTING = None
# MODE list
# 0 -> custom normalized value method
# 1 -> harris corner detection
MODE = None
UNITS = 'cm'
# Each height must be evenly spaced excepting the low-flush
# HEIGHTS[-1]-HEIGHTS[0] must be a multiple of HEIGHTS[-1]-HEIGHTS[-2]
HEIGHTS = [20, 35, 40, 45] # experiment (low-flush + 3 heights)
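# e.g. with the default HEIGHTS above: 45 - 20 = 25 is a multiple of 45 - 40 = 5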
DIFF = [HEIGHTS[x + 1] - HEIGHTS[x] for x in range(len(HEIGHTS) - 1)]
# Fraction of user denoted significant shear (to calculate significant
# shear height automatically and to mark on line plot).
SIG_SHEAR = .75
DP_FOLDER = None
PREPARE_FIRST = None
class SettingsWizard(settings_wizard):
'''
Overrides the methods from the FormBuilder generated file.
'''
def user_exit(self, event):
exit()
def start_processing(self, event):
global TESTING, MODE, DP_FOLDER, PREPARE_FIRST
TESTING = self.select_testing.GetValue()
mode = self.select_mode.GetStringSelection()
if mode.__contains__('HSV'):
MODE = 0
elif mode.__contains__('Harris'):
MODE = 1
DP_FOLDER = self.dp_picker.GetPath()
PREPARE_FIRST = self.select_prepare.GetValue()
class DataPoint():
def __init__(self, dp_path):
global TESTING, MODE, SIG_SHEAR
if TESTING:
print 'TESTING MODE IS ON'
print 'MODE IS {0}'.format(MODE)
        self.significant_shear = 1 - SIG_SHEAR
self.dp_path = dp_path
self.img_names = sort([f for f in os.listdir(dp_path) if os.path.isfile('{0}/{1}'.format(dp_path, f))])
if not TESTING:
assert len(HEIGHTS) == len(self.img_names)
self.imgs = [cv2.imread('{0}/{1}'.format(dp_path, fname)) for fname in self.img_names]
self.user_params = {i:
{
'testing_region':['ix', 'iy', 'x', 'y'],
}
for i in range(len(self.imgs))
}
if MODE == 0:
self.working_imgs = [cv2.cvtColor(img, cv2.COLOR_BGR2HSV) \
for img in self.imgs]
elif MODE == 1:
self.working_imgs = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).astype('float32') \
for img in self.imgs]
self.average_value = [0. for img in range(len(self.img_names))]
self.yeast_count = [0. for img in range(len(self.img_names))]
print 'Setting params'
self.set_user_params()
print 'Cropping'
self.crop_working(0, 200)
if MODE == 0:
self.pixel_method()
elif MODE == 1:
self.harris_method()
def pixel_method(self):
print 'Filtering borders'
self.filter()
print 'Normalizing'
self.normalize_V()
print 'Calculating averages'
self.average_V()
print 'Plotting'
self.plot()
def harris_method(self):
print 'Setting thresholds'
self.set_threshold()
print 'Counting yeast'
self.count_yeast()
print 'Plotting'
self.plot()
def normalize_V(self):
'''Normalizes each HSV image against its particular background.
The background is whatever was within the crop buffer but outside
the testing region.
Modifies self.working_imgs'''
for i in range(len(self.working_imgs)):
region = self.user_params[i]['testing_region']
self.working_imgs[i] = self.working_imgs[i].astype(float)
top_background_values = self.working_imgs[i][:, :, 2][0:region[1], :].flatten()
bottom_background_values = self.working_imgs[i][:, :, 2][region[3]:-1, :].flatten()
left_background_values = self.working_imgs[i][:, :, 2][:, 0:region[0]].flatten()
right_background_values = self.working_imgs[i][:, :, 2][:, region[2]:-1].flatten()
flat_background = concatenate((
top_background_values,
bottom_background_values,
left_background_values,
right_background_values,
))
repr_background = average(flat_background)
self.working_imgs[i][:, :, 2] /= repr_background
def suggest_lowpass_threshold(self):
pass
def filter(self):
'''Filters out very dark regions of the cropped HSVs.
The target of this filter is to remove the effect of the channel
border and miscellaneous garbage in between the chip top and bottom
when calculating the average value for the testing region.'''
# TODO -> set darkness_threshold to value suggested by
# self.suggest_lowpass_threshold()
# perhaps each image should have a separate darkness_threshold?
darkness_threshold = .8 * 255
print 'Low-pass filter set at: {0}'.format(darkness_threshold)
for i in range(len(self.working_imgs)):
print 'Min Intensity for image {0}: {1}'.format(i, min([min(col) for col in self.working_imgs[i][:, :, 2]]))
for x in range(len(self.working_imgs[i][:, :, 2])):
for y in range(len(self.working_imgs[i][:, :, 2][x])):
if self.working_imgs[i][:, :, 2][x][y] < darkness_threshold:
self.working_imgs[i][:, :, 2][x][y] = 1.
def average_V(self):
'''Stores the average value in the testing region in self.average_value'''
for i in range(len(self.img_names)):
region = self.user_params[i]['testing_region']
cropped = self.working_imgs[i][region[1]:region[3], region[0]:region[2]]
cropped = cropped[:, :, 2]
self.average_value[i] = sum([sum(row) for row in cropped]) / float(cropped.size)
def set_threshold(self):
'''
User interface for interactively setting the threshold
for the corner detector.
'''
global TESTING
self.threshold = [0. for i in range(len(self.imgs))]
self.dst = [cv2.cornerHarris(img, 2, 1, .04) for img in self.working_imgs]
self.dst = [cv2.dilate(img, None) for img in self.dst]
if sys.platform.startswith('win'):
import ctypes
user32 = ctypes.windll.user32
screen_res = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)
else:
status, output = commands.getstatusoutput("xrandr | grep '*'")
if not status:
screen_res = tuple(
[int(dim) for dim in \
[
part for part in output.split('\n')[-1].split(' ') if part][0].split('x')
]
)
else:
screen_res = 800, 600
        # all cropped images share the same size, so scale the window from the first one
        scale_width = screen_res[0] / float(self.imgs[0].shape[1])
        scale_height = screen_res[1] / float(self.imgs[0].shape[0])
scale = min(scale_width, scale_height)
window_width = int(img.shape[1] * scale)
window_height = int(img.shape[0] * scale)
for i in range(len(self.working_imgs)):
if TESTING and i != 0:
self.threshold[i] = self.threshold[i - 1]
continue
cv2.namedWindow(self.img_names[i], cv2.WINDOW_NORMAL)
cv2.resizeWindow(self.img_names[i], window_width, window_height)
cv2.createTrackbar('threshold', self.img_names[i], 0, 260, lambda new: None)
cv2.startWindowThread()
while True:
drawn_over = self.imgs[i].__copy__()
drawn_over[self.dst[i] > cv2.getTrackbarPos('threshold', self.img_names[i]) \
* self.dst[i].max() / 1000.] = [0, 0, 255]
cv2.imshow(self.img_names[i], drawn_over)
k = cv2.waitKey(40)
if k == 27:
print 'User Interrupt: exiting'
exit()
elif k != -1:
self.threshold[i] = cv2.getTrackbarPos('threshold', self.img_names[i]) \
* self.dst[i].max() / 1000.
break
cv2.destroyWindow(self.img_names[i])
cv2.waitKey(1)
def count_yeast(self):
for i in range(len(self.imgs)):
mask = self.dst[i] > self.threshold[i]
self.yeast_count[i] = float(sum(mask)) / len(mask)
def printdir(self):
'''Prints the content of the data point folder'''
print '{0}:'.format(self.dp_path)
for fname in self.img_names:
print '\t{0}'.format(fname)
def set_user_params(self):
'''Is responsible for displaying each image of the data point folder.
Works with self.paramsUI to gather input from the user'''
self.stage = 0
global TESTING
for i in range(len(self.imgs)):
if TESTING and i != 0:
self.user_params[i]['testing_region'] = self.user_params[0]['testing_region']
else:
self.i = i
self.drawing_overlay = self.imgs[i].__copy__()
if sys.platform.startswith('win'):
import ctypes
user32 = ctypes.windll.user32
screen_res = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)
else:
status, output = commands.getstatusoutput("xrandr | grep '*'")
if not status:
screen_res = tuple(
[int(dim) for dim in \
[
part for part in output.split('\n')[-1].split(' ') if part][0].split('x')
]
)
else:
screen_res = 800, 600
scale_width = screen_res[0] / float(self.imgs[i].shape[1])
scale_height = screen_res[1] / float(self.imgs[i].shape[0])
scale = min(scale_width, scale_height)
window_width = int(self.imgs[i].shape[1] * scale)
window_height = int(self.imgs[i].shape[0] * scale)
cv2.namedWindow(self.img_names[i], cv2.WINDOW_NORMAL)
cv2.startWindowThread()
cv2.resizeWindow(self.img_names[i], window_width, window_height)
self.ix, self.iy = -1, -1
if i == 0:
cv2.putText(self.drawing_overlay, 'Select testing region', (20, 100), \
cv2.FONT_HERSHEY_COMPLEX, 2, (255, 255, 0), 5)
else:
cv2.putText(self.drawing_overlay, 'Place testing region', (20, 100), \
cv2.FONT_HERSHEY_COMPLEX, 2, (255, 255, 0), 5)
cv2.setMouseCallback(self.img_names[i], self.paramsUI)
cv2.imshow(self.img_names[i], self.drawing_overlay)
while True:
k = cv2.waitKey(5)
if k == 27:
print 'User Interrupt: exiting'
exit()
elif k == ord('n'):
self.stage = 0
cv2.destroyWindow(self.img_names[i])
cv2.waitKey(1)
break
def paramsUI(self, event, x, y, flags, param):
'''Modifies self.drawing_overlay in response to user input
Sets the testing region with respect to the unaltered image'''
if not self.stage and not self.i:
if event == cv2.EVENT_LBUTTONDOWN:
self.ix, self.iy = x, y
elif event == cv2.EVENT_LBUTTONUP:
self.user_params[self.i]['testing_region'] = [self.ix, self.iy, x, y]
cv2.rectangle(self.drawing_overlay, (self.ix, self.iy), (x, y), (0, 255, 0), 3)
cv2.putText(self.drawing_overlay, "Press 'n' to continue", (20, 190), \
cv2.FONT_HERSHEY_COMPLEX, 2, (255, 255, 0), 5)
self.stage = 1
cv2.imshow(self.img_names[self.i], self.drawing_overlay)
elif not self.stage:
if event == cv2.EVENT_LBUTTONDOWN:
testing_region = self.user_params[0]['testing_region']
w = testing_region[2] - testing_region[0]
h = testing_region[3] - testing_region[1]
self.user_params[self.i]['testing_region'] = [x, y, x + w, y + h]
cv2.rectangle(self.drawing_overlay,
(x, y), (x + w, y + h),
(0, 255, 0), 3)
cv2.putText(self.drawing_overlay, "Press 'n' to continue", (20, 190), \
cv2.FONT_HERSHEY_COMPLEX, 2, (255, 255, 0), 5)
self.stage = 1
cv2.imshow(self.img_names[self.i], self.drawing_overlay)
def crop_working(self, x_cropbuff, y_cropbuff):
'''Crops the working images to the testing region.
The x and y crop buffers allow for other functions in self
to have a background region to normalize to.
Modifies self.working_imgs and self.user_params -> 'testing_region'.'''
for i in range(len(self.working_imgs)):
region = self.user_params[i]['testing_region']
self.working_imgs[i] = self.working_imgs[i][region[1] - y_cropbuff:region[3] + y_cropbuff,
region[0] - x_cropbuff:region[2] + x_cropbuff]
self.imgs[i] = self.imgs[i][region[1] - y_cropbuff:region[3] + y_cropbuff,
region[0] - x_cropbuff:region[2] + x_cropbuff]
self.user_params[i]['testing_region'] = [x_cropbuff, y_cropbuff,
x_cropbuff + region[2] - region[0], y_cropbuff + region[3] - region[1]]
def prepare_data(self):
'''Uses self.average_value to construct self.coverage
Also calculates self.significant_shear_height and self.x_significant_shear
self.coverage[0] = 1, self.coverage[-1] = 0'''
if MODE == 0:
self.data = self.average_value
elif MODE == 1:
self.data = self.yeast_count
norm = self.data[-1]
self.data = [entry / norm for entry in self.data]
self.data = [1 - v for v in self.data]
norm = self.data[0]
self.data = [entry / norm for entry in self.data]
left_bound = 0
for i in range(len(self.data)):
if self.data[i] < self.significant_shear:
break
left_bound = i
self.x_significant_shear = (self.significant_shear - self.data[left_bound]) * \
(1. / (self.data[left_bound + 1] - self.data[left_bound])) + left_bound
global HEIGHTS
left = int(floor(self.x_significant_shear))
right = int(ceil(self.x_significant_shear))
self.significant_shear_height = HEIGHTS[left] + DIFF[left] * (self.x_significant_shear - floor(self.x_significant_shear))
def plot(self):
        '''Prepares the data using self.prepare_data and then
        graphs the data on a plot.'''
        global TESTING, UNITS, HEIGHTS
self.prepare_data()
plt.plot(HEIGHTS, self.data)
plt.hlines(self.significant_shear, 0, HEIGHTS[-1])
plt.vlines(self.significant_shear_height, -1, 2)
print 'Significant shear at image {0}'.format(self.x_significant_shear)
if not TESTING:
print 'Theoretical significant shear at height {0} {1}'.format(self.significant_shear_height, UNITS)
plt.ylim([-1, 2])
plt.xlim([HEIGHTS[0], HEIGHTS[-1]])
plt.xlabel('Height ({0})'.format(UNITS))
plt.ylabel('Coverage')
plt.title(self.dp_path.split('/')[-1])
try:
os.mkdir('{0}/res'.format(self.dp_path))
except:
pass
plt.savefig('{0}/res/results.png'.format(self.dp_path))
with open('{0}/res/results.txt'.format(self.dp_path), 'w') as f:
global MODE
f.write('{0}\nMODE {1}\n'.format(str(self.significant_shear_height), MODE))
if __name__=='__main__':
app = wx.App(False)
wiz = SettingsWizard(None)
wiz.RunWizard(wiz.m_pages[0])
wiz.Destroy()
app.MainLoop()
if PREPARE_FIRST:
auto_prepare(DP_FOLDER)
dp = DataPoint(DP_FOLDER)
|
mit
|
RosesTheN00b/BudgetButlerWeb
|
butler_offline/core/DBManager.py
|
1
|
6017
|
'''
Read pandas files
'''
from _io import StringIO
from butler_offline.core import file_system
from butler_offline.core.database import Database
import pandas as pd
KEYWORD_EINZELBUCHUNGEN = 'Einzelbuchungen'
KEYWORD_DAUERAUFRTAEGE = 'Dauerauftraege'
KEYWORD_GEMEINSAME_BUCHUNGEN = 'Gemeinsame Buchungen'
KEYWORD_SPARBUCHUNGEN = 'Sparbuchungen'
KEYWORD_SPARKONTOS = 'Sparkontos'
KEYWORD_DEPOTWERTE = 'Depotwerte'
KEYWORD_ORDER = 'Order'
KEYWORD_ORDERDAUERAUFTRAG = 'Dauerauftr_Ordr'
KEYWORD_DEPOTAUSZUEGE = 'Depotauszuege'
KEYWORD_LINEBREAK = '\n'
def _to_table(content):
return pd.read_csv(StringIO(content))
def read(nutzername, ausgeschlossene_kategorien):
if not file_system.instance().read(database_path_from(nutzername)):
neue_datenbank = Database(nutzername)
write(neue_datenbank)
file_content = file_system.instance().read(database_path_from(nutzername))
parser = DatabaseParser()
parser.from_string(file_content)
database = Database(nutzername, ausgeschlossene_kategorien=ausgeschlossene_kategorien)
database.einzelbuchungen.parse(_to_table(parser.einzelbuchungen()))
print('READER: Einzelbuchungen gelesen')
database.dauerauftraege.parse(_to_table(parser.dauerauftraege()))
print('READER: Daueraufträge gelesen')
database.gemeinsamebuchungen.parse(_to_table(parser.gemeinsame_buchungen()))
print('READER: Gemeinsame Buchungen gelesen')
if parser.sparkontos():
database.sparkontos.parse(_to_table(parser.sparkontos()))
print('READER: Sparkontos gelesen')
if parser.sparbuchungen():
database.sparbuchungen.parse(_to_table(parser.sparbuchungen()))
print('READER: Sparbuchungen gelesen')
if parser.depotwerte():
database.depotwerte.parse(_to_table(parser.depotwerte()))
print('READER: Depotwerte gelesen')
if parser.order():
database.order.parse(_to_table(parser.order()))
        print('READER: Order gelesen')
if parser.depotauszuege():
database.depotauszuege.parse(_to_table(parser.depotauszuege()))
print('READER: Depotauszuege gelesen')
if parser.order_dauerauftrag():
database.orderdauerauftrag.parse(_to_table(parser.order_dauerauftrag()))
print('READER: Order Dauerauftrag gelesen')
print('READER: Refreshe Database')
database.refresh()
print('READER: Refresh done')
return database
def wrap_tableheader(table_header_name):
return '{} {} {}'.format(KEYWORD_LINEBREAK, table_header_name, KEYWORD_LINEBREAK)
def write(database):
content = database.einzelbuchungen.get_static_content().to_csv(index=False)
content += wrap_tableheader(KEYWORD_DAUERAUFRTAEGE)
content += database.dauerauftraege.content.to_csv(index=False)
content += wrap_tableheader(KEYWORD_GEMEINSAME_BUCHUNGEN)
content += database.gemeinsamebuchungen.content.to_csv(index=False)
content += wrap_tableheader(KEYWORD_SPARBUCHUNGEN)
content += database.sparbuchungen.get_static_content().to_csv(index=False)
content += wrap_tableheader(KEYWORD_SPARKONTOS)
content += database.sparkontos.get_static_content().to_csv(index=False)
content += wrap_tableheader(KEYWORD_DEPOTWERTE)
content += database.depotwerte.get_static_content().to_csv(index=False)
content += wrap_tableheader(KEYWORD_ORDER)
content += database.order.get_static_content().to_csv(index=False)
content += wrap_tableheader(KEYWORD_ORDERDAUERAUFTRAG)
content += database.orderdauerauftrag.get_static_content().to_csv(index=False)
content += wrap_tableheader(KEYWORD_DEPOTAUSZUEGE)
content += database.depotauszuege.get_static_content().to_csv(index=False)
file_system.instance().write(database_path_from(database.name), content)
print("WRITER: All Saved")
def database_path_from(username):
return '../Database_' + username + '.csv'
class DatabaseParser:
def __init__(self):
self._reader = MultiPartCsvReader(
set([
KEYWORD_EINZELBUCHUNGEN,
KEYWORD_DAUERAUFRTAEGE,
KEYWORD_GEMEINSAME_BUCHUNGEN,
KEYWORD_SPARBUCHUNGEN,
KEYWORD_SPARKONTOS,
KEYWORD_DEPOTWERTE,
KEYWORD_ORDER,
KEYWORD_DEPOTAUSZUEGE,
KEYWORD_ORDERDAUERAUFTRAG
]),
start_token=KEYWORD_EINZELBUCHUNGEN)
def from_string(self, lines):
self._reader.from_string(lines)
def einzelbuchungen(self):
return self._reader.get_string(KEYWORD_EINZELBUCHUNGEN)
def dauerauftraege(self):
return self._reader.get_string(KEYWORD_DAUERAUFRTAEGE)
def gemeinsame_buchungen(self):
return self._reader.get_string(KEYWORD_GEMEINSAME_BUCHUNGEN)
def sparbuchungen(self):
return self._reader.get_string(KEYWORD_SPARBUCHUNGEN)
def sparkontos(self):
return self._reader.get_string(KEYWORD_SPARKONTOS)
def depotwerte(self):
return self._reader.get_string(KEYWORD_DEPOTWERTE)
def order(self):
return self._reader.get_string(KEYWORD_ORDER)
def depotauszuege(self):
return self._reader.get_string(KEYWORD_DEPOTAUSZUEGE)
def order_dauerauftrag(self):
return self._reader.get_string(KEYWORD_ORDERDAUERAUFTRAG)
class MultiPartCsvReader:
def __init__(self, token, start_token):
self._token = token
self._start_token = start_token
self._tables = {}
def from_string(self, lines):
self._tables = dict.fromkeys(self._token, '')
mode = self._start_token
for line in lines:
line = line.strip()
if line == '':
continue
if line in self._token:
mode = line
continue
if not ',' in line:
break
self._tables[mode] = self._tables[mode] + KEYWORD_LINEBREAK + line
def get_string(self, token):
return self._tables[token].strip()
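if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): shows how
    # MultiPartCsvReader splits one multi-section input into the named tables
    # used above. The column names and values below are made up for the demo.
    demo_lines = [
        'Datum,Kategorie,Name,Wert',
        '2020-01-01,Lebensmittel,Supermarkt,-10.00',
        KEYWORD_DAUERAUFRTAEGE,
        'Startdatum,Endedatum,Kategorie,Name,Rhythmus,Wert',
    ]
    demo_reader = MultiPartCsvReader(
        set([KEYWORD_EINZELBUCHUNGEN, KEYWORD_DAUERAUFRTAEGE]),
        start_token=KEYWORD_EINZELBUCHUNGEN)
    demo_reader.from_string(demo_lines)
    print(demo_reader.get_string(KEYWORD_EINZELBUCHUNGEN))
    print(demo_reader.get_string(KEYWORD_DAUERAUFRTAEGE))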
|
agpl-3.0
|
wangjohn/wallace
|
wallace/fitness_evaluation_methods/fitness_evaluation_method.py
|
1
|
1417
|
import sklearn.metrics
class FitnessEvaluationMethod(object):
@classmethod
def evaluate_fitness(klass, predicted_results, true_results):
raise NotImplementedError()
@classmethod
def evaluation_type(klass):
return "minimizer"
##########################
### Regression Metrics ###
##########################
class MeanSquaredError(FitnessEvaluationMethod):
@classmethod
def evaluate_fitness(klass, predicted_results, true_results):
return sklearn.metrics.mean_squared_error(true_results, predicted_results)
class R2Score(FitnessEvaluationMethod):
@classmethod
def evaluate_fitness(klass, predicted_results, true_results):
return sklearn.metrics.r2_score(true_results, predicted_results)
@classmethod
def evaluation_type(klass):
return "maximizer"
class MeanAbsoluteError(FitnessEvaluationMethod):
@classmethod
def evaluate_fitness(klass, predicted_results, true_results):
return sklearn.metrics.mean_absolute_error(true_results, predicted_results)
##############################
### Classification Metrics ###
##############################
class F1Score(FitnessEvaluationMethod):
@classmethod
def evaluate_fitness(klass, predicted_results, true_results):
return sklearn.metrics.f1_score(true_results, predicted_results)
@classmethod
def evaluation_type(klass):
return "maximizer"
|
mit
|
JT5D/scikit-learn
|
sklearn/datasets/samples_generator.py
|
2
|
51932
|
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import numpy as np
from scipy import linalg
from ..preprocessing import LabelBinarizer
from ..utils import array2d, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = sample_without_replacement(2 ** dimensions, samples,
random_state=rng).astype('>u4')
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
    n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
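    Examples
    --------
    A minimal usage sketch; only the array shapes are shown, since the exact
    values depend on the random state.
    >>> from sklearn.datasets.samples_generator import make_classification
    >>> X, y = make_classification(n_samples=10, n_features=4, n_informative=2,
    ...                            n_redundant=0, random_state=0)
    >>> X.shape
    (10, 4)
    >>> y.shape
    (10,)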
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
return_indicator=False, random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. Number of labels follows
a Poisson distribution that never takes the value 0.
length : int, optional (default=50)
Sum of the features (number of words if documents).
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
return_indicator : bool, optional (default=False),
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
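    Examples
    --------
    A minimal usage sketch; with ``return_indicator=False`` (the default) the
    labels come back as one list per sample, so only sizes are shown here.
    >>> from sklearn.datasets.samples_generator import make_multilabel_classification
    >>> X, Y = make_multilabel_classification(n_samples=5, n_features=10,
    ...                                       random_state=0)
    >>> X.shape
    (5, 10)
    >>> len(Y)
    5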
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
        # pick the number of labels by rejection sampling (zero only if allow_unlabeled)
n = n_classes + 1
while (not allow_unlabeled and n == 0) or n > n_classes:
n = generator.poisson(n_labels)
# pick n classes
y = []
while len(y) != n:
# pick a class with probability P(c)
c = generator.multinomial(1, p_c).argmax()
if not c in y:
y.append(c)
# pick a non-zero document length by rejection sampling
k = 0
while k == 0:
k = generator.poisson(length)
# generate a document of length k words
x = np.zeros(n_features, dtype=int)
for i in range(k):
if len(y) == 0:
# if sample does not belong to any class, generate noise word
w = generator.randint(n_features)
else:
# pick a class and generate an appropriate word
c = y[generator.randint(len(y))]
w = generator.multinomial(1, p_w_c[:, c]).argmax()
x[w] += 1
return x, y
X, Y = zip(*[sample_example() for i in range(n_samples)])
if return_indicator:
lb = LabelBinarizer()
Y = lb.fit([range(n_classes)]).transform(Y)
return np.array(X, dtype=np.float64), Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See the `make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
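    Examples
    --------
    A minimal usage sketch; only the array shapes are shown, since the exact
    values depend on the random state.
    >>> from sklearn.datasets.samples_generator import make_regression
    >>> X, y = make_regression(n_samples=10, n_features=3, n_informative=2,
    ...                        random_state=0)
    >>> X.shape
    (10, 3)
    >>> y.shape
    (10,)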
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples / 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples / 2, dtype=np.intp),
np.ones(n_samples / 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
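    Examples
    --------
    A minimal usage sketch; only the array shapes are shown.
    >>> from sklearn.datasets.samples_generator import make_moons
    >>> X, y = make_moons(n_samples=10, random_state=0)
    >>> X.shape
    (10, 2)
    >>> y.shape
    (10,)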
"""
n_samples_out = n_samples / 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_in, dtype=np.intp),
np.ones(n_samples_out, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
    if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = array2d(centers)
n_features = centers.shape[1]
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, n in enumerate(n_samples_per_center):
X.append(centers[i] + generator.normal(scale=cluster_std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
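    Examples
    --------
    A minimal usage sketch; only the array shapes are shown.
    >>> from sklearn.datasets.samples_generator import make_friedman1
    >>> X, y = make_friedman1(n_samples=10, n_features=5, random_state=0)
    >>> X.shape
    (10, 5)
    >>> y.shape
    (10,)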
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
from ..utils.fixes import qr_economic
u, _ = qr_economic(generator.randn(n_samples, n))
v, _ = qr_economic(generator.randn(n_features, n))
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
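    Examples
    --------
    A minimal usage sketch; the shapes and the per-column sparsity of the code
    are deterministic, while the values depend on the random state.
    >>> from sklearn.datasets.samples_generator import make_sparse_coded_signal
    >>> data, dictionary, code = make_sparse_coded_signal(
    ...     n_samples=5, n_components=8, n_features=10, n_nonzero_coefs=3,
    ...     random_state=0)
    >>> data.shape, dictionary.shape, code.shape
    ((10, 5), (10, 8), (8, 5))
    >>> (code != 0).sum(axis=0)
    array([3, 3, 3, 3, 3])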
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
    This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
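    Examples
    --------
    A minimal usage sketch; the returned matrix is square and symmetric.
    >>> import numpy as np
    >>> from sklearn.datasets.samples_generator import make_spd_matrix
    >>> X = make_spd_matrix(n_dim=3, random_state=0)
    >>> X.shape
    (3, 3)
    >>> np.allclose(X, X.T)
    True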
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Parameters
----------
dim: integer, optional (default=1)
        The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
prec: array of shape = [dim, dim]
Notes
-----
    The sparsity is actually imposed on the Cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
d = np.diag(prec)
d = 1. / np.sqrt(d)
prec *= d
prec *= d[:, np.newaxis]
return prec
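# A minimal usage sketch (illustrative addition, not part of the original
# module); the helper name below is hypothetical.
def _example_make_sparse_spd_matrix():
    # A large alpha zeroes most entries of the Cholesky factor, so the
    # resulting matrix is sparse off the diagonal.
    prec = make_sparse_spd_matrix(dim=5, alpha=0.9, random_state=0)
    assert prec.shape == (5, 5)
    assert np.allclose(prec, prec.T)
    assert np.all(np.linalg.eigvalsh(prec) > 0)
    return prec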
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Parameters
----------
n_samples : int, optional (default=100)
        The number of sample points on the Swiss Roll.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
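# A minimal usage sketch (illustrative addition, not part of the original
# module); the helper name below is hypothetical.
def _example_make_swiss_roll():
    X, t = make_swiss_roll(n_samples=200, noise=0.05, random_state=0)
    # X holds the 3D points; t is the position along the roll, useful for
    # coloring manifold-learning plots.
    assert X.shape == (200, 3)
    assert t.shape == (200,)
    return X, t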
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
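# A minimal usage sketch (illustrative addition, not part of the original
# module); the helper name below is hypothetical.
def _example_make_s_curve():
    X, t = make_s_curve(n_samples=200, noise=0.05, random_state=0)
    # X holds the 3D points; t is the position along the curve.
    assert X.shape == (200, 3)
    assert t.shape == (200,)
    return X, t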
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
    The dataset is from Zhu et al. [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
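# A minimal usage sketch (illustrative addition, not part of the original
# module); the helper name below is hypothetical.
def _example_make_gaussian_quantiles():
    X, y = make_gaussian_quantiles(n_samples=90, n_features=2, n_classes=3,
                                   random_state=0)
    assert X.shape == (90, 2)
    # 90 samples split into 3 chi-square quantile shells -> 30 per class.
    assert all(np.sum(y == c) == 30 for c in range(3))
    return X, y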
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    # np.vstack expects a sequence of arrays; build lists rather than
    # passing generators.
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
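# A minimal usage sketch (illustrative addition, not part of the original
# module); the helper name below is hypothetical.
def _example_make_biclusters():
    X, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3, noise=0.5,
                                    shuffle=False, random_state=0)
    assert X.shape == (30, 20)
    # One boolean indicator vector per bicluster, for rows and for columns.
    assert rows.shape == (3, 30) and cols.shape == (3, 20)
    # Every row and column belongs to exactly one bicluster.
    assert np.all(rows.sum(axis=0) == 1) and np.all(cols.sum(axis=0) == 1)
    return X, rows, cols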
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
    # np.vstack expects a sequence of arrays; build lists rather than
    # passing generators.
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
return result, rows, cols
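# A minimal usage sketch (illustrative addition, not part of the original
# module); the helper name below is hypothetical.
def _example_make_checkerboard():
    X, rows, cols = make_checkerboard(shape=(20, 16), n_clusters=(4, 2),
                                      noise=0.5, shuffle=False,
                                      random_state=0)
    assert X.shape == (20, 16)
    # One indicator per (row cluster, column cluster) pair: 4 * 2 = 8.
    assert rows.shape == (8, 20) and cols.shape == (8, 16)
    return X, rows, cols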
|
bsd-3-clause
|
lewisc/spark-tk
|
regression-tests/sparktkregtests/testcases/dicom/dicom_filter_keyword_test.py
|
13
|
9391
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests dicom.filter functionality"""
import unittest
from sparktkregtests.lib import sparktk_test
import os
import dicom
import numpy
import random
from lxml import etree
class DicomFilterKeywordsTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""import dicom data for testing"""
super(DicomFilterKeywordsTest, self).setUp()
self.dataset = self.get_file("dicom_uncompressed")
self.dicom = self.context.dicom.import_dcm(self.dataset)
self.xml_directory = "../../../datasets/dicom/dicom_uncompressed/xml/"
self.image_directory = "../../../datasets/dicom/dicom_uncompressed/imagedata/"
self.query = ".//DicomAttribute[@keyword='KEYWORD']/Value/text()"
def test_filter_one_column_one_result_basic(self):
"""test filter with one unique key"""
# get the pandas frame for ease of access
metadata = self.dicom.metadata.to_pandas()
# grab a random row and extract the SOPInstanceUID from that record
random_row_index = random.randint(0, self.dicom.metadata.count() - 1)
random_row = metadata["metadata"][random_row_index]
xml_data = etree.fromstring(random_row.encode("ascii", "ignore"))
random_row_sopi_id = xml_data.xpath(self.query.replace("KEYWORD", "SOPInstanceUID"))[0]
# get all of the records with our randomly selected sopinstanceuid
# since sopinstanceuid is supposed to be unique for each record
# we should only get back the record which we randomly selected above
self.dicom.filter_by_keywords({"SOPInstanceUID" : random_row_sopi_id })
# check that our result is correct
# we should have gotten back from filter the row
# which we randomly selected
self.assertEqual(self.dicom.metadata.count(), 1)
pandas = self.dicom.metadata.to_pandas()["metadata"]
record = pandas[0]
self.assertEqual(str(random_row), str(record))
def test_filter_one_col_multi_result_basic(self):
"""test filter by keyword with one keyword mult record result"""
# get pandas frame for ease of access
metadata = self.dicom.metadata.to_pandas()
# grab a random row and extract the patient id
first_row = metadata["metadata"][0]
xml_data = etree.fromstring(first_row.encode("ascii", "ignore"))
first_row_patient_id = xml_data.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
# filter the records ourselves to get the expected result
expected_result = self._filter({"PatientID" : first_row_patient_id })
# get all of the records with that patient id
self.dicom.filter_by_keywords({"PatientID" : first_row_patient_id })
# get the pandas frame for ease of access
pandas_result = self.dicom.metadata.to_pandas()["metadata"]
# ensure that our expected result matches what dicom returned
self.assertEqual(len(expected_result), self.dicom.metadata.count())
for record, filtered_record in zip(expected_result, pandas_result):
self.assertEqual(record, filtered_record.encode("ascii", "ignore"))
def test_filter_multiple_columns_basic(self):
"""test filter with multiple key vals"""
# first we will generate a filter randomly by
# randomly selecting a row and extracting values that we want to use
keyword_filter = {}
metadata = self.dicom.metadata.to_pandas()["metadata"]
first_row = metadata[0]
xml_data = etree.fromstring(first_row.encode("ascii", "ignore"))
first_row_patient_id = xml_data.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
first_row_body_part = xml_data.xpath(self.query.replace("KEYWORD", "BodyPartExamined"))[0]
keyword_filter["PatientID"] = first_row_patient_id
keyword_filter["BodyPartExamined"] = first_row_body_part
# now we generate our expected result by filtering ourselves
matching_records = self._filter(keyword_filter)
# get the records which match our filter
self.dicom.filter_by_keywords(keyword_filter)
pandas_result = self.dicom.metadata.to_pandas()["metadata"]
# finally we check to ensure that dicom's result matches our expected result
self.assertEqual(len(matching_records), self.dicom.metadata.count())
for expected_record, actual_record in zip(matching_records, pandas_result):
ascii_actual_result = actual_record.encode("ascii", "ignore")
self.assertEqual(ascii_actual_result, expected_record)
def test_filter_invalid_column(self):
"""test filter invalid key"""
self.dicom.filter_by_keywords({ "invalid keyword" : "value" })
self.assertEqual(0, self.dicom.metadata.count())
def test_filter_multiple_invalid_columns(self):
"""test filter mult invalid keys"""
self.dicom.filter_by_keywords({ "invalid" : "bla", "another_invalid_col" : "bla" })
self.assertEqual(0, self.dicom.metadata.count())
def test_valid_keyword_zero_results(self):
"""test filter with key-value pair, key exists but no matches"""
self.dicom.filter_by_keywords({ "SOPInstanceUID" : "2" })
self.assertEqual(0, self.dicom.metadata.count())
def test_invalid_value_type(self):
"""test filter with key-value pair, key exists but value is not type of str"""
with self.assertRaisesRegexp(TypeError, "both keyword and value should be of <type 'str'>"):
self.dicom.filter_by_keywords({"SOPInstanceUID" : 2})
def test_filter_invalid_valid_col_mix(self):
"""test filter with mix of valid and invalid keys"""
# first we get a valid patient id by selecting the first row
# and extracting its patient id
first_row = self.dicom.metadata.to_pandas()["metadata"][0]
xml_data = etree.fromstring(first_row.encode("ascii", "ignore"))
patient_id = xml_data.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
# now we ask dicom to filter using a filter which is a mix of a valid key-value
# pair and an invalid key-value pair
self.dicom.filter_by_keywords({ "PatientID" : patient_id, "Invalid" : "bla" })
        # since there are no records which meet BOTH key-value criteria
# we assert that 0 records were returned
self.assertEqual(0, self.dicom.metadata.count())
def test_filter_invalid_type(self):
"""test filter invalid param type"""
with self.assertRaisesRegexp(Exception, "incomplete format"):
self.dicom.filter_by_keywords(1)
self.dicom.metadata.count()
def test_filter_unicode_columns(self):
"""test filter by keyword with unicode keys"""
# the logic is the same as test_filter_one_column above
# the only difference is here we are giving the keys as unicode
# strings instead of standard python strings
metadata = self.dicom.metadata.to_pandas()
first_row = metadata["metadata"][0]
xml_data = etree.fromstring(first_row.encode("ascii", "ignore"))
first_row_patient_id = xml_data.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
expected_result = self._filter({ "PatientID" : first_row_patient_id })
self.dicom.filter_by_keywords({ u'PatientID' : first_row_patient_id })
pandas_result = self.dicom.metadata.to_pandas()["metadata"]
self.assertEqual(len(expected_result), self.dicom.metadata.count())
for record, filtered_record in zip(expected_result, pandas_result):
self.assertEqual(record, filtered_record.encode("ascii", "ignore"))
def _filter(self, keywords):
"""generate our expected result by filtering the records"""
# here we are generating the expected result from the key-value
# filter so that we can compare it to what dicom returns
# we will iterate through the dicom metadata to get all of the
# records which match our key-value criteria
matching_records = []
pandas_metadata = self.dicom.metadata.to_pandas()["metadata"]
for row in pandas_metadata:
ascii_xml = row.encode("ascii", "ignore")
xml = etree.fromstring(row.encode("ascii", "ignore"))
for keyword in keywords:
this_row_keyword_value = xml.xpath(self.query.replace("KEYWORD", keyword))[0]
if this_row_keyword_value == keywords[keyword]:
if ascii_xml not in matching_records:
matching_records.append(ascii_xml)
return matching_records
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
adrpar/incubator-airflow
|
airflow/www/views.py
|
1
|
94327
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from past.builtins import basestring, unicode
import ast
import os
import pkg_resources
import socket
from functools import wraps
from datetime import datetime, timedelta
import dateutil.parser
import copy
import json
import bleach
from collections import defaultdict
import inspect
from textwrap import dedent
import traceback
import sqlalchemy as sqla
from sqlalchemy import or_, desc, and_, union_all
from flask import (
redirect, url_for, request, Markup, Response, current_app, render_template, make_response)
from flask_admin import BaseView, expose, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.actions import action
from flask_admin.babel import lazy_gettext
from flask_admin.tools import iterdecode
from flask_login import flash
from flask._compat import PY2
from jinja2.sandbox import ImmutableSandboxedEnvironment
import markdown
import nvd3
from wtforms import (
Form, SelectField, TextAreaField, PasswordField, StringField, validators)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import airflow
from airflow import configuration as conf
from airflow import models
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.settings import Session
from airflow.models import XCom, DagRun
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS
from airflow.models import BaseOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.utils.logging import LoggingMixin
from airflow.utils.json import json_ser
from airflow.utils.state import State
from airflow.utils.db import provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils import logging as log_utils
from airflow.utils.dates import infer_time_unit, scale_time_units
from airflow.www import utils as wwwutils
from airflow.www.forms import DateTimeForm, DateTimeWithNumRunsForm
from airflow.configuration import AirflowConfigException
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
dagbag = models.DagBag(settings.DAGS_FOLDER)
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user
FILTER_BY_OWNER = False
DEFAULT_SENSITIVE_VARIABLE_FIELDS = (
'password',
'secret',
'passwd',
'authorization',
'api_key',
'apikey',
'access_token',
)
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
# filter_by_owner if authentication is enabled and filter_by_owner is true
FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
def dag_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
url = url_for(
'airflow.graph',
dag_id=dag_id)
return Markup(
'<a href="{}">{}</a>'.format(url, dag_id))
def log_url_formatter(v, c, m, p):
return Markup(
'<a href="{m.log_url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def task_instance_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
task_id = bleach.clean(m.task_id)
url = url_for(
'airflow.task',
dag_id=dag_id,
task_id=task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=dag_id,
root=task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_token(state):
color = State.color(state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{state}</span>'.format(**locals()))
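# A minimal usage sketch (illustrative addition, not part of the original
# Airflow module); the helper name below is hypothetical.
def _example_state_token():
    # Renders a task/DAG state, e.g. State.SUCCESS, as a colored label
    # for the web UI.
    return state_token(State.SUCCESS)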
def state_f(v, c, m, p):
return state_token(m.state)
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if datetime.now().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
def label_link(v, c, m, p):
try:
default_params = ast.literal_eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def render(obj, lexer):
out = ""
if isinstance(obj, basestring):
out += pygment_html_render(obj, lexer)
elif isinstance(obj, (tuple, list)):
for i, s in enumerate(obj):
out += "<div>List item #{}</div>".format(i)
out += "<div>" + pygment_html_render(s, lexer) + "</div>"
elif isinstance(obj, dict):
for k, v in obj.items():
out += '<div>Dict item "{}"</div>'.format(k)
out += "<div>" + pygment_html_render(v, lexer) + "</div>"
return out
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: render(x, lexers.BashLexer),
'hql': lambda x: render(x, lexers.SqlLexer),
'sql': lambda x: render(x, lexers.SqlLexer),
'doc': lambda x: render(x, lexers.TextLexer),
'doc_json': lambda x: render(x, lexers.JsonLexer),
'doc_rst': lambda x: render(x, lexers.RstLexer),
'doc_yaml': lambda x: render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: render(
inspect.getsource(x), lexers.PythonLexer),
}
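# A minimal usage sketch (illustrative addition, not part of the original
# Airflow module); the helper name below is hypothetical.
def _example_attr_renderer():
    # Task attributes listed in attr_renderer are syntax-highlighted in the
    # task detail view; anything else falls back to a plain <pre> block.
    return attr_renderer['sql']("SELECT 1")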
def data_profiling_required(f):
'''
Decorator for views requiring data profiling access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
current_app.config['LOGIN_DISABLED'] or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):
if isinstance(tasks, list):
for task in tasks:
recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
return
if isinstance(tasks, SubDagOperator):
subtasks = tasks.subdag.tasks
dag_ids.append(tasks.subdag.dag_id)
for subtask in subtasks:
if subtask.task_id not in task_ids:
task_ids.append(subtask.task_id)
task_id_to_dag[subtask.task_id] = tasks.subdag
recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)
if isinstance(tasks, BaseOperator):
task_id_to_dag[tasks.task_id] = tasks.dag
def should_hide_value_for_key(key_name):
return any(s in key_name for s in DEFAULT_SENSITIVE_VARIABLE_FIELDS) \
and conf.getboolean('admin', 'hide_sensitive_variable_fields')
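# A minimal usage sketch (illustrative addition, not part of the original
# Airflow module); the helper name below is hypothetical. Masking also
# requires the 'hide_sensitive_variable_fields' admin setting to be enabled.
def _example_should_hide_value_for_key():
    # 'db_password' contains the substring 'password', so it is a masking
    # candidate; 'owner' is not.
    return (should_hide_value_for_key('db_password'),
            should_hide_value_for_key('owner'))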
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
from airflow import macros
import pandas as pd
session = settings.Session()
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).first()
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).first()
session.expunge_all()
session.commit()
session.close()
payload = {}
payload['state'] = 'ERROR'
payload['error'] = ''
# Processing templated fields
try:
args = ast.literal_eval(chart.default_params)
            if type(args) is not dict:
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
args.update(request_dict)
args['macros'] = macros
sandbox = ImmutableSandboxedEnvironment()
sql = sandbox.from_string(chart.sql).render(**args)
label = sandbox.from_string(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(
wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
                chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart.show_datatable or chart_type == "datatable":
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
payload['data'] = data
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
except Exception as e:
payload['error'] = "Time conversion failed"
if chart_type == 'datatable':
payload['state'] = 'SUCCESS'
return wwwutils.json_response(payload)
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
df = df.fillna(0)
NVd3ChartClass = chart_mapping.get(chart.chart_type)
NVd3ChartClass = getattr(nvd3, NVd3ChartClass)
nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)
for col in df.columns:
nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())
try:
nvd3_chart.buildcontent()
payload['chart_type'] = nvd3_chart.__class__.__name__
payload['htmlcontent'] = nvd3_chart.htmlcontent
except Exception as e:
payload['error'] = str(e)
payload['state'] = 'SUCCESS'
payload['request_dict'] = request_dict
return wwwutils.json_response(payload)
@expose('/chart')
@data_profiling_required
def chart(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
session.expunge_all()
session.commit()
session.close()
NVd3ChartClass = chart_mapping.get(chart.chart_type)
if not NVd3ChartClass:
flash(
"Not supported anymore as the license was incompatible, "
"sorry",
"danger")
redirect('/admin/chart/')
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/nvd3.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
def dag_stats(self):
ds = models.DagStat
session = Session()
qry = (
session.query(ds.dag_id, ds.state, ds.count)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.dag_states:
try:
count = data[dag.dag_id][state]
except Exception:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/task_stats')
def task_stats(self):
TI = models.TaskInstance
DagRun = models.DagRun
Dag = models.DagModel
session = Session()
LastDagRun = (
session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state != State.RUNNING)
.filter(Dag.is_active == True)
.group_by(DagRun.dag_id)
.subquery('last_dag_run')
)
RunningDagRun = (
session.query(DagRun.dag_id, DagRun.execution_date)
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state == State.RUNNING)
.filter(Dag.is_active == True)
.subquery('running_dag_run')
)
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
LastTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(LastDagRun, and_(
LastDagRun.c.dag_id == TI.dag_id,
LastDagRun.c.execution_date == TI.execution_date))
)
RunningTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(RunningDagRun, and_(
RunningDagRun.c.dag_id == TI.dag_id,
RunningDagRun.c.execution_date == TI.execution_date))
)
UnionTI = union_all(LastTI, RunningTI).alias('union_ti')
qry = (
session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())
.group_by(UnionTI.c.dag_id, UnionTI.c.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
session.close()
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.task_states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = dag_id
try:
with open(dag.fileloc, 'r') as f:
code = f.read()
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except IOError as e:
html_code = str(e)
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@expose('/dag_details')
@login_required
def dag_details(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
session = settings.Session()
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
return self.render(
'airflow/dag_details.html',
dag=dag, title=title, states=states, State=State)
@current_app.errorhandler(404)
def circles(self):
return render_template(
'airflow/circles.html', hostname=socket.getfqdn()), 404
@current_app.errorhandler(500)
def show_traceback(self):
from airflow.utils import asciiart as ascii_
return render_template(
'airflow/traceback.html',
hostname=socket.getfqdn(),
nukular=ascii_.nukular,
info=traceback.format_exc()), 500
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/headers')
def headers(self):
d = {
'headers': {k: v for k, v in request.headers},
}
if hasattr(current_user, 'is_superuser'):
d['is_superuser'] = current_user.is_superuser()
d['data_profiling'] = current_user.data_profiling()
d['is_anonymous'] = current_user.is_anonymous()
d['is_authenticated'] = current_user.is_authenticated()
if hasattr(current_user, 'username'):
d['username'] = current_user.username
return wwwutils.json_response(d)
@expose('/pickle_info')
@login_required
def pickle_info(self):
d = {}
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
if not dag.is_subdag:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
@expose('/login', methods=['GET', 'POST'])
def login(self):
return airflow.login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
flash('You have been logged out.')
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
@wwwutils.action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title,)
@expose('/log')
@login_required
@wwwutils.action_logging
def log(self):
BASE_LOG_FOLDER = os.path.expanduser(
conf.get('core', 'BASE_LOG_FOLDER'))
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dag = dagbag.get_dag(dag_id)
log_relative = "{dag_id}/{task_id}/{execution_date}".format(
**locals())
loc = os.path.join(BASE_LOG_FOLDER, log_relative)
loc = loc.format(**locals())
log = ""
TI = models.TaskInstance
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
session = Session()
ti = session.query(TI).filter(
TI.dag_id == dag_id, TI.task_id == task_id,
TI.execution_date == dttm).first()
if ti is None:
log = "*** Task instance did not exist in the DB\n"
else:
# load remote logs
remote_log_base = conf.get('core', 'REMOTE_BASE_LOG_FOLDER')
remote_log_loaded = False
if remote_log_base:
remote_log_path = os.path.join(remote_log_base, log_relative)
remote_log = ""
# Only display errors reading the log if the task completed or ran at least
# once before (otherwise there won't be any remote log stored).
ti_execution_completed = ti.state in {State.SUCCESS, State.FAILED}
ti_ran_more_than_once = ti.try_number > 1
surface_log_retrieval_errors = (
ti_execution_completed or ti_ran_more_than_once)
# S3
if remote_log_path.startswith('s3:/'):
remote_log += log_utils.S3Log().read(
remote_log_path, return_error=surface_log_retrieval_errors)
remote_log_loaded = True
# GCS
elif remote_log_path.startswith('gs:/'):
remote_log += log_utils.GCSLog().read(
remote_log_path, return_error=surface_log_retrieval_errors)
remote_log_loaded = True
# unsupported
else:
remote_log += '*** Unsupported remote log location.'
if remote_log:
log += ('*** Reading remote log from {}.\n{}\n'.format(
remote_log_path, remote_log))
# We only want to display the
# local logs while the task is running if a remote log configuration is set up
            # since the logs will be transferred there after the run completes.
# TODO(aoen): One problem here is that if a task is running on a worker it
# already ran on, then duplicate logs will be printed for all of the previous
# runs of the task that already completed since they will have been printed as
# part of the remote log section above. This can be fixed either by streaming
# logs to the log servers as tasks are running, or by creating a proper
            # abstraction for multiple task instance runs.
if not remote_log_loaded or ti.state == State.RUNNING:
if os.path.exists(loc):
try:
f = open(loc)
log += "*** Reading local log.\n" + "".join(f.readlines())
f.close()
except:
log = "*** Failed to load local log file: {0}.\n".format(loc)
else:
WORKER_LOG_SERVER_PORT = \
conf.get('celery', 'WORKER_LOG_SERVER_PORT')
url = os.path.join(
"http://{ti.hostname}:{WORKER_LOG_SERVER_PORT}/log", log_relative
).format(**locals())
log += "*** Log file isn't local.\n"
log += "*** Fetching here: {url}\n".format(**locals())
try:
import requests
timeout = None # No timeout
try:
timeout = conf.getint('webserver', 'log_fetch_timeout_sec')
except (AirflowConfigException, ValueError):
pass
response = requests.get(url, timeout=timeout)
response.raise_for_status()
log += '\n' + response.text
except:
log += "*** Failed to fetch log file from worker.\n".format(
**locals())
if PY2 and not isinstance(log, unicode):
log = log.decode('utf-8')
return self.render(
'airflow/ti_code.html',
code=log, dag=dag, title="Log", task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
@wwwutils.action_logging
def task(self):
TI = models.TaskInstance
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = copy.copy(dag.get_task(task_id))
task.resolve_template_files()
ti = TI(task=task, execution_date=dttm)
ti.refresh_from_db()
ti_attrs = []
for attr_name in dir(ti):
if not attr_name.startswith('_'):
attr = getattr(ti, attr_name)
if type(attr) != type(self.task):
ti_attrs.append((attr_name, str(attr)))
task_attrs = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer:
task_attrs.append((attr_name, str(attr)))
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
no_failed_deps_result = [(
"Unknown",
dedent("""\
All dependencies are met but the task instance is not running. In most cases this just means that the task will probably be scheduled soon unless:<br/>
- The scheduler is down or under heavy load<br/>
{}
<br/>
If this task instance does not start soon please contact your Airflow administrator for assistance."""
.format(
"- This task instance already ran and had it's state changed manually (e.g. cleared in the UI)<br/>"
if ti.state == State.NONE else "")))]
# Use the scheduler's context to figure out which dependencies are not met
dep_context = DepContext(SCHEDULER_DEPS)
failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
ti.get_failed_dep_statuses(
dep_context=dep_context)]
title = "Task Instance Details"
return self.render(
'airflow/task.html',
task_attrs=task_attrs,
ti_attrs=ti_attrs,
failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/xcom')
@login_required
@wwwutils.action_logging
def xcom(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
session = Session()
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
dag=dag, title=title)
@expose('/run')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def run(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
ignore_all_deps = request.args.get('ignore_all_deps') == "true"
ignore_task_deps = request.args.get('ignore_task_deps') == "true"
ignore_ti_state = request.args.get('ignore_ti_state') == "true"
try:
from airflow.executors import DEFAULT_EXECUTOR as executor
from airflow.executors import CeleryExecutor
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
except ImportError:
# in case CeleryExecutor cannot be imported it is not active either
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be queued
dep_context = DepContext(
deps=QUEUE_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join(
["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
flash("Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error")
return redirect(origin)
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/trigger')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def trigger(self):
dag_id = request.args.get('dag_id')
origin = request.args.get('origin') or "/admin/"
dag = dagbag.get_dag(dag_id)
if not dag:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = datetime.now()
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True
)
flash(
"Triggered {}, "
"it should start any moment now.".format(dag_id))
return redirect(origin)
@expose('/clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
recursive = request.args.get('recursive') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
else:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to clear:"),
details=details,)
return response
@expose('/blocked')
@login_required
def blocked(self):
session = settings.Session()
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
@expose('/success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def success(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
task.dag = dag
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
if not dag:
flash("Cannot find DAG: {}".format(dag_id))
return redirect(origin)
if not task:
flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id))
return redirect(origin)
from airflow.api.common.experimental.mark_tasks import set_state
if confirmed:
altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=State.SUCCESS,
commit=True)
flash("Marked success on {} task instances".format(len(altered)))
return redirect(origin)
to_be_altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=State.SUCCESS,
commit=False)
details = "\n".join([str(t) for t in to_be_altered])
response = self.render("airflow/confirm.html",
message=("Here's the list of task instances you are "
"about to mark as successful:"),
details=details)
return response
@expose('/tree')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def tree(self):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
session = settings.Session()
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
DR = models.DagRun
dag_runs = (
session.query(DR)
.filter(
DR.dag_id==dag.dag_id,
DR.execution_date<=base_date,
DR.execution_date>=min_date)
.all()
)
dag_runs = {
dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
dates = sorted(list(dag_runs.keys()))
max_date = max(dates) if dates else None
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
task_instances = {}
for ti in tis:
tid = alchemy_to_dict(ti)
dr = dag_runs.get(ti.execution_date)
tid['external_trigger'] = dr['external_trigger'] if dr else False
task_instances[(ti.task_id, ti.execution_date)] = tid
expanded = []
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / max(1, len(dag.roots))
def recurse_nodes(task, visited):
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
def set_duration(tid):
if (isinstance(tid, dict) and tid.get("state") == State.RUNNING and
tid["start_date"] is not None):
d = datetime.now() - dateutil.parser.parse(tid["start_date"])
tid["duration"] = d.total_seconds()
return tid
return {
'name': task.task_id,
'instances': [
set_duration(task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates],
}
data = json.dumps(data, indent=4, default=json_ser)
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur)
@expose('/graph')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
def graph(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
arrange = request.args.get('arrange', dag.orientation)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
DR = models.DagRun
drs = (
session.query(DR)
.filter_by(dag_id=dag_id)
.order_by(desc(DR.execution_date)).all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
class GraphForm(Form):
execution_date = SelectField("DAG run", choices=dr_choices)
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(
data={'execution_date': dttm.isoformat(), 'arrange': arrange})
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
session.close()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') and dag.doc_md else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=state_token(dr_state),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2),)
@expose('/duration')
@login_required
@wwwutils.action_logging
def duration(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=600, width="1200")
cum_chart = nvd3.lineChart(
name="cumLineChart", x_is_date=True, height=600, width="1200")
y = defaultdict(list)
x = defaultdict(list)
cum_y = defaultdict(list)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
TF = models.TaskFail
ti_fails = (
session
.query(TF)
.filter(
TF.dag_id == dag.dag_id,
TF.execution_date >= min_date,
TF.execution_date <= base_date,
TF.task_id.in_([t.task_id for t in dag.tasks]))
.all()
)
fails_totals = defaultdict(int)
for tf in ti_fails:
dict_key = (tf.dag_id, tf.task_id, tf.execution_date)
fails_totals[dict_key] += tf.duration
for ti in tis:
if ti.duration:
dttm = wwwutils.epoch(ti.execution_date)
x[ti.task_id].append(dttm)
y[ti.task_id].append(float(ti.duration))
fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)
fails_total = fails_totals[fails_dict_key]
cum_y[ti.task_id].append(float(ti.duration + fails_total))
# determine the most relevant time unit for the set of task instance
# durations for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])
# update the y Axis on both charts to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(y_unit))
cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(cum_y_unit))
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
cum_chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(cum_y[task.task_id],
cum_y_unit))
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
cum_chart.buildcontent()
s_index = cum_chart.htmlcontent.rfind('});')
cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +
"$( document ).trigger('chartload')" +
cum_chart.htmlcontent[s_index:])
return self.render(
'airflow/duration_chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent,
cum_chart=cum_chart.htmlcontent
)
@expose('/tries')
@login_required
@wwwutils.action_logging
def tries(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, y_axis_format='d', height=600, width="1200")
for task in dag.tasks:
y = []
x = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
dttm = wwwutils.epoch(ti.execution_date)
x.append(dttm)
y.append(ti.try_number)
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
tries = sorted(list({ti.try_number for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if tries else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent
)
@expose('/landing_times')
@login_required
@wwwutils.action_logging
def landing_times(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if base_date:
base_date = dateutil.parser.parse(base_date)
else:
base_date = dag.latest_execution_date or datetime.now()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=600, width="1200")
y = {}
x = {}
for task in dag.tasks:
y[task.task_id] = []
x[task.task_id] = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
ts = ti.execution_date
if dag.schedule_interval and dag.following_schedule(ts):
ts = dag.following_schedule(ts)
if ti.end_date:
dttm = wwwutils.epoch(ti.execution_date)
secs = (ti.end_date - ts).total_seconds()
x[ti.task_id].append(dttm)
y[ti.task_id].append(secs)
# determine the most relevant time unit for the set of landing times
# for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
# update the y Axis to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Landing Time ({})'.format(y_unit))
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
session.close()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
chart=chart.htmlcontent,
height="700px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
)
@expose('/paused', methods=['POST'])
@login_required
@wwwutils.action_logging
def paused(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
@wwwutils.action_logging
def refresh(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = datetime.now()
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect(request.referrer)
@expose('/refresh_all')
@login_required
@wwwutils.action_logging
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
@wwwutils.action_logging
def gantt(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = DateTimeForm(data={'execution_date': dttm})
tis = [
ti for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
tasks = []
for ti in tis:
end_date = ti.end_date if ti.end_date else datetime.now()
tasks.append({
'startDate': wwwutils.epoch(ti.start_date),
'endDate': wwwutils.epoch(end_date),
'isoStart': ti.start_date.isoformat()[:-4],
'isoEnd': end_date.isoformat()[:-4],
'taskName': ti.task_id,
'duration': "{}".format(end_date - ti.start_date)[:-4],
'status': ti.state,
'executionDate': ti.execution_date.isoformat(),
})
states = {ti.state: ti.state for ti in tis}
data = {
'taskNames': [ti.task_id for ti in tis],
'tasks': tasks,
'taskStatus': states,
'height': len(tis) * 25,
}
session.commit()
session.close()
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
data=json.dumps(data, indent=2),
base_date='',
demo_mode=demo_mode,
root=root,
)
@expose('/object/task_instances')
@login_required
@wwwutils.action_logging
def task_instances(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
return ("Error: Invalid execution_date")
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
return json.dumps(task_instances)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
@expose('/varimport', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def varimport(self):
try:
out = str(request.files['file'].read())
d = json.loads(out)
except Exception:
flash("Missing file or syntax error.")
else:
for k, v in d.items():
models.Variable.set(k, v, serialize_json=isinstance(v, dict))
flash("{} variable(s) successfully updated.".format(len(d)))
return redirect('/admin/variable')
class HomeView(AdminIndexView):
@expose("/")
@login_required
def index(self):
session = Session()
DM = models.DagModel
qry = None
# restrict the dags shown if filter_by_owner and current user is not superuser
do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()
hide_paused_dags_by_default = conf.getboolean('webserver',
'hide_paused_dags_by_default')
show_paused_arg = request.args.get('showPaused', 'None')
if show_paused_arg.strip().lower() == 'false':
hide_paused = True
elif show_paused_arg.strip().lower() == 'true':
hide_paused = False
else:
hide_paused = hide_paused_dags_by_default
# read orm_dags from the db
qry = session.query(DM)
qry_fltr = []
if do_filter and owner_mode == 'ldapgroup':
qry_fltr = qry.filter(
~DM.is_subdag, DM.is_active,
DM.owners.in_(current_user.ldap_groups)
).all()
elif do_filter and owner_mode == 'user':
qry_fltr = qry.filter(
~DM.is_subdag, DM.is_active,
DM.owners == current_user.user.username
).all()
else:
qry_fltr = qry.filter(
~DM.is_subdag, DM.is_active
).all()
# optionally filter out "paused" dags
if hide_paused:
orm_dags = {dag.dag_id: dag for dag in qry_fltr if not dag.is_paused}
else:
orm_dags = {dag.dag_id: dag for dag in qry_fltr}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
session.expunge_all()
session.commit()
session.close()
# get a list of all non-subdag dags visible to everyone
# optionally filter out "paused" dags
if hide_paused:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag and not dag.is_paused]
else:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag]
# optionally filter to get only dags that the user should see
if do_filter and owner_mode == 'ldapgroup':
# only show dags owned by someone in @current_user.ldap_groups
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner in current_user.ldap_groups
}
elif do_filter and owner_mode == 'user':
# only show dags owned by @current_user.user.username
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner == current_user.user.username
}
else:
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
}
all_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))
return self.render(
'airflow/dags.html',
webserver_dags=webserver_dags,
orm_dags=orm_dags,
hide_paused=hide_paused,
all_dag_ids=all_dag_ids)
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/', methods=['POST', 'GET'])
@wwwutils.gzipped
def query(self):
session = settings.Session()
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.form.get('conn_id')
csv = request.form.get('csv') == "true"
sql = request.form.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes=[
'table', 'table-bordered', 'table-striped', 'no-wrap'],
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
session.close()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
column_display_actions = True
page_size = 500
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
Modifying the base ModelView class for non edit, browse only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',)
column_list = (
'label', 'conn_id', 'chart_type', 'owner', 'last_modified',)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
'x_is_date': (
"Whether the X axis should be casted as a date field. Expect most "
"intelligible date formats to get casted properly."
),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
'A dictionary of {"key": "values",} that define what the '
'templated fields (parameters) values should be by default. '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
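    # Illustrative (hypothetical) example of a valid `default_params` value;
    # it must eval as a Python dict of template parameter defaults:
    #   {"schema": "default", "limit": "100"}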
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': [
(c.conn_id, c.conn_id)
for c in (
Session().query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if not model.user_id and current_user and hasattr(current_user, 'id'):
model.user_id = current_user.id
model.last_modified = datetime.now()
chart_mapping = (
('line', 'lineChart'),
('spline', 'lineChart'),
('bar', 'multiBarChart'),
('column', 'multiBarChart'),
('area', 'stackedAreaChart'),
('stacked_area', 'stackedAreaChart'),
('percent_area', 'stackedAreaChart'),
('datatable', 'datatable'),
)
chart_mapping = dict(chart_mapping)
class KnowEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description')
column_list = (
'label', 'event_type', 'start_date', 'end_date', 'reported_by')
column_default_sort = ("start_date", True)
class KnowEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
# NOTE: For debugging / troubleshooting
# mv = KnowEventTypeView(
# models.KnownEventType,
# Session, name="Known Event Types", category="Manage")
# admin.add_view(mv)
# class DagPickleView(SuperUserMixin, ModelView):
# pass
# mv = DagPickleView(
# models.DagPickle,
# Session, name="Pickles", category="Manage")
# admin.add_view(mv)
class VariableView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
list_template = 'airflow/variable_list.html'
def hidden_field_formatter(view, context, model, name):
if should_hide_value_for_key(model.key):
return Markup('*' * 8)
return getattr(model, name)
form_columns = (
'key',
'val',
)
column_list = ('key', 'val', 'is_encrypted',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val')
form_widget_args = {
'is_encrypted': {'disabled': True},
'val': {
'rows': 20,
}
}
column_sortable_list = (
'key',
'val',
'is_encrypted',
)
column_formatters = {
'val': hidden_field_formatter
}
# Default flask-admin export functionality doesn't handle serialized json
@action('varexport', 'Export', None)
def action_varexport(self, ids):
V = models.Variable
session = settings.Session()
qry = session.query(V).filter(V.id.in_(ids)).all()
session.close()
var_dict = {}
d = json.JSONDecoder()
for var in qry:
val = None
try:
val = d.decode(var.val)
except:
val = var.val
var_dict[var.key] = val
response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
response.headers["Content-Disposition"] = "attachment; filename=variables.json"
return response
def on_form_prefill(self, form, id):
if should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
class XComView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "XCom"
verbose_name_plural = "XComs"
page_size = 20
form_columns = (
'key',
'value',
'execution_date',
'task_id',
'dag_id',
)
form_extra_fields = {
'value': StringField('Value'),
}
column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
class DagRunModelView(ModelViewOnly):
verbose_name_plural = "DAG Runs"
can_edit = True
can_create = True
column_editable_list = ('state',)
verbose_name = "dag run"
column_default_sort = ('execution_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
form_args = dict(
dag_id=dict(validators=[validators.DataRequired()])
)
column_list = (
'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
column_filters = column_list
column_searchable_list = ('dag_id', 'state', 'run_id')
column_formatters = dict(
execution_date=datetime_f,
state=state_f,
start_date=datetime_f,
dag_id=dag_link)
@action('new_delete', "Delete", "Are you sure you want to delete selected records?")
def action_new_delete(self, ids):
session = settings.Session()
deleted = set(session.query(models.DagRun)
.filter(models.DagRun.id.in_(ids))
.all())
session.query(models.DagRun)\
.filter(models.DagRun.id.in_(ids))\
.delete(synchronize_session='fetch')
session.commit()
dirty_ids = []
for row in deleted:
models.DagStat.set_dirty(row.dag_id, session=session)
dirty_ids.append(row.dag_id)
models.DagStat.clean_dirty(dirty_ids, session=session)
session.close()
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_dagrun_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_dagrun_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_dagrun_state(ids, State.SUCCESS)
@provide_session
def set_dagrun_state(self, ids, target_state, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
dirty_ids.append(dr.dag_id)
count += 1
dr.state = target_state
if target_state == State.RUNNING:
dr.start_date = datetime.now()
else:
dr.end_date = datetime.now()
session.commit()
models.DagStat.clean_dirty(dirty_ids, session=session)
flash(
"{count} dag runs were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date')
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date')
named_filter_urls = True
column_formatters = dict(
log_url=log_url_formatter,
task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
queued_dttm=datetime_f,
dag_id=dag_link, duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('job_id', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
column_list = (
'state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
'pool', 'log_url')
can_delete = True
page_size = 500
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_task_instance_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_task_instance_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_task_instance_state(ids, State.SUCCESS)
@action('set_retry', "Set state to 'up_for_retry'", None)
def action_set_retry(self, ids):
self.set_task_instance_state(ids, State.UP_FOR_RETRY)
@action('delete',
lazy_gettext('Delete'),
lazy_gettext('Are you sure you want to delete selected records?'))
def action_delete(self, ids):
"""
As a workaround for AIRFLOW-277, this method overrides Flask-Admin's ModelView.action_delete().
TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
https://github.com/flask-admin/flask-admin/issues/1226
"""
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
self.delete_task_instances(ids)
else:
super(TaskInstanceModelView, self).action_delete(ids)
@provide_session
def set_task_instance_state(self, ids, target_state, session=None):
try:
TI = models.TaskInstance
count = len(ids)
for id in ids:
task_id, dag_id, execution_date = id.split(',')
execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).one()
ti.state = target_state
session.commit()
flash(
"{count} task instances were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
@provide_session
def delete_task_instances(self, ids, session=None):
try:
TI = models.TaskInstance
count = 0
for id in ids:
task_id, dag_id, execution_date = id.split(',')
execution_date = datetime.strptime(execution_date, '%Y-%m-%d %H:%M:%S')
count += session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).delete()
session.commit()
flash("{count} task instances were deleted".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to delete', 'error')
def get_one(self, id):
"""
As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one().
TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
https://github.com/flask-admin/flask-admin/issues/1226
"""
task_id, dag_id, execution_date = iterdecode(id)
execution_date = dateutil.parser.parse(execution_date)
return self.session.query(self.model).get((task_id, dag_id, execution_date))
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
'extra__jdbc__drv_path',
'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__scope',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)
form_overrides = dict(_password=PasswordField)
form_widget_args = {
'is_extra_encrypted': {'disabled': True},
'is_encrypted': {'disabled': True},
}
    # Used to customize the form; the form's elements get rendered
    # and the results are stored in the extra field as JSON. All of these
    # need to be prefixed with extra__ and then the conn_type, as in
    # extra__{conn_type}__name. You can also hide form elements and rename
    # others from the connection_form.js file.
form_extra_fields = {
'extra__jdbc__drv_path': StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
'extra__google_cloud_platform__project': StringField('Project Id'),
'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),
        'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),
}
form_choices = {
'conn_type': models.Connection._types
}
def on_model_change(self, form, model, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:
extra = {
key: formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
@classmethod
def alert_fernet_key(cls):
fk = None
try:
fk = conf.get('core', 'fernet_key')
except:
pass
return fk is None
@classmethod
def is_secure(cls):
"""
        Used to decide whether to display a message in the Connection list
        view making it clear that passwords and the `extra` field can't be
        encrypted.
"""
is_secure = False
try:
import cryptography
conf.get('core', 'fernet_key')
is_secure = True
except:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
class VersionView(wwwutils.SuperUserMixin, LoggingMixin, BaseView):
@expose('/')
def version(self):
# Look at the version from setup.py
try:
airflow_version = pkg_resources.require("apache-airflow")[0].version
except Exception as e:
airflow_version = None
self.logger.error(e)
# Get the Git repo and git hash
git_version = None
try:
with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f:
git_version = f.readline()
except Exception as e:
self.logger.error(e)
# Render information
title = "Version Info"
return self.render('airflow/version.html',
title=title,
airflow_version=airflow_version,
git_version=git_version)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = conf.AIRFLOW_CONFIG
if conf.getboolean("webserver", "expose_config"):
with open(conf.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
table = [(section, key, value, source)
for section, parameters in conf.as_dict(True, True).items()
for key, (value, source) in parameters.items()]
else:
config = (
"# You Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
table = None
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/config.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle,
table=table)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = 50
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
|
apache-2.0
|
abimannans/scikit-learn
|
examples/ensemble/plot_adaboost_hastie_10_2.py
|
355
|
3576
|
"""
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al. 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and the real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
|
bsd-3-clause
|
rsignell-usgs/notebook
|
system-test/Theme_2_Extreme_Events/Scenario_2A/ModelDataCompare_Winds/Model_Obs_Compare_Winds.py
|
3
|
16991
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# ># IOOS System Test: [Extreme Events Theme:](https://github.com/ioos/system-test/wiki/Development-of-Test-Themes#theme-2-extreme-events) Coastal Inundation
# <markdowncell>
# ### Can we compare observed and modeled wind speeds at stations located within a bounding box?
# This notebook is based on [IOOS System Test: Inundation](http://nbviewer.ipython.org/github/ioos/system-test/blob/master/Theme_2_Extreme_Events/Scenario_2A_Coastal_Inundation/Scenario_2A_Water_Level_Signell.ipynb)
#
# Methodology:
# * Define temporal and spatial bounds of interest, as well as parameters of interest
# * Search for available service endpoints in the NGDC CSW catalog meeting search criteria
# * Extract OPeNDAP data endpoints from model datasets and SOS endpoints from observational datasets
# * Obtain observation data sets from stations within the spatial boundaries
# * Using DAP (model) endpoints, find all available model data sets that fall in the area of interest for the specified time range, and extract the model grid cell closest to each of the given station locations
# * Plot observation stations on a map (red marker for model grid points) and draw a line between each station and the model grid point used for comparison
# * Plot modeled and observed time series wind speed on same axes for comparison
#
# <headingcell level=4>
# import required libraries
# <codecell>
import datetime as dt
from warnings import warn
import folium
from IPython.display import HTML
import iris
from iris.exceptions import CoordinateNotFoundError, ConstraintMismatchError
import matplotlib.pyplot as plt
import numpy as np
from owslib.csw import CatalogueServiceWeb
from owslib import fes
import pandas as pd
from pyoos.collectors.coops.coops_sos import CoopsSos
import requests
from utilities import (fes_date_filter, coops2df, find_timevar, find_ij, nearxy, service_urls, mod_df,
get_coordinates, inline_map, get_Coops_longName, css_styles)
css_styles()
# <headingcell level=4>
# Specify temporal and spatial conditions
# <codecell>
bounding_box_type = "box"
# Bounding Box [lon_min, lat_min, lon_max, lat_max]
area = {'Hawaii': [-160.0, 18.0, -154., 23.0],
'Gulf of Maine': [-72.0, 41.0, -69.0, 43.0],
'New York harbor region': [-75., 39., -71., 41.5],
'Puerto Rico': [-75, 12, -55, 26],
'East Coast': [-77, 36, -73, 38],
'North West': [-130, 38, -121, 50],
'Gulf of Mexico': [-92, 28, -84, 31],
'Arctic': [-179, 63, -140, 80]}
bounding_box = area['Gulf of Mexico']
#temporal range
jd_now = dt.datetime.utcnow()
jd_start, jd_stop = jd_now - dt.timedelta(days=11), jd_now + dt.timedelta(days=3)
start_date = jd_start.strftime('%Y-%m-%d %H:00')
stop_date = jd_stop.strftime('%Y-%m-%d %H:00')
jd_start = dt.datetime.strptime(start_date, '%Y-%m-%d %H:%M')
jd_stop = dt.datetime.strptime(stop_date, '%Y-%m-%d %H:%M')
print start_date,'to',stop_date
# <headingcell level=4>
# Specify data names of interest
# <codecell>
#put the names in a dict for ease of access
data_dict = {}
sos_name = 'Winds'
data_dict['winds'] = {
"u_names":['eastward_wind', 'u-component_of_wind', 'u_component_of_wind', 'u_component_of_wind_height_above_ground', 'u-component_of_wind_height_above_ground', 'ugrd10m', 'wind'],
"v_names":['northward_wind', 'v-component_of_wind', 'v-component_of_wind_height_above_ground', 'vgrd10m', 'wind'],
"sos_name":['winds']}
# <headingcell level=3>
# Check CSW for bounding box filter capabilities
# <codecell>
endpoints = ['http://www.nodc.noaa.gov/geoportal/csw',
'http://www.ngdc.noaa.gov/geoportal/csw',
'http://catalog.data.gov/csw-all',
'http://geoport.whoi.edu/geoportal/csw',
'https://edg.epa.gov/metadata/csw',
'http://cmgds.marine.usgs.gov/geonetwork/srv/en/csw',
'http://cida.usgs.gov/gdp/geonetwork/srv/en/csw',
'http://geodiscover.cgdi.ca/wes/serviceManagerCSW/csw',
'http://geoport.whoi.edu/gi-cat/services/cswiso']
bbox_endpoints = []
for url in endpoints:
# queryables = []
    try:
        csw = CatalogueServiceWeb(url, timeout=20)
    except BaseException:
        # skip this endpoint if the connection fails; otherwise the check
        # below would use a stale (or undefined) csw object
        print "Failure - %s - Timed out" % url
        continue
if "BBOX" in csw.filters.spatial_operators:
print "Success - %s - BBOX Query supported" % url
bbox_endpoints.append(url)
else:
print "Failure - %s - BBOX Query NOT supported" % url
# <markdowncell>
# ### Check the CSW endpoints for wind data in the date range specified
# <markdowncell>
# <div class="warning"><strong>Data discovery is limited</strong> - Most of the CSW endpoints don't have recent wind data available.</div>
# <codecell>
for endpoint in bbox_endpoints:
print endpoint
csw = CatalogueServiceWeb(endpoint,timeout=60)
# convert User Input into FES filters
start,stop = fes_date_filter(start_date,stop_date)
bbox = fes.BBox(bounding_box)
#use the search name to create search filter
or_filt = fes.Or([fes.PropertyIsLike(propertyname='apiso:AnyText',literal='*%s*' % val,
escapeChar='\\',wildCard='*',singleChar='?') for val in data_dict['winds']['u_names']])
filter_list = [fes.And([ bbox, start, stop, or_filt]) ]
# filter_list = [fes.And([ bbox, or_filt]) ]
    # connect to CSW, explore its properties
# try request using multiple filters "and" syntax: [[filter1,filter2]]
try:
csw.getrecords2(constraints=filter_list, maxrecords=1000, esn='full')
except Exception as e:
print 'ERROR - ' + str(e)
else:
print str(len(csw.records)) + " csw records found"
# Print titles
for rec, item in csw.records.items():
print(item.title)
print '\n'
# <markdowncell>
# ### Use NGDC CSW
# <codecell>
endpoint = 'http://www.ngdc.noaa.gov/geoportal/csw' # NGDC Geoportal
csw = CatalogueServiceWeb(endpoint,timeout=60)
# convert User Input into FES filters
start,stop = fes_date_filter(start_date,stop_date)
bbox = fes.BBox(bounding_box)
#use the search name to create search filter
or_filt = fes.Or([fes.PropertyIsLike(propertyname='apiso:AnyText',literal=('*%s*' % val),
escapeChar='\\',wildCard='*',singleChar='?') for val in data_dict['winds']['u_names']])
filter_list = [fes.And([ bbox, start, stop, or_filt]) ]
# connect to CSW, explore its properties
# try request using multiple filters "and" syntax: [[filter1,filter2]]
try:
csw.getrecords2(constraints=filter_list, maxrecords=1000, esn='full')
except Exception as e:
print 'ERROR - ' + str(e)
# <markdowncell>
# DAP URLs
# <codecell>
# Now print the DAP endpoints
dap_urls = service_urls(csw.records)
#remove duplicates and organize
dap_urls = sorted(set(dap_urls))
print "\n".join(dap_urls)
# <markdowncell>
# SOS URLs
# <markdowncell>
# <div class="error"><strong>CDIP buoys shouldn't be appearing</strong> - The CDIP buoys are located in the Pacific but are coming up in the Gulf of Mexico bounding box searches. See [issue](https://github.com/ioos/system-test/issues/133).</div>
# <codecell>
sos_urls = service_urls(csw.records,service='sos:url')
#Add known NDBC SOS
# sos_urls.append("http://sdf.ndbc.noaa.gov/sos/server.php") #?request=GetCapabilities&service=SOS
sos_urls = sorted(set(sos_urls))
print "Total SOS:",len(sos_urls)
print "\n".join(sos_urls)
# <markdowncell>
# ### SOS Requirements
# <codecell>
start_time = dt.datetime.strptime(start_date,'%Y-%m-%d %H:%M')
end_time = dt.datetime.strptime(stop_date,'%Y-%m-%d %H:%M')
iso_start = start_time.strftime('%Y-%m-%dT%H:%M:%SZ')
iso_end = end_time.strftime('%Y-%m-%dT%H:%M:%SZ')
# <codecell>
collector = CoopsSos()
collector.start_time = start_time
collector.end_time = end_time
collector.variables = data_dict["winds"]["sos_name"]
collector.server.identification.title
print collector.start_time,":", collector.end_time
ofrs = collector.server.offerings
# <markdowncell>
# ### Find all SOS stations within the bounding box and time extent
# <codecell>
print "Date: ",iso_start," to ", iso_end
box_str=','.join(str(e) for e in bounding_box)
print "Lat/Lon Box: ",box_str
url = (('http://opendap.co-ops.nos.noaa.gov/ioos-dif-sos/SOS?'
'service=SOS&request=GetObservation&version=1.0.0&'
'observedProperty=%s&'
'offering=urn:ioos:network:NOAA.NOS.CO-OPS:MetActive&'
'featureOfInterest=BBOX:%s&responseFormat=text/csv') % (sos_name, box_str))
# url = (('http://sdf.ndbc.noaa.gov/sos/server.php?request=GetObservation&service=SOS&version=1.0.0&offering=urn:ioos:network:noaa.nws.ndbc:all'
# '&featureofinterest=BBOX:%s&observedproperty=%s&responseformat=text/csv') % (box_str, sos_name))
print url
obs_loc_df = pd.read_csv(url)
# <codecell>
obs_loc_df.head()
# <codecell>
# Index the data frame to filter repeats by bin #
stations = [sta.split(':')[-1] for sta in obs_loc_df['station_id']]
obs_lon = [sta for sta in obs_loc_df['longitude (degree)']]
obs_lat = [sta for sta in obs_loc_df['latitude (degree)']]
# <headingcell level=3>
# Request CSV response from collector and convert to Pandas DataFrames
# <codecell>
ts_rng = pd.date_range(start=jd_start, end=jd_stop, freq='6Min')
ts = pd.DataFrame(index=ts_rng)
obs_df = []
wind_speed_df = []
sta_names = []
sta_failed = []
for sta in stations:
try:
df = coops2df(collector, sta, sos_name)
except Exception as e:
print "Error" + str(e)
continue
name = df.name
sta_names.append(name)
if df.empty:
sta_failed.append(name)
df = pd.DataFrame(np.arange(len(ts)) * np.NaN, index=ts.index, columns=['Observed Data'])
df.name = name
# Limit interpolation to 10 points (10 @ 6min = 1 hour).
# col = 'Observed Data'
# concatenated = pd.concat([df, ts], axis=1).interpolate(limit=10)[col]
# obs_df.append(pd.DataFrame(concatenated))
obs_df.append(df)
obs_df[-1].name = name
# <markdowncell>
# ### Plot wind speeds and gusts as a time series
# <codecell>
for df in obs_df:
fig, axes = plt.subplots(1, 1, figsize=(20,6))
axes = df['wind_speed (m/s)'].plot(title=df.name, legend=True, color='b')
axes.set_ylabel('Wind Speed (m/s)')
for tl in axes.get_yticklabels():
tl.set_color('b')
axes.yaxis.label.set_color('blue')
axes = df['wind_speed_of_gust (m/s)'].plot(title=df.name, legend=True, color='g')
plt.setp(axes.lines[0], linewidth=1.0, zorder=1)
# <markdowncell>
# ### Plot wind direction
# <codecell>
for df in obs_df:
    plt.figure()
# Only plot the first bin
ax = df['wind_from_direction (degree)'].plot(figsize=(14, 6), title=df.name, legend=False)
plt.setp(ax.lines[0], linewidth=1.0, zorder=1)
ax.legend()
ax.set_ylabel('Wind Direction (degree)')
# <markdowncell>
# ### Get model output from OPeNDAP URLs
# Try to open all the OPeNDAP URLs using Iris from the British Met Office. If we can open it in Iris, we know it's a model result.
# <codecell>
name_in_list = lambda cube: cube.standard_name in data_dict['winds']['u_names']
u_constraint = iris.Constraint(cube_func=name_in_list)
name_in_list = lambda cube: cube.standard_name in data_dict['winds']['v_names']
v_constraint = iris.Constraint(cube_func=name_in_list)
# <codecell>
# # Create time index for model DataFrame
ts_rng = pd.date_range(start=jd_start, end=jd_stop, freq='H')
ts = pd.DataFrame(index=ts_rng)
# Create list of model DataFrames for each station
model_df = []
for df in obs_df:
model_df.append(pd.DataFrame(index=ts.index))
model_df[-1].name = df.name
model_lat = []
model_lon = []
# Use only data within 0.4 degrees.
max_dist = 0.4
# Use only data where the standard deviation of the time series exceeds 0.01 m (1 cm).
# This eliminates flat line model time series that come from land points that should have had missing values.
min_var = 0.01
# print dap_urls
for url in dap_urls:
# model_df, model_lat, model_lon = get_model_data(url, model_df, max_dist, min_var)
try:
        print 'Attempting to load {0}'.format(url)
u = iris.load_cube(url, u_constraint)
v = iris.load_cube(url, v_constraint)
        # take first 30 chars for model name
mod_name = u.attributes['title'][0:30]
r = u.shape
timevar = find_timevar(u)
lat = u.coord(axis='Y').points
lon = u.coord(axis='X').points
# Convert longitude to [-180 180]
if max(lon) > 180:
lon[lon>180] = lon[lon>180]-360
jd = timevar.units.num2date(timevar.points)
start = timevar.units.date2num(jd_start)
istart = timevar.nearest_neighbour_index(start)
stop = timevar.units.date2num(jd_stop)
istop = timevar.nearest_neighbour_index(stop)
# Only proceed if we have data in the range requested.
if istart != istop:
nsta = len(stations)
if len(r) == 3:
print('[Structured grid model]:', url)
d = u[0, :, :].data
if len(lon.shape) == 1:
new_lon, new_lat = np.meshgrid(lon, lat)
else:
new_lon, new_lat = lon, lat
# Find the closest non-land point from a structured grid model.
j, i, dd = find_ij(new_lon, new_lat, d, obs_lon, obs_lat)
# Keep the lat lon of the grid point
model_lat = lat[j].tolist()
model_lon = lon[i].tolist()
for n in range(nsta):
# Only use if model cell is within max_dist of station
if dd[n] <= max_dist:
u_arr = u[istart:istop, j[n], i[n]].data
v_arr = v[istart:istop, j[n], i[n]].data
                    # Compute wind speed magnitude (m/s) from the u and v components
arr = np.sqrt((u_arr)**2 + (v_arr)**2)
if u_arr.std() >= min_var:
c = mod_df(arr, timevar, istart, istop,
mod_name, ts)
name = obs_df[n].name
model_df[n] = pd.concat([model_df[n], c], axis=1)
model_df[n].name = name
else:
print 'min_var error'
else:
print 'Max dist error'
else:
print 'Grid has {0} dimensions'.format(str(len(r)))
else:
print 'No data in range'
except Exception as e:
warn("\n%s\n" % e)
pass
# <markdowncell>
# ### Plot the Observation Stations and Model Points on same Map
# <codecell>
# Find center of bounding box
lat_center = abs(bounding_box[3] - bounding_box[1])/2 + bounding_box[1]
lon_center = abs(bounding_box[0]-bounding_box[2])/2 + bounding_box[0]
m = folium.Map(location=[lat_center, lon_center], zoom_start=6)
# Now loop through stations and plot markers
for n in range(len(stations)):
# Get the station name
name = stations[n]
longname = obs_df[n].name
# Get obs station lat/lon
olat = obs_lat[n]
olon = obs_lon[n]
# Create obs station marker
popup_string = ('<b>Station:</b><br>'+ longname)
m.simple_marker([olat, olon], popup=popup_string)
# Only plot if there is model data
if model_lat:
# Get model grid points lat/lon
mlat = model_lat[n]
mlon = model_lon[n]
# Plot a line from obs station to corresponding model grid point
data_1=[olat,olon]
data_2=[model_lat[n],model_lon[n]]
m.line([data_1,data_2],line_color='#00FF00', line_weight=5)
# Create model grid point marker
popup_string = ('<b>Model Grid Point</b>')
# m.simple_marker([model_lat[n], model_lon[n]], popup=popup_string, marker_color='red', marker_icon='download',clustered_marker=False)
m.circle_marker([mlat, mlon], popup=popup_string, fill_color='#ff0000', radius=5000, line_color='#ff0000')
m.line(get_coordinates(bounding_box, bounding_box_type), line_color='#FF0000', line_weight=5)
inline_map(m)
# <markdowncell>
# #### Plot Modeled vs Obs Winds
# <codecell>
for n in range(len(obs_df)):
# First plot the model data
if not model_df[n].empty and not obs_df[n].empty:
ax = model_df[n].plot(figsize=(14, 6), title=model_df[n].name, legend=False)
plt.setp(ax.lines[0], linewidth=3, color='0.7', zorder=1)
ax.legend()
# Overlay the obs data (resample to hourly instead of 6 mins!)
ax = obs_df[n]['wind_speed (m/s)'].resample('H', how='mean').plot(title=obs_df[n].name, legend=False, color='b')
plt.setp(ax.lines[1], linewidth=1.0, zorder=1)
ax.legend()
ax.set_ylabel('Wind Speed (m/s)')
plt.show()
# <codecell>
|
mit
|
aabadie/scikit-learn
|
examples/svm/plot_iris.py
|
24
|
3252
|
"""
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
|
bsd-3-clause
|
asurve/arvind-sysml2
|
projects/breast_cancer/breastcancer/visualization.py
|
18
|
2001
|
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
"""
Visualization -- Predicting Breast Cancer Proliferation Scores with
Apache SystemML
This module contains functions for visualizing data for the breast
cancer project.
"""
import matplotlib.pyplot as plt
def visualize_tile(tile):
"""
Plot a tissue tile.
Args:
tile: A 3D NumPy array of shape (tile_size, tile_size, channels).
Returns:
None
"""
plt.imshow(tile)
plt.show()
def visualize_sample(sample, size=256):
"""
Plot a tissue sample.
Args:
sample: A square sample flattened to a vector of size
(channels*size_x*size_y).
size: The width and height of the square samples.
Returns:
None
"""
# Change type, reshape, transpose to (size_x, size_y, channels).
length = sample.shape[0]
channels = int(length / (size * size))
if channels > 1:
sample = sample.astype('uint8').reshape((channels, size, size)).transpose(1,2,0)
plt.imshow(sample)
else:
vmax = 255 if sample.max() > 1 else 1
sample = sample.reshape((size, size))
plt.imshow(sample, cmap="gray", vmin=0, vmax=vmax)
plt.show()
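# A minimal usage sketch (an illustrative addition, not part of the original
# project API). It builds a hypothetical random RGB tile and visualizes it
# both as a 3D tile and as a flattened sample vector.
if __name__ == "__main__":
    import numpy as np
    # Hypothetical 256x256 RGB tile with pixel values in [0, 255].
    demo_tile = np.random.randint(0, 256, size=(256, 256, 3)).astype('uint8')
    visualize_tile(demo_tile)
    # Flatten channels-first to (channels*size*size,), the layout that
    # visualize_sample reshapes back into (channels, size, size).
    demo_sample = demo_tile.transpose(2, 0, 1).reshape(-1)
    visualize_sample(demo_sample, size=256)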
|
apache-2.0
|
toirl/cointrader
|
cointrader/chart.py
|
1
|
3141
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import pandas
import datetime
from stockstats import StockDataFrame
def chart2csv(chart):
out = []
header = "date,amount,close,high,low,open,volume"
out.append(header)
for cs in chart:
out.append("{},{},{},{},{},{},{}".format(cs["date"],
"", cs["close"], cs["high"],
cs["low"], cs["open"], cs["volume"]))
return u"\n".join(out)
def search_chartdata_by_date(data, dt, le=True):
ts = (dt - datetime.datetime(1970, 1, 1)).total_seconds()
chart_item = data[0]
for d in data:
if d["date"] <= ts:
chart_item = d
else:
break
return chart_item
class Chart(object):
"""The chart provides a unified interface to the chart data. It also
gives access so some common indicators like macd, sma and ema.
The `data` is provided as list of dictionaries where each
dictionary represents a single set of data per point in the
chart::
{
u'date': 1500112800,
u'open': 0.07132169,
u'close': 0.07162004,
u'high': 0.07172972,
u'low': 0.07114623,
u'volume': 7.49372245,
u'quoteVolume': 104.69114835,
u'weightedAverage': 0.07157933,
}
    The `start` and `end` datetimes define the relevant timeframe of
    the chart for later profit calculations. This date range is
    needed because the chart itself contains more datapoints than
    fall within the given date range. The extra data ensures that
    indicators like ema and sma provide meaningful values right from
    the beginning of the timeframe, so more data must be available
    before the start.
"""
def __init__(self, data, start, end):
"""Will build a chart instance from the given raw data input.
:data: List of datapoints as dictionary.
:start: Datetime object.
:end: Datetime object.
"""
self._data = data
self._start = start
self._end = end
self._stock = StockDataFrame.retype(pandas.read_csv(io.StringIO(chart2csv(data))))
@property
def data(self):
return self._data
@property
def date(self):
return self._data[-1]["date"]
@property
def close(self):
return self._data[-1]["close"]
def get_first_point(self):
return search_chartdata_by_date(self.data, self._start)
def get_last_point(self):
return search_chartdata_by_date(self.data, self._end)
def values(self, which="close"):
return [(v["date"], v[which]) for v in self._data]
################
# Indicators #
################
def macdh(self):
macdh = self._stock.get("macdh")
return macdh.tolist()
def sma(self, window=10):
sma = self._stock.get("close_{}_sma".format(window))
return sma.tolist()
def ema(self, window=10):
ema = self._stock.get("close_{}_ema".format(window))
return ema.tolist()
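if __name__ == "__main__":
    # A minimal usage sketch (illustrative only; the candlestick values below
    # are synthetic and not taken from any exchange).
    demo_data = [
        {"date": 1500112800 + i * 1800, "open": 0.070 + 0.001 * i,
         "close": 0.071 + 0.001 * i, "high": 0.072 + 0.001 * i,
         "low": 0.069 + 0.001 * i, "volume": 1.0 + i}
        for i in range(30)
    ]
    start = datetime.datetime.utcfromtimestamp(demo_data[10]["date"])
    end = datetime.datetime.utcfromtimestamp(demo_data[-1]["date"])
    chart = Chart(demo_data, start, end)
    print(chart.close)          # most recent closing price
    print(chart.sma(10)[-1])    # latest value of the 10-period SMA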
|
mit
|
gotomypc/scikit-learn
|
examples/plot_multioutput_face_completion.py
|
330
|
3019
|
"""
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
X_train = train[:, :int(np.ceil(0.5 * n_pixels))]  # Upper half of the faces
y_train = train[:, int(np.floor(0.5 * n_pixels)):]  # Lower half of the faces
X_test = test[:, :int(np.ceil(0.5 * n_pixels))]
y_test = test[:, int(np.floor(0.5 * n_pixels)):]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
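# --- Optional extension (not part of the original example) ---
# A rough numerical comparison of the estimators: mean squared error of the
# predicted lower halves against the true lower halves (illustrative only).
for name in sorted(ESTIMATORS):
    mse = np.mean((y_test_predict[name] - y_test) ** 2)
    print("%s: reconstruction MSE = %.5f" % (name, mse))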
|
bsd-3-clause
|
blink1073/scikit-image
|
skimage/viewer/viewers/core.py
|
33
|
13265
|
"""
ImageViewer class for viewing and interacting with images.
"""
import numpy as np
from ... import io, img_as_float
from ...util.dtype import dtype_range
from ...exposure import rescale_intensity
from ..qt import QtWidgets, Qt, Signal
from ..widgets import Slider
from ..utils import (dialogs, init_qtapp, figimage, start_qtapp,
update_axes_image)
from ..utils.canvas import BlitManager, EventManager
from ..plugins.base import Plugin
__all__ = ['ImageViewer', 'CollectionViewer']
def mpl_image_to_rgba(mpl_image):
"""Return RGB image from the given matplotlib image object.
Each image in a matplotlib figure has its own colormap and normalization
function. Return RGBA (RGB + alpha channel) image with float dtype.
Parameters
----------
mpl_image : matplotlib.image.AxesImage object
The image being converted.
Returns
-------
img : array of float, shape (M, N, 4)
An image of float values in [0, 1].
"""
image = mpl_image.get_array()
if image.ndim == 2:
input_range = (mpl_image.norm.vmin, mpl_image.norm.vmax)
image = rescale_intensity(image, in_range=input_range)
# cmap complains on bool arrays
image = mpl_image.cmap(img_as_float(image))
elif image.ndim == 3 and image.shape[2] == 3:
# add alpha channel if it's missing
image = np.dstack((image, np.ones_like(image)))
return img_as_float(image)
class ImageViewer(QtWidgets.QMainWindow):
"""Viewer for displaying images.
This viewer is a simple container object that holds a Matplotlib axes
for showing images. `ImageViewer` doesn't subclass the Matplotlib axes (or
figure) because of the high probability of name collisions.
Subclasses and plugins will likely extend the `update_image` method to add
custom overlays or filter the displayed image.
Parameters
----------
image : array
Image being viewed.
Attributes
----------
canvas, fig, ax : Matplotlib canvas, figure, and axes
Matplotlib canvas, figure, and axes used to display image.
image : array
Image being viewed. Setting this value will update the displayed frame.
original_image : array
Plugins typically operate on (but don't change) the *original* image.
plugins : list
List of attached plugins.
Examples
--------
>>> from skimage import data
>>> image = data.coins()
>>> viewer = ImageViewer(image) # doctest: +SKIP
>>> viewer.show() # doctest: +SKIP
"""
dock_areas = {'top': Qt.TopDockWidgetArea,
'bottom': Qt.BottomDockWidgetArea,
'left': Qt.LeftDockWidgetArea,
'right': Qt.RightDockWidgetArea}
# Signal that the original image has been changed
original_image_changed = Signal(np.ndarray)
def __init__(self, image, useblit=True):
# Start main loop
init_qtapp()
super(ImageViewer, self).__init__()
#TODO: Add ImageViewer to skimage.io window manager
self.setAttribute(Qt.WA_DeleteOnClose)
self.setWindowTitle("Image Viewer")
self.file_menu = QtWidgets.QMenu('&File', self)
self.file_menu.addAction('Open file', self.open_file,
Qt.CTRL + Qt.Key_O)
self.file_menu.addAction('Save to file', self.save_to_file,
Qt.CTRL + Qt.Key_S)
self.file_menu.addAction('Quit', self.close,
Qt.CTRL + Qt.Key_Q)
self.menuBar().addMenu(self.file_menu)
self.main_widget = QtWidgets.QWidget()
self.setCentralWidget(self.main_widget)
if isinstance(image, Plugin):
plugin = image
image = plugin.filtered_image
plugin.image_changed.connect(self._update_original_image)
            # When the plugin is started, show the viewer window
plugin._started.connect(self._show)
self.fig, self.ax = figimage(image)
self.canvas = self.fig.canvas
self.canvas.setParent(self)
self.ax.autoscale(enable=False)
self._tools = []
self.useblit = useblit
if useblit:
self._blit_manager = BlitManager(self.ax)
self._event_manager = EventManager(self.ax)
self._image_plot = self.ax.images[0]
self._update_original_image(image)
self.plugins = []
self.layout = QtWidgets.QVBoxLayout(self.main_widget)
self.layout.addWidget(self.canvas)
status_bar = self.statusBar()
self.status_message = status_bar.showMessage
sb_size = status_bar.sizeHint()
cs_size = self.canvas.sizeHint()
self.resize(cs_size.width(), cs_size.height() + sb_size.height())
self.connect_event('motion_notify_event', self._update_status_bar)
def __add__(self, plugin):
"""Add plugin to ImageViewer"""
plugin.attach(self)
self.original_image_changed.connect(plugin._update_original_image)
if plugin.dock:
location = self.dock_areas[plugin.dock]
dock_location = Qt.DockWidgetArea(location)
dock = QtWidgets.QDockWidget()
dock.setWidget(plugin)
dock.setWindowTitle(plugin.name)
self.addDockWidget(dock_location, dock)
horiz = (self.dock_areas['left'], self.dock_areas['right'])
dimension = 'width' if location in horiz else 'height'
self._add_widget_size(plugin, dimension=dimension)
return self
def _add_widget_size(self, widget, dimension='width'):
widget_size = widget.sizeHint()
viewer_size = self.frameGeometry()
dx = dy = 0
if dimension == 'width':
dx = widget_size.width()
elif dimension == 'height':
dy = widget_size.height()
w = viewer_size.width()
h = viewer_size.height()
self.resize(w + dx, h + dy)
def open_file(self, filename=None):
"""Open image file and display in viewer."""
if filename is None:
filename = dialogs.open_file_dialog()
if filename is None:
return
image = io.imread(filename)
self._update_original_image(image)
def update_image(self, image):
"""Update displayed image.
This method can be overridden or extended in subclasses and plugins to
react to image changes.
"""
self._update_original_image(image)
def _update_original_image(self, image):
self.original_image = image # update saved image
self.image = image.copy() # update displayed image
self.original_image_changed.emit(image)
def save_to_file(self, filename=None):
"""Save current image to file.
The current behavior is not ideal: It saves the image displayed on
screen, so all images will be converted to RGB, and the image size is
not preserved (resizing the viewer window will alter the size of the
saved image).
"""
if filename is None:
filename = dialogs.save_file_dialog()
if filename is None:
return
if len(self.ax.images) == 1:
io.imsave(filename, self.image)
else:
underlay = mpl_image_to_rgba(self.ax.images[0])
overlay = mpl_image_to_rgba(self.ax.images[1])
alpha = overlay[:, :, 3]
# alpha can be set by channel of array or by a scalar value.
# Prefer the alpha channel, but fall back to scalar value.
if np.all(alpha == 1):
alpha = np.ones_like(alpha) * self.ax.images[1].get_alpha()
alpha = alpha[:, :, np.newaxis]
composite = (overlay[:, :, :3] * alpha +
underlay[:, :, :3] * (1 - alpha))
io.imsave(filename, composite)
def closeEvent(self, event):
self.close()
def _show(self, x=0):
self.move(x, 0)
for p in self.plugins:
p.show()
super(ImageViewer, self).show()
self.activateWindow()
self.raise_()
def show(self, main_window=True):
"""Show ImageViewer and attached plugins.
This behaves much like `matplotlib.pyplot.show` and `QWidget.show`.
"""
self._show()
if main_window:
start_qtapp()
return [p.output() for p in self.plugins]
def redraw(self):
if self.useblit:
self._blit_manager.redraw()
else:
self.canvas.draw_idle()
@property
def image(self):
return self._img
@image.setter
def image(self, image):
self._img = image
update_axes_image(self._image_plot, image)
# update display (otherwise image doesn't fill the canvas)
h, w = image.shape[:2]
self.ax.set_xlim(0, w)
self.ax.set_ylim(h, 0)
# update color range
clim = dtype_range[image.dtype.type]
if clim[0] < 0 and image.min() >= 0:
clim = (0, clim[1])
self._image_plot.set_clim(clim)
if self.useblit:
self._blit_manager.background = None
self.redraw()
def reset_image(self):
self.image = self.original_image.copy()
def connect_event(self, event, callback):
"""Connect callback function to matplotlib event and return id."""
cid = self.canvas.mpl_connect(event, callback)
return cid
def disconnect_event(self, callback_id):
"""Disconnect callback by its id (returned by `connect_event`)."""
self.canvas.mpl_disconnect(callback_id)
def _update_status_bar(self, event):
if event.inaxes and event.inaxes.get_navigate():
self.status_message(self._format_coord(event.xdata, event.ydata))
else:
self.status_message('')
def add_tool(self, tool):
if self.useblit:
self._blit_manager.add_artists(tool.artists)
self._tools.append(tool)
self._event_manager.attach(tool)
def remove_tool(self, tool):
if tool not in self._tools:
return
if self.useblit:
self._blit_manager.remove_artists(tool.artists)
self._tools.remove(tool)
self._event_manager.detach(tool)
def _format_coord(self, x, y):
# callback function to format coordinate display in status bar
x = int(x + 0.5)
y = int(y + 0.5)
try:
return "%4s @ [%4s, %4s]" % (self.image[y, x], x, y)
except IndexError:
return ""
class CollectionViewer(ImageViewer):
"""Viewer for displaying image collections.
Select the displayed frame of the image collection using the slider or
with the following keyboard shortcuts:
left/right arrows
Previous/next image in collection.
number keys, 0--9
0% to 90% of collection. For example, "5" goes to the image in the
middle (i.e. 50%) of the collection.
home/end keys
First/last image in collection.
Parameters
----------
image_collection : list of images
List of images to be displayed.
update_on : {'move' | 'release'}
Control whether image is updated on slide or release of the image
        slider. Using 'release' will give smoother behavior when displaying
large images or when writing a plugin/subclass that requires heavy
computation.
"""
def __init__(self, image_collection, update_on='move', **kwargs):
self.image_collection = image_collection
self.index = 0
self.num_images = len(self.image_collection)
first_image = image_collection[0]
super(CollectionViewer, self).__init__(first_image)
slider_kws = dict(value=0, low=0, high=self.num_images - 1)
slider_kws['update_on'] = update_on
slider_kws['callback'] = self.update_index
slider_kws['value_type'] = 'int'
self.slider = Slider('frame', **slider_kws)
self.layout.addWidget(self.slider)
        #TODO: Adjust height to accommodate slider; the following doesn't work
# s_size = self.slider.sizeHint()
# cs_size = self.canvas.sizeHint()
# self.resize(cs_size.width(), cs_size.height() + s_size.height())
def update_index(self, name, index):
"""Select image on display using index into image collection."""
index = int(round(index))
if index == self.index:
return
# clip index value to collection limits
index = max(index, 0)
index = min(index, self.num_images - 1)
self.index = index
self.slider.val = index
self.update_image(self.image_collection[index])
def keyPressEvent(self, event):
if type(event) == QtWidgets.QKeyEvent:
key = event.key()
# Number keys (code: 0 = key 48, 9 = key 57) move to deciles
if 48 <= key < 58:
index = 0.1 * int(key - 48) * self.num_images
self.update_index('', index)
event.accept()
else:
event.ignore()
else:
event.ignore()
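if __name__ == "__main__":
    # A minimal usage sketch (not part of the original module); it assumes a
    # working Qt backend and uses the sample images bundled with scikit-image.
    from skimage import data
    viewer = CollectionViewer([data.camera(), data.coins()])
    viewer.show()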
|
bsd-3-clause
|
blink1073/scikit-image
|
skimage/io/manage_plugins.py
|
17
|
10353
|
"""Handle image reading, writing and plotting plugins.
To improve performance, plugins are only loaded as needed. As a result, there
can be multiple states for a given plugin:
available: Defined in an *ini file located in `skimage.io._plugins`.
See also `skimage.io.available_plugins`.
partial definition: Specified in an *ini file, but not defined in the
corresponding plugin module. This will raise an error when loaded.
available but not on this system: Defined in `skimage.io._plugins`, but
a dependent library (e.g. Qt, PIL) is not available on your system.
This will raise an error when loaded.
loaded: The real availability is determined when it's explicitly loaded,
either because it's one of the default plugins, or because it's
loaded explicitly by the user.
"""
import sys
if sys.version.startswith('3'):
from configparser import ConfigParser # Python 3
else:
from ConfigParser import ConfigParser # Python 2
import os.path
from glob import glob
from .collection import imread_collection_wrapper
__all__ = ['use_plugin', 'call_plugin', 'plugin_info', 'plugin_order',
'reset_plugins', 'find_available_plugins', 'available_plugins']
# The plugin store will save a list of *loaded* io functions for each io type
# (e.g. 'imread', 'imsave', etc.). Plugins are loaded as requested.
plugin_store = None
# Dictionary mapping plugin names to a list of functions they provide.
plugin_provides = {}
# The module names for the plugins in `skimage.io._plugins`.
plugin_module_name = {}
# Meta-data about plugins provided by *.ini files.
plugin_meta_data = {}
# For each plugin type, default to the first available plugin as defined by
# the following preferences.
preferred_plugins = {
# Default plugins for all types (overridden by specific types below).
'all': ['pil', 'matplotlib', 'qt', 'freeimage'],
'imshow': ['matplotlib']
}
def _clear_plugins():
"""Clear the plugin state to the default, i.e., where no plugins are loaded
"""
global plugin_store
plugin_store = {'imread': [],
'imsave': [],
'imshow': [],
'imread_collection': [],
'_app_show': []}
_clear_plugins()
def _load_preferred_plugins():
# Load preferred plugin for each io function.
io_types = ['imsave', 'imshow', 'imread_collection', 'imread']
for p_type in io_types:
_set_plugin(p_type, preferred_plugins['all'])
plugin_types = (p for p in preferred_plugins.keys() if p != 'all')
for p_type in plugin_types:
_set_plugin(p_type, preferred_plugins[p_type])
def _set_plugin(plugin_type, plugin_list):
for plugin in plugin_list:
if plugin not in available_plugins:
continue
try:
use_plugin(plugin, kind=plugin_type)
break
except (ImportError, RuntimeError, OSError):
pass
def reset_plugins():
_clear_plugins()
_load_preferred_plugins()
def _parse_config_file(filename):
"""Return plugin name and meta-data dict from plugin config file."""
parser = ConfigParser()
parser.read(filename)
name = parser.sections()[0]
meta_data = {}
for opt in parser.options(name):
meta_data[opt] = parser.get(name, opt)
return name, meta_data
def _scan_plugins():
"""Scan the plugins directory for .ini files and parse them
to gather plugin meta-data.
"""
pd = os.path.dirname(__file__)
config_files = glob(os.path.join(pd, '_plugins', '*.ini'))
for filename in config_files:
name, meta_data = _parse_config_file(filename)
plugin_meta_data[name] = meta_data
provides = [s.strip() for s in meta_data['provides'].split(',')]
valid_provides = [p for p in provides if p in plugin_store]
for p in provides:
if not p in plugin_store:
print("Plugin `%s` wants to provide non-existent `%s`."
" Ignoring." % (name, p))
# Add plugins that provide 'imread' as provider of 'imread_collection'.
need_to_add_collection = ('imread_collection' not in valid_provides and
'imread' in valid_provides)
if need_to_add_collection:
valid_provides.append('imread_collection')
plugin_provides[name] = valid_provides
plugin_module_name[name] = os.path.basename(filename)[:-4]
_scan_plugins()
def find_available_plugins(loaded=False):
"""List available plugins.
Parameters
----------
loaded : bool
If True, show only those plugins currently loaded. By default,
all plugins are shown.
Returns
-------
p : dict
Dictionary with plugin names as keys and exposed functions as
values.
"""
active_plugins = set()
for plugin_func in plugin_store.values():
for plugin, func in plugin_func:
active_plugins.add(plugin)
d = {}
for plugin in plugin_provides:
if not loaded or plugin in active_plugins:
d[plugin] = [f for f in plugin_provides[plugin]
if not f.startswith('_')]
return d
available_plugins = find_available_plugins()
def call_plugin(kind, *args, **kwargs):
"""Find the appropriate plugin of 'kind' and execute it.
Parameters
----------
kind : {'imshow', 'imsave', 'imread', 'imread_collection'}
Function to look up.
plugin : str, optional
Plugin to load. Defaults to None, in which case the first
matching plugin is used.
*args, **kwargs : arguments and keyword arguments
Passed to the plugin function.
"""
if not kind in plugin_store:
raise ValueError('Invalid function (%s) requested.' % kind)
plugin_funcs = plugin_store[kind]
if len(plugin_funcs) == 0:
msg = ("No suitable plugin registered for %s.\n\n"
"You may load I/O plugins with the `skimage.io.use_plugin` "
"command. A list of all available plugins can be found using "
"`skimage.io.plugins()`.")
raise RuntimeError(msg % kind)
plugin = kwargs.pop('plugin', None)
if plugin is None:
_, func = plugin_funcs[0]
else:
_load(plugin)
try:
func = [f for (p, f) in plugin_funcs if p == plugin][0]
except IndexError:
raise RuntimeError('Could not find the plugin "%s" for %s.' %
(plugin, kind))
return func(*args, **kwargs)
def use_plugin(name, kind=None):
"""Set the default plugin for a specified operation. The plugin
will be loaded if it hasn't been already.
Parameters
----------
name : str
Name of plugin.
kind : {'imsave', 'imread', 'imshow', 'imread_collection'}, optional
Set the plugin for this function. By default,
the plugin is set for all functions.
See Also
--------
available_plugins : List of available plugins
Examples
--------
To use Matplotlib as the default image reader, you would write:
>>> from skimage import io
>>> io.use_plugin('matplotlib', 'imread')
To see a list of available plugins run ``io.available_plugins``. Note that
this lists plugins that are defined, but the full list may not be usable
if your system does not have the required libraries installed.
"""
if kind is None:
kind = plugin_store.keys()
else:
if not kind in plugin_provides[name]:
raise RuntimeError("Plugin %s does not support `%s`." %
(name, kind))
if kind == 'imshow':
kind = [kind, '_app_show']
else:
kind = [kind]
_load(name)
for k in kind:
if not k in plugin_store:
raise RuntimeError("'%s' is not a known plugin function." % k)
funcs = plugin_store[k]
# Shuffle the plugins so that the requested plugin stands first
# in line
funcs = [(n, f) for (n, f) in funcs if n == name] + \
[(n, f) for (n, f) in funcs if n != name]
plugin_store[k] = funcs
def _inject_imread_collection_if_needed(module):
"""Add `imread_collection` to module if not already present."""
if not hasattr(module, 'imread_collection') and hasattr(module, 'imread'):
imread = getattr(module, 'imread')
func = imread_collection_wrapper(imread)
setattr(module, 'imread_collection', func)
def _load(plugin):
"""Load the given plugin.
Parameters
----------
plugin : str
Name of plugin to load.
See Also
--------
plugins : List of available plugins
"""
if plugin in find_available_plugins(loaded=True):
return
if not plugin in plugin_module_name:
raise ValueError("Plugin %s not found." % plugin)
else:
modname = plugin_module_name[plugin]
plugin_module = __import__('skimage.io._plugins.' + modname,
fromlist=[modname])
provides = plugin_provides[plugin]
for p in provides:
if p == 'imread_collection':
_inject_imread_collection_if_needed(plugin_module)
elif not hasattr(plugin_module, p):
print("Plugin %s does not provide %s as advertised. Ignoring." %
(plugin, p))
continue
store = plugin_store[p]
func = getattr(plugin_module, p)
if not (plugin, func) in store:
store.append((plugin, func))
def plugin_info(plugin):
"""Return plugin meta-data.
Parameters
----------
plugin : str
Name of plugin.
Returns
-------
m : dict
Meta data as specified in plugin ``.ini``.
"""
try:
return plugin_meta_data[plugin]
except KeyError:
raise ValueError('No information on plugin "%s"' % plugin)
def plugin_order():
"""Return the currently preferred plugin order.
Returns
-------
p : dict
Dictionary of preferred plugin order, with function name as key and
plugins (in order of preference) as value.
"""
p = {}
for func in plugin_store:
p[func] = [plugin_name for (plugin_name, f) in plugin_store[func]]
return p
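if __name__ == "__main__":
    # A minimal usage sketch (not part of the original module): list the
    # plugins defined on this system, prefer matplotlib for reading, and
    # print the resulting preference order.
    print(find_available_plugins())
    try:
        use_plugin('matplotlib', 'imread')
    except (ImportError, RuntimeError):
        pass  # matplotlib may not be installed
    print(plugin_order())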
|
bsd-3-clause
|
EliotBryant/ShadDetector
|
shadDetector_testing/Gradient Based Methods/logimage.py
|
1
|
1246
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 4 21:51:29 2017
@author: Eliot
logimage.py
"""
import cv2
import numpy as np
import os
from matplotlib import pyplot as plt
thisfilepath = os.path.dirname(__file__)
loaddirpath = os.path.abspath(os.path.join(thisfilepath, "test_files/orig"))
savedirpath = os.path.abspath(os.path.join(thisfilepath, "test_files/altered"))
image = cv2.imread(loaddirpath + "/001CROP11-17-59.jpg")
print(image.shape)
image = np.float32(image)
# actual code
logimage = cv2.log(image)
'''
All below is just to visualise what log space looks like
'''
logviewable = 35*logimage
logviewable = logviewable - 100 #comment out
logviewable = 2.5*logviewable #comment out
logu8 = np.uint8(logviewable)
ycrcb = cv2.cvtColor(logu8, cv2.COLOR_BGR2YCrCb)
y = ycrcb[...,0]
y = cv2.equalizeHist(y)
histr = cv2.calcHist([y],[0],None,[256],[0,256])
plt.figure(0)
plt.plot(histr)
plt.xlim([0,256])
logimageagain = cv2.cvtColor(ycrcb, cv2.COLOR_YCrCb2BGR)
color = ('b','g','r')
plt.figure(1)
for i,col in enumerate(color):
histr = cv2.calcHist([logimageagain],[i],None,[256],[0,256])
plt.plot(histr,color = col)
plt.xlim([0,256])
plt.show()
cv2.imshow("i", logimageagain)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
gpl-3.0
|
robin-lai/scikit-learn
|
sklearn/kernel_ridge.py
|
155
|
6545
|
"""Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
    dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
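if __name__ == "__main__":
    # A minimal usage sketch (not part of the original module): fit kernel
    # ridge regression with an RBF kernel on random data and predict.
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = np.sin(X[:, 0]) + 0.1 * rng.randn(20)
    model = KernelRidge(alpha=0.1, kernel="rbf", gamma=0.5).fit(X, y)
    print(model.predict(X[:5]))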
|
bsd-3-clause
|
jpedroan/megua
|
megua/mathcommon.py
|
1
|
23471
|
# coding=utf-8
r"""
Generic Mathematical routines for MEGUA.
AUTHORS:
- Pedro Cruz (2010-06): initial version
- Pedro Cruz (2013-11): added logb (and this module is now imported in ex.py)
- Pedro Cruz (2016-01): changed for new formal functions sagemath standard.
"""
#*****************************************************************************
# Copyright (C) 2011 Pedro Cruz <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
r"""
Must import everything from sage.all::
from sage.all import *
Avoiding this is impossible. For example::
from sage.rings.real_double import RDF
does not work.
.. test with: sage -t mathcommon.py
"""
#PYTHON modules
from string import join
import jinja2
import os
#SAGEMATH modules
#from sage.all import var,RealField,SR,function,e
from sage.all import *
#MEGUA modules
from megua.ur import ur
from megua.tounicode import to_unicode
"""
the following code is about templating.
TODO: incorporate other templating code into one module.
"""
#Templating (with Jinja2)
natlang = 'pt_pt'
if os.environ.has_key('MEGUA_TEMPLATE_PATH'):
TEMPLATE_PATH = os.environ['MEGUA_TEMPLATE_PATH']
else:
from pkg_resources import resource_filename
TEMPLATE_PATH = os.path.join(resource_filename(__name__,''),'template',natlang)
#print "Templates in mathcommon.py: '%s' language at %s" % (natlang,TEMPLATE_PATH)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(TEMPLATE_PATH))
#Sometimes needed.
x,y=var('x,y')
#For 4 digit numbers.
R15=RealField(15)
#For more digit numbers
R20 = RealField(20)
##############
#TikZmod -- Routines to convert sage plots into latex TikZ markup.
##############
def getpoints_str(pointlist):
return join( [ str( ( R20(p[0]), R20(p[1]) ) ) for p in pointlist ], ' ' )
def tikz_axis(vmin,vmax,axis='x', points=None, ticksize=2, originlabels=True):
r"""
Draw the vertical or horizontal 2d axis.
INPUT:
- ``vmin``: first point of the axis.
- ``vmax``: last point of the axis (where the arrow ends).
- ``axis``: 'x' or 'y'.
- ``points``: if None, points are guessed. Otherwise they are used to place marks.
    - ``originlabels``: (default True) If False, (0,0) won't have labels.
    Special thanks to Paula Oliveira for the first version.
Other resource: http://matplotlib.org/users/pgf.html#pgf-tutorial
"""
if points is None:
#integer tick marks only (for now)
first_int = floor(vmin)
last_int = ceil(vmax)
        #last_int - first_int + 1 gives all integers,
        #but the last point is the arrow vertex: no label and no tick mark, so "+1" is not added.
points = [ i+first_int for i in range(last_int - first_int) ]
if not originlabels and 0 in points:
pos = points.index(0)
del points[pos]
else:
first_int = min(points)
last_int = max(points) + 1 #added +1 for the reason above.
if axis=='x':
#integer tick marks
tmarks = r'\foreach \x in %s' % Set(points)
tmarks += r'\draw[color=black] (\x,-%d pt) node[below] {\scriptsize $\x$} -- (\x,%d pt) ;' % (ticksize,ticksize)
#main line and arrow at end
tmain = r'\draw[->,color=black] (%f,0) -- (%f,0);' % (first_int,last_int)
else:
#integer tick marks
tmarks = r'\foreach \y in %s' % Set(points)
tmarks += r'\draw[color=black] (-%d pt,\y) node[left] {\scriptsize $\y$} -- (%d pt,\y);' % (ticksize,ticksize)
#main line and arrow at end
tmain = r'\draw[->,color=black] (0,%f) -- (0,%f);' % (first_int,last_int)
return tmain + tmarks
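# A minimal usage sketch (illustrative, not part of the original module); it
# must run inside a Sage session because Set, floor and ceil come from
# sage.all:
#
#     sage: from megua.mathcommon import tikz_axis
#     sage: print(tikz_axis(-2, 3, axis='x'))   # TikZ markup for an x axis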
# =====================
# Google charts
# =====================
def svg_pie_chart(valueslist, chartid="chart1", title="Chart", width=400, height=300):
"""
    Plot an SVG pie chart.
    Note: uses google charts.
    INPUT:
    - ``valueslist`` -- list of pairs
    - ``chartid`` -- an identifier for the chart, like 'chart1'
    - ``title`` -- like 'How Much Pizza I Ate Last Night'
- ``width`` -- default 400
- ``height`` -- default 300
OUTPUT:
A HTML string with code (google chart html code) to plot a chart.
"""
filename="svg_pie_chart.html"
try:
tmpl = env.get_template(filename)
except jinja2.exceptions.TemplateNotFound:
return "MegUA -- missing template %s"%filename
r = tmpl.render(valueslist=valueslist,
chartid=chartid,
title=title,
width=width,
height=height)
print "TYPE=",type(r)
return r
def svg_pie_chart(valueslist, chartid="chart1", title="Chart", width=400, height=300):
"""
    Plot an SVG pie chart.
    Note: uses google charts.
    INPUT:
    - ``valueslist`` -- list of pairs
    - ``chartid`` -- an identifier for the chart, like 'chart1'
    - ``title`` -- like 'How Much Pizza I Ate Last Night'
- ``width`` -- default 400
- ``height`` -- default 300
OUTPUT:
A HTML string with code (google chart html code) to plot a chart.
"""
filename="svg_pie_chart.html"
try:
tmpl = env.get_template(filename)
except jinja2.exceptions.TemplateNotFound:
return "MegUA -- missing template %s"%filename
r = tmpl.render(valueslist=valueslist,
chartid=chartid,
title=title,
width=width,
height=height)
return r
#=======================
# cis for "high school"
#=======================
def CIS_latex(fun,x):
return r'\text{cis}\left(%s\right)' % latex(x)
x,b=SR.var('x,b')
FORMALCIS = function('cis',nargs=1, print_latex_func=CIS_latex)
def cis(x):
return FORMALCIS(x)
#=======================
# log for "high school"
#=======================
def FORMALLOG_latex(fun,x,base=None):
if base==e or base is None:
return r'\ln\left(%s\right)' % latex(x)
elif base==10:
return r'\log\left(%s\right)' % latex(x)
else:
return r'\log_{%s}\left(%s\right)' % (latex(base),latex(x))
x,b=SR.var('x,b')
FORMALLOG = function('logb',nargs=2, print_latex_func=FORMALLOG_latex)
#Two arguments: x and base.
def logb(x,base=e,factorize=False):
r"""
This is a procedure that decides to compute or to print a logarithm in any base.
(logb is an alternative to ``log`` from Sage.)
After calling `logb()` several objects could be returned including the `FORMALLOG` formal log function.
    This version keeps the base because ``log(105,base=10)`` is transformed by Sage (and many other CAS)
into ``log(105)/log(10)`` and sometimes this is not what we want to see as a result.
LaTeX representations are:
* ``\log_{base} (x)`` if base is not ``e``.
* ``\log (x)`` if the base is exponential.
INPUT:
- ``x`` - the argument of log.
- ``base`` - the base of logarithm.
    - ``factorize`` - decompose into a simpler expression if the argument is decomposable into prime factors.
OUTPUT:
- an expression based on ``logb``, Sage ``log`` or any other expression.
Basic cases::
sage: logb(e) #assume base=e
1
sage: logb(10,base=10)
1
sage: logb(1) #assume base=e
0
sage: logb(1,base=10) #assume base=e
0
sage: logb(e,base=10)
logb(e, 10)
sage: logb(10,base=e)
logb(10, e)
sage: logb(sqrt(105))
logb(sqrt(105), e)
sage: logb(5,base=e)
logb(5, e)
sage: logb(e^2,base=e)
2
sage: logb(0,base=10)
-Infinity
With and without factorization::
sage: logb(3^5,base=10) #no factorization
logb(243, 10)
sage: logb(3^5,base=10,factorize=True)
5*logb(3, 10)
sage: logb(3^5*2^3,base=10) #no factorization
logb(1944, 10)
sage: logb(3^5*2^3,base=10,factorize=True)
5*logb(3, 10) + 3*logb(2, 10)
Latex printing of logb::
sage: latex( logb(e) )
1
sage: latex( logb(1,base=10) )
0
sage: latex( logb(e,base=10) )
\log\left(e\right)
sage: latex( logb(sqrt(105)) )
\ln\left(\sqrt{105}\right)
sage: latex( logb(3^5,base=10) )
\log\left(243\right)
sage: latex( logb(3^5,base=10,factorize=True) )
5 \, \log\left(3\right)
sage: latex( logb(3^5*2^3,base=10,factorize=True) )
5 \, \log\left(3\right) + 3 \, \log\left(2\right)
sage: latex( logb(3^5*2^3,base=3,factorize=True) )
5 \, \log_{3}\left(3\right) + 3 \, \log_{3}\left(2\right)
"""
#e is exp(1) in sage
r = log(x,base=base)
if r in ZZ or r in QQ or r==-Infinity: #Note: r in RR results in true if r=log(2/3,e) #OLD: SR(r).denominator()==1:
return r
else:
if factorize:
F = factor(x)
if factorize and type(F) == sage.structure.factorization_integer.IntegerFactorization:
l = [ factor_exponent * FORMALLOG(factor_base,base) for (factor_base,factor_exponent) in F ]
return add(l)
else:
return FORMALLOG(x,base)
#=======================
# pow for "high school"
#=======================
def _POW_latex(fun,basev,expv):
if basev==0 and expv!=0:
return r'0'
elif basev==1:
return r'1'
else:
return r'%s^{%s}' % (latex(basev),latex(expv))
bv,ev=SR.var('bv,ev')
#FORMALPOW = function('powb', bv, ev, print_latex_func=_POW_latex)
FORMALPOW = function('powb', nargs=2, print_latex_func=_POW_latex)
def powb(basev,expv):
r"""powb is an alternative to ``^`` from Sage that preserves ^ in latex.
See similar idea for logb.
INPUT:
- ``basev`` - the basis argument.
- ``expv`` - the exponent value.
OUTPUT:
- an expression based on ``powb`` that is converted by latex() to a^b without calculating.
Basic cases::
sage: powb(0,1)
0
sage: powb(1,2)
1
sage: powb(2,3)
powb(2, 3)
sage: latex( powb(2,3) )
2^{3}
"""
if basev==0 and expv!=0:
return 0
elif basev==1:
return 1
else:
return FORMALPOW(basev,expv)
#=======================
# factorial for "high school"
#=======================
def _FACT_latex(fun,x):
#fun is the new function name
return r'%s!' % latex(x)
# x=SR.var('x') see above.
#inert: does not calculate the factorial, only prints "!".
#FACT_ = function('factb', x, print_latex_func=_FACT_latex)
FORMALFACT = function('factb', nargs=1, print_latex_func=_FACT_latex)
def factb(xv):
r"""factb is an alternative to ``factorial`` from Sage in the sense of representation: factb(x) is never calculated.
    This version corrects the latex(120/factorial(5), hold=true) bug.
    INPUT:
    - ``xv`` - the argument of the factorial.
OUTPUT:
- x! without calculating
Basic cases::
sage: factb(0)
factb(0)
sage: factb(1)
factb(1)
sage: factb(2)
factb(2)
sage: factb(5)
factb(5)
sage: latex( factb(5) )
5!
sage: latex( 120/ factb(5) )
\frac{120}{5!}
"""
return FORMALFACT(xv)
#===================================
# Old functions
# (that are in use in old problems)
#===================================
def showmul(x):
"""Deprecated:
    Old way of writing parentheses around negative numbers.
"""
if x<0:
return '(' + latex(x) + ')'
else:
return x
# ==================
# MSC 15 -- Algebra
# ==================
def before_minor(M,pivot_row,pivot_col):
"""
    A minor is the determinant of a submatrix of M.
    This routine gives the matrix whose determinant is to be calculated.
    INPUT:
    - ``M``: a square matrix n by n.
    - ``pivot_row, pivot_col``: row and column numbers (0 to n-1).
    OUTPUT:
    The submatrix of ``M`` obtained by removing row ``pivot_row`` and column ``pivot_col``.
EXAMPLES::
sage: from megua.mathcommon import before_minor
sage: M = matrix(ZZ, [ [ 1, -25, -1, 0], [ 0, -2, -5, -2], [ 2, 1, -1, 0], [ 3, 1, -2, -13] ]); M
[ 1 -25 -1 0]
[ 0 -2 -5 -2]
[ 2 1 -1 0]
[ 3 1 -2 -13]
sage: before_minor(M,0,0)
[ -2 -5 -2]
[ 1 -1 0]
[ 1 -2 -13]
sage: before_minor(M,0,3)
[ 0 -2 -5]
[ 2 1 -1]
[ 3 1 -2]
sage: before_minor(M,3,3)
[ 1 -25 -1]
[ 0 -2 -5]
[ 2 1 -1]
sage: before_minor(M,3,0)
[-25 -1 0]
[ -2 -5 -2]
[ 1 -1 0]
sage: before_minor(M,0,2)
[ 0 -2 -2]
[ 2 1 0]
[ 3 1 -13]
sage: before_minor(M,3,2)
[ 1 -25 0]
[ 0 -2 -2]
[ 2 1 0]
sage: before_minor(M,2,0)
[-25 -1 0]
[ -2 -5 -2]
[ 1 -2 -13]
sage: before_minor(M,2,3)
[ 1 -25 -1]
[ 0 -2 -5]
[ 3 1 -2]
sage: before_minor(M,1,1)
[ 1 -1 0]
[ 2 -1 0]
[ 3 -2 -13]
AUTHORS:
- Pedro Cruz (2012/April)
- Paula Oliveira (2012/April)
"""
nrows,ncols = M.parent().dims()
#put values in 0-n-1 range.
nrows -= 1
ncols -= 1
if pivot_row==0 and pivot_col==0:
#pivot is at left top corner
return M[1:,1:]
elif pivot_row==0 and pivot_col==ncols:
#pivot is at right top corner
return M[1:,:-1]
elif pivot_row==nrows and pivot_col==ncols:
#pivot is at right bottom corner
return M[:-1,:-1]
elif pivot_row==nrows and pivot_col==0:
#pivot is at left bottom corner
return M[:-1,1:]
elif pivot_row==0:
#pivot is at first row any other col
M.subdivide( 1, [pivot_col,pivot_col+1])
return block_matrix( [ [M.subdivision(1,0),M.subdivision(1,2)]], subdivide=False)
elif pivot_row==nrows:
#pivot is at last row any other col
M.subdivide( nrows, [pivot_col,pivot_col+1])
return block_matrix( [ [M.subdivision(0,0),M.subdivision(0,2)]], subdivide=False)
elif pivot_col==0:
#pivot is at column 0 and any other row
M.subdivide( [pivot_row,pivot_row+1], 1)
return block_matrix( [ [M.subdivision(0,1)],[M.subdivision(2,1)]], subdivide=False)
elif pivot_col==ncols:
#pivot is at last column and any other row
M.subdivide( [pivot_row,pivot_row+1], ncols)
return block_matrix( [ [M.subdivision(0,0)],[M.subdivision(2,0)]], subdivide=False)
else:
M.subdivide( [pivot_row,pivot_row+1], [pivot_col,pivot_col+1])
return block_matrix( [ [M.subdivision(0,0),M.subdivision(0,2)], [M.subdivision(2,0),M.subdivision(2,2)] ], subdivide=False)
# ==================
# MSC 26 --
# ==================
# ==================
# MSC 60 -- probability
# ==================
def random_alpha():
"""
Returns a random alpha value (significance level).
(Used in statistics).
EXAMPLES::
sage: from megua.mathcommon import random_alpha
sage: random_alpha()
(5.00000000000000, 0.0500000000000000)
"""
#Significance Level
d = ur.iunif(0,3)
if d==0:
return (RealNumber('0.1'),RealNumber('0.001'))
elif d==2:
return (RealNumber('1'),RealNumber('0.01'))
elif d==3:
return (RealNumber('5'),RealNumber('0.05'))
else:
return (RealNumber('10'),RealNumber('0.1'))
def Percent(value):
"""
Given an alpha or 1-alpha value return the textual version without %.
EXAMPLES::
sage: from megua.mathcommon import Percent
sage: Percent(0.1) + "%"
'10%'
sage: Percent(0.12) + "%"
'12%'
"""
value = float(value)
if value == 0.01:
return r"1"
elif value == 0.05:
return r"5"
elif value == 0.10:
return r"10"
elif value == 0.90:
return r"90"
elif value == 0.95:
return r"95"
elif value == 0.975:
return r"97.5"
elif value == 0.99:
return r"99"
elif value == 0.995:
return r"99.5"
else:
return r"{0:g}".format(value*100)
#def random_pmf(n=6):
# #restart random number generator
# # See class Exercise for seed.activate()
# #Support (random)
# x0 = ur.iunif(-2,3) #start x_0
# h = ur.runif(0,2,1) #h space between
# #n = iunif(4,6) # fixed for start
# values = [x0 + h * i for i in range(n)]
# #Probabilities (random)
# lst = [runif(0,1,1) for i in range(n)]
# sumlst = sum(lst)#weighted sum
# probabilities = [fround(i/sumlst,2) for i in lst]
# #Correction
# newsum = sum(probabilities)
# probabilities[0] = probabilities[0] + (1-newsum)
# return {'values': values,'probabilities': probabilities}
# ==================
# MSC 62 -- statistics
# ==================
#Random numbers from R using RPy2
# 1. Always cast arguments to Python types before passing them to rpy2 commands.
# 2. TODO: study how rpy2 works.
import rpy2
import rpy2.robjects as robjects
def qt(p,df,prec=None):
"""
    Quantile of a Student's t distribution.
NOTES:
* Based on RPy2 module (seed is from RPy2).
INPUT:
- ``p`` -- probability.
    - ``df`` -- degrees of freedom (distribution parameter).
- ``prec`` -- number of decimal digits (default all).
OUTPUT:
    Quantile of the Student's t distribution.
EXAMPLES::
sage: from megua.mathcommon import qt
sage: qt(0.95,12)
1.7822875556493196
sage: qt(0.95,12,2)
1.78
"""
#qt(p, df, ncp, lower.tail = TRUE, log.p = FALSE)
qt = robjects.r['qt']
res = qt(float(p),int(df))[0]
if prec:
res = round(res,prec)
return res
def pnorm(x,mean,stdev,prec=None):
"""
    Cumulative probability of a normal distribution with parameters (mean, stdev).
NOTES:
* Based on RPy2 module (seed is from RPy2).
INPUT:
    - ``x`` -- some quantile.
    - ``mean`` -- mean of the normal distribution.
    - ``stdev`` -- standard deviation.
- ``prec`` -- number of decimal digits (default all).
OUTPUT:
    :math:`P(X <= x)` where X ~ Norm(mean, stdev).
EXAMPLES::
sage: from megua.mathcommon import pnorm
sage: pnorm(0,0,1)
0.5
sage: pnorm(1.644854,0.0,1.0)
0.9500000384745869
"""
    #pnorm(q, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE)
pnorm = robjects.r['pnorm']
res = pnorm(float(x),float(mean),float(stdev))[0]
if prec:
res = round(res,prec)
return res
#from sage.rings.integer import Integer
def r_stem(p_list,html=True):
"""
Return a string with a stem-and-leaf diagram based on R.
INPUT:
- `p_list': a python list
OUTPUT:
Return string with the diagram.
EXAMPLES:
TODOsage: from megua.mathcommon import r_stem
TODOsage: r_stem( [random() for _ in range(20)] ) #random
u'\n O ponto decimal est\xe1 1 d\xedgitos para a esquerda de |\n\n 0 | 283\n 2 | 334\n 4 | 468117\n 6 | 3348169\n 8 | 5\n\n'
TODOsage: r_stem( [int(100*random()) for _ in range(20)] )
u'\n O ponto decimal est\xe1 1 d\xedgito para a direita de |\n\n 0 | 60\n 2 | 1660\n 4 | 169\n 6 | 03457\n 8 | 091779\n\n'
#TODO : put this examples to work !
from random import random
l = [int(100*random()) for _ in range(20)]
print l
b = r_stem2( l )
#b = r_stem( [random() for _ in range(30)] )
print b
This module defines functions that use R software for statistics.
AUTHORS:
- Pedro Cruz (2014-03-07): initial version
LINKS:
- http://www.sagemath.org/doc/reference/interfaces/sage/interfaces/r.html
CODE STARTS HERE:
=============================
TODO: rebuild this function.
=============================
stemf = robjects.r['stem']
buf = []
def f(x):
# function that append its argument to the list 'buf'
buf.append(x)
# output from the R console will now be appended to the list 'buf'
rpy2.rinterface.setWriteConsole(f)
if type(p_list[0])==int: # or type(p_list[0])==sage.rings.integer.Integer:
stemf( robjects.IntVector(p_list) )
else:
stemf( robjects.FloatVector(p_list) )
#Parsing: The decimal point is 1 digit(s) to the right of the |
#The answer is a list of string in the "buf" variable.
#Keep record
buf1 = buf[1]
buf2 = buf[2]
#if buf[1] == ' The decimal point is ':
buf[1] = u" O ponto decimal está "
#get space position after the number.
sp = buf[2].index(' ')
if 'left' in buf[2]:
sideword = 'esquerda'
sideflag = True
elif 'right' in buf[2]:
sideword = 'direita'
sideflag = True
else:
sideword = 'em |\n'
sideflag = False
if sideflag:
if buf[2][:sp]=='1':
buf[2] = buf[2][:sp] + u" dígito para a %s de |\n\n" % sideword
else:
buf[2] = buf[2][:sp] + u" dígitos para a %s de |\n\n" % sideword
else:
buf[2] = sideword
#For debug only
buf.insert(3,buf1)
buf.insert(4,buf2)
jbuf = u''.join(buf)
#print jbuf
#print type(jbuf)
if html:
jbuf = u'''<div style="font-family: 'Courier New', monospace;"><pre>''' + jbuf + u"</pre></div>"
return jbuf
"""
return 'stem: todo things. call the programmer.'
# ==================
# MSC 65 -- numerical analysis
# ==================
"""
About polynomials
https://groups.google.com/group/sage-support/msg/4abc7d2c5ea97c2b?hl=pt
http://ask.sagemath.org/question/202/identification-polynomial
http://www.sagemath.org/doc/reference/sage/rings/polynomial/polynomial_ring_constructor.html
http://www.sagemath.org/doc/reference/sage/rings/polynomial/multi_polynomial_ring_generic.html
P.<x,y,z> = PolynomialRing(QQ)
P.random_element(2, 5)
-6/5*x^2 + 2/3*z^2 - 1
P.random_element(2, 5, choose_degree=True)
-1/4*x*y - 1/5*x*z - 1/14*y*z - z^2
"""
def support_set(fun,a,b,n,rdecimals):
"""
INPUT:
- ``fun``: some expression or function.
- ``a``: lower interval limit.
- ``b``: upper interval limit.
    - ``n``: number of intervals.
    - ``rdecimals``: number of decimal digits (currently unused).
    OUTPUT:
    - a list of ``(x, fun(x))`` pairs at the ``n+1`` equally spaced points.
"""
h = (b-a)/n
xset = [a + i * h for i in range(n+1)] #n+1points
xyset = [ (xv,fun.subs(x=xv)) for xv in xset]
return xyset
def random_basicLU3():
"""
Generate random matrix A (3x3) and decomposition LU where A=LU without permutation.
TODO: create a random dominant diagonal matrix module. MatrixSpace(QQ,3,3).
    Used on exercise: E65F05_LU_001. Any change could affect it.
"""
A = random_matrix(ZZ,3,x=-3,y=3)
#d = A.diagonal()
A[0,0] = max( abs(A[0,0]) , abs(A[0,1])+abs(A[0,2])+ZZ.random_element(1,3) )
A[1,1] = max( abs(A[1,1]) , abs(A[1,0])+abs(A[1,2])+ZZ.random_element(1,3) )
A[2,2] = max( abs(A[2,2]) , abs(A[2,0])+abs(A[2,1])+ZZ.random_element(1,3) )
import numpy as np
import scipy.linalg as sl
npA = np.matrix(A)
npP,npL,npU = sl.lu(npA)
#print "MATRIZ A=",A
#print sl.lu(npA)
L = matrix(R15,npL)
U = matrix(R15,npU)
return A,L,U
#END mathcommon.py
|
gpl-3.0
|
alphaBenj/zipline
|
zipline/testing/predicates.py
|
2
|
15561
|
from contextlib import contextmanager
import datetime
from functools import partial
import inspect
import re
from nose.tools import ( # noqa
assert_almost_equal,
assert_almost_equals,
assert_dict_contains_subset,
assert_false,
assert_greater,
assert_greater_equal,
assert_in,
assert_is,
assert_is_instance,
assert_is_none,
assert_is_not,
assert_is_not_none,
assert_less,
assert_less_equal,
assert_multi_line_equal,
assert_not_almost_equal,
assert_not_almost_equals,
assert_not_equal,
assert_not_equals,
assert_not_in,
assert_not_is_instance,
assert_raises,
assert_raises_regexp,
assert_regexp_matches,
assert_true,
assert_tuple_equal,
)
import numpy as np
import pandas as pd
from pandas.util.testing import (
assert_frame_equal,
assert_panel_equal,
assert_series_equal,
assert_index_equal,
)
from six import iteritems, viewkeys, PY2
from toolz import dissoc, keyfilter
import toolz.curried.operator as op
from zipline.testing.core import ensure_doctest
from zipline.dispatch import dispatch
from zipline.lib.adjustment import Adjustment
from zipline.lib.labelarray import LabelArray
from zipline.utils.functional import dzip_exact, instance
from zipline.utils.math_utils import tolerant_equals
@instance
@ensure_doctest
class wildcard(object):
"""An object that compares equal to any other object.
This is useful when using :func:`~zipline.testing.predicates.assert_equal`
with a large recursive structure and some fields to be ignored.
Examples
--------
>>> wildcard == 5
True
>>> wildcard == 'ayy'
True
# reflected
>>> 5 == wildcard
True
>>> 'ayy' == wildcard
True
"""
@staticmethod
def __eq__(other):
return True
@staticmethod
def __ne__(other):
return False
def __repr__(self):
return '<%s>' % type(self).__name__
__str__ = __repr__
def keywords(func):
"""Get the argument names of a function
>>> def f(x, y=2):
... pass
>>> keywords(f)
['x', 'y']
Notes
-----
Taken from odo.utils
"""
if isinstance(func, type):
return keywords(func.__init__)
elif isinstance(func, partial):
return keywords(func.func)
return inspect.getargspec(func).args
def filter_kwargs(f, kwargs):
"""Return a dict of valid kwargs for `f` from a subset of `kwargs`
Examples
--------
>>> def f(a, b=1, c=2):
... return a + b + c
...
>>> raw_kwargs = dict(a=1, b=3, d=4)
>>> f(**raw_kwargs)
Traceback (most recent call last):
...
TypeError: f() got an unexpected keyword argument 'd'
>>> kwargs = filter_kwargs(f, raw_kwargs)
>>> f(**kwargs)
6
Notes
-----
Taken from odo.utils
"""
return keyfilter(op.contains(keywords(f)), kwargs)
def _s(word, seq, suffix='s'):
"""Adds a suffix to ``word`` if some sequence has anything other than
exactly one element.
word : str
The string to add the suffix to.
seq : sequence
The sequence to check the length of.
suffix : str, optional.
The suffix to add to ``word``
Returns
-------
maybe_plural : str
``word`` with ``suffix`` added if ``len(seq) != 1``.
"""
return word + (suffix if len(seq) != 1 else '')
def _fmt_path(path):
"""Format the path for final display.
Parameters
----------
path : iterable of str
The path to the values that are not equal.
Returns
-------
fmtd : str
The formatted path to put into the error message.
"""
if not path:
return ''
return 'path: _' + ''.join(path)
def _fmt_msg(msg):
"""Format the message for final display.
Parameters
----------
msg : str
The message to show to the user to provide additional context.
    Returns
-------
fmtd : str
The formatted message to put into the error message.
"""
if not msg:
return ''
return msg + '\n'
def _safe_cls_name(cls):
try:
return cls.__name__
except AttributeError:
return repr(cls)
def assert_is_subclass(subcls, cls, msg=''):
"""Assert that ``subcls`` is a subclass of ``cls``.
Parameters
----------
subcls : type
The type to check.
cls : type
The type to check ``subcls`` against.
msg : str, optional
An extra assertion message to print if this fails.
"""
assert issubclass(subcls, cls), (
'%s is not a subclass of %s\n%s' % (
_safe_cls_name(subcls),
_safe_cls_name(cls),
msg,
)
)
def assert_regex(result, expected, msg=''):
"""Assert that ``expected`` matches the result.
Parameters
----------
result : str
The string to search.
expected : str or compiled regex
The pattern to search for in ``result``.
msg : str, optional
An extra assertion message to print if this fails.
"""
assert re.search(expected, result), (
'%s%r not found in %r' % (_fmt_msg(msg), expected, result)
)
@contextmanager
def assert_raises_regex(exc, pattern, msg=''):
"""Assert that some exception is raised in a context and that the message
matches some pattern.
Parameters
----------
exc : type or tuple[type]
The exception type or types to expect.
pattern : str or compiled regex
The pattern to search for in the str of the raised exception.
msg : str, optional
An extra assertion message to print if this fails.
"""
try:
yield
except exc as e:
assert re.search(pattern, str(e)), (
'%s%r not found in %r' % (_fmt_msg(msg), pattern, str(e))
)
else:
raise AssertionError('%s%s was not raised' % (_fmt_msg(msg), exc))
@dispatch(object, object)
def assert_equal(result, expected, path=(), msg='', **kwargs):
"""Assert that two objects are equal using the ``==`` operator.
Parameters
----------
result : object
The result that came from the function under test.
expected : object
The expected result.
Raises
------
AssertionError
Raised when ``result`` is not equal to ``expected``.
"""
assert result == expected, '%s%s != %s\n%s' % (
_fmt_msg(msg),
result,
expected,
_fmt_path(path),
)
@assert_equal.register(float, float)
def assert_float_equal(result,
expected,
path=(),
msg='',
float_rtol=10e-7,
float_atol=10e-7,
float_equal_nan=True,
**kwargs):
assert tolerant_equals(
result,
expected,
rtol=float_rtol,
atol=float_atol,
equal_nan=float_equal_nan,
), '%s%s != %s with rtol=%s and atol=%s%s\n%s' % (
_fmt_msg(msg),
result,
expected,
float_rtol,
float_atol,
(' (with nan != nan)' if not float_equal_nan else ''),
_fmt_path(path),
)
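# Illustrative sketch (not part of the original module): the float branch
# above defers to ``tolerant_equals``, so values within the default
# rtol/atol of 10e-7 compare equal and NaN == NaN unless
# ``float_equal_nan=False`` is passed.
def _example_assert_float_equal():
    assert_equal(1.0, 1.0 + 5e-8)              # within the default tolerance
    assert_equal(float('nan'), float('nan'))   # NaNs treated as equal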
def _check_sets(result, expected, msg, path, type_):
"""Compare two sets. This is used to check dictionary keys and sets.
Parameters
----------
result : set
expected : set
msg : str
path : tuple
type : str
The type of an element. For dict we use ``'key'`` and for set we use
``'element'``.
"""
if result != expected:
if result > expected:
diff = result - expected
msg = 'extra %s in result: %r' % (_s(type_, diff), diff)
elif result < expected:
diff = expected - result
msg = 'result is missing %s: %r' % (_s(type_, diff), diff)
else:
in_result = result - expected
in_expected = expected - result
msg = '%s only in result: %s\n%s only in expected: %s' % (
_s(type_, in_result),
in_result,
_s(type_, in_expected),
in_expected,
)
raise AssertionError(
'%s%ss do not match\n%s' % (
_fmt_msg(msg),
type_,
_fmt_path(path),
),
)
@assert_equal.register(dict, dict)
def assert_dict_equal(result, expected, path=(), msg='', **kwargs):
_check_sets(
viewkeys(result),
viewkeys(expected),
msg,
path + ('.%s()' % ('viewkeys' if PY2 else 'keys'),),
'key',
)
failures = []
for k, (resultv, expectedv) in iteritems(dzip_exact(result, expected)):
try:
assert_equal(
resultv,
expectedv,
path=path + ('[%r]' % (k,),),
msg=msg,
**kwargs
)
except AssertionError as e:
failures.append(str(e))
if failures:
raise AssertionError('\n'.join(failures))
@assert_equal.register(list, list)
@assert_equal.register(tuple, tuple)
def assert_sequence_equal(result, expected, path=(), msg='', **kwargs):
result_len = len(result)
expected_len = len(expected)
assert result_len == expected_len, (
'%s%s lengths do not match: %d != %d\n%s' % (
_fmt_msg(msg),
type(result).__name__,
result_len,
expected_len,
_fmt_path(path),
)
)
for n, (resultv, expectedv) in enumerate(zip(result, expected)):
assert_equal(
resultv,
expectedv,
path=path + ('[%d]' % n,),
msg=msg,
**kwargs
)
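# Illustrative sketch (not part of the original module): the dict and
# sequence registrations above recurse element-wise and accumulate a path,
# so a mismatch deep inside a nested container is easy to locate.
def _example_assert_equal_nested():
    assert_equal({'a': (1, 2.0)}, {'a': (1, 2.0)})   # passes
    try:
        assert_equal({'a': (1, 2.0)}, {'a': (1, 3.0)})
    except AssertionError as e:
        # the message ends with something like "path: _['a'][1]"
        return str(e)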
@assert_equal.register(set, set)
def assert_set_equal(result, expected, path=(), msg='', **kwargs):
_check_sets(
result,
expected,
msg,
path,
'element',
)
@assert_equal.register(np.ndarray, np.ndarray)
def assert_array_equal(result,
expected,
path=(),
msg='',
array_verbose=True,
array_decimal=None,
**kwargs):
f = (
np.testing.assert_array_equal
if array_decimal is None else
partial(np.testing.assert_array_almost_equal, decimal=array_decimal)
)
try:
f(
result,
expected,
verbose=array_verbose,
err_msg=msg,
)
except AssertionError as e:
raise AssertionError('\n'.join((str(e), _fmt_path(path))))
@assert_equal.register(LabelArray, LabelArray)
def assert_labelarray_equal(result, expected, path=(), **kwargs):
assert_equal(
result.categories,
expected.categories,
path=path + ('.categories',),
**kwargs
)
assert_equal(
result.as_int_array(),
expected.as_int_array(),
path=path + ('.as_int_array()',),
**kwargs
)
def _register_assert_equal_wrapper(type_, assert_eq):
"""Register a new check for an ndframe object.
Parameters
----------
type_ : type
The class to register an ``assert_equal`` dispatch for.
assert_eq : callable[type_, type_]
The function which checks that if the two ndframes are equal.
Returns
-------
assert_ndframe_equal : callable[type_, type_]
The wrapped function registered with ``assert_equal``.
"""
@assert_equal.register(type_, type_)
def assert_ndframe_equal(result, expected, path=(), msg='', **kwargs):
try:
assert_eq(
result,
expected,
**filter_kwargs(assert_eq, kwargs)
)
except AssertionError as e:
raise AssertionError(
_fmt_msg(msg) + '\n'.join((str(e), _fmt_path(path))),
)
return assert_ndframe_equal
assert_frame_equal = _register_assert_equal_wrapper(
pd.DataFrame,
assert_frame_equal,
)
assert_panel_equal = _register_assert_equal_wrapper(
pd.Panel,
assert_panel_equal,
)
assert_series_equal = _register_assert_equal_wrapper(
pd.Series,
assert_series_equal,
)
assert_index_equal = _register_assert_equal_wrapper(
pd.Index,
assert_index_equal,
)
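# Illustrative sketch (not part of the original module): after the wrapper
# registrations above, ``assert_equal`` dispatches pandas objects to the
# corresponding pandas testing helper while keeping the msg/path reporting.
def _example_assert_equal_pandas():
    assert_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 3]))
    assert_equal(pd.Index(['a', 'b']), pd.Index(['a', 'b']))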
@assert_equal.register(pd.Categorical, pd.Categorical)
def assert_categorical_equal(result, expected, path=(), msg='', **kwargs):
assert_equal(
result.categories,
expected.categories,
path=path + ('.categories',),
msg=msg,
**kwargs
)
assert_equal(
result.codes,
expected.codes,
path=path + ('.codes',),
msg=msg,
**kwargs
)
@assert_equal.register(Adjustment, Adjustment)
def assert_adjustment_equal(result, expected, path=(), **kwargs):
for attr in ('first_row', 'last_row', 'first_col', 'last_col', 'value'):
assert_equal(
getattr(result, attr),
getattr(expected, attr),
path=path + ('.' + attr,),
**kwargs
)
@assert_equal.register(
(datetime.datetime, np.datetime64),
(datetime.datetime, np.datetime64),
)
def assert_timestamp_and_datetime_equal(result,
expected,
path=(),
msg='',
allow_datetime_coercions=False,
compare_nat_equal=True,
**kwargs):
"""
Branch for comparing python datetime (which includes pandas Timestamp) and
np.datetime64 as equal.
    Raises unless ``allow_datetime_coercions`` is passed as True when the
    types differ.
"""
assert allow_datetime_coercions or type(result) == type(expected), (
"%sdatetime types (%s, %s) don't match and "
"allow_datetime_coercions was not set.\n%s" % (
_fmt_msg(msg),
type(result),
type(expected),
_fmt_path(path),
)
)
result = pd.Timestamp(result)
expected = pd.Timestamp(expected)
if compare_nat_equal and pd.isnull(result) and pd.isnull(expected):
return
assert_equal.dispatch(object, object)(
result,
expected,
path=path,
**kwargs
)
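# Illustrative sketch (not part of the original module): with
# ``allow_datetime_coercions=True`` the branch above coerces both sides to
# ``pd.Timestamp``, so a datetime and the equivalent np.datetime64 compare
# as equal.
def _example_assert_datetime_coercion():
    assert_equal(
        datetime.datetime(2014, 1, 1),
        np.datetime64('2014-01-01'),
        allow_datetime_coercions=True,
    )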
@assert_equal.register(slice, slice)
def assert_slice_equal(result, expected, path=(), msg=''):
diff_start = (
        ('starts are not equal: %s != %s' % (result.start, expected.start))
if result.start != expected.start else
''
)
diff_stop = (
        ('stops are not equal: %s != %s' % (result.stop, expected.stop))
if result.stop != expected.stop else
''
)
diff_step = (
        ('steps are not equal: %s != %s' % (result.step, expected.step))
if result.step != expected.step else
''
)
diffs = diff_start, diff_stop, diff_step
assert not any(diffs), '%s%s\n%s' % (
_fmt_msg(msg),
'\n'.join(filter(None, diffs)),
_fmt_path(path),
)
def assert_isidentical(result, expected, msg=''):
assert result.isidentical(expected), (
'%s%s is not identical to %s' % (_fmt_msg(msg), result, expected)
)
try:
# pull the dshape cases in
from datashape.util.testing import assert_dshape_equal
except ImportError:
pass
else:
assert_equal.funcs.update(
dissoc(assert_dshape_equal.funcs, (object, object)),
)
|
apache-2.0
|
srowen/spark
|
examples/src/main/python/sql/arrow.py
|
23
|
9242
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A simple example demonstrating Arrow in Spark.
Run with:
./bin/spark-submit examples/src/main/python/sql/arrow.py
"""
# NOTE that this file is imported in user guide in PySpark documentation.
# The codes are referred via line numbers. See also `literalinclude` directive in Sphinx.
from pyspark.sql import SparkSession
from pyspark.sql.pandas.utils import require_minimum_pandas_version, require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
def dataframe_with_arrow_example(spark):
import numpy as np # type: ignore[import]
import pandas as pd # type: ignore[import]
# Enable Arrow-based columnar data transfers
spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
# Generate a Pandas DataFrame
pdf = pd.DataFrame(np.random.rand(100, 3))
# Create a Spark DataFrame from a Pandas DataFrame using Arrow
df = spark.createDataFrame(pdf)
# Convert the Spark DataFrame back to a Pandas DataFrame using Arrow
result_pdf = df.select("*").toPandas()
print("Pandas DataFrame result statistics:\n%s\n" % str(result_pdf.describe()))
def ser_to_frame_pandas_udf_example(spark):
import pandas as pd
from pyspark.sql.functions import pandas_udf
@pandas_udf("col1 string, col2 long")
def func(s1: pd.Series, s2: pd.Series, s3: pd.DataFrame) -> pd.DataFrame:
s3['col2'] = s1 + s2.str.len()
return s3
# Create a Spark DataFrame that has three columns including a struct column.
df = spark.createDataFrame(
[[1, "a string", ("a nested string",)]],
"long_col long, string_col string, struct_col struct<col1:string>")
df.printSchema()
# root
    # |-- long_col: long (nullable = true)
    # |-- string_col: string (nullable = true)
    # |-- struct_col: struct (nullable = true)
# | |-- col1: string (nullable = true)
df.select(func("long_col", "string_col", "struct_col")).printSchema()
# |-- func(long_col, string_col, struct_col): struct (nullable = true)
# | |-- col1: string (nullable = true)
# | |-- col2: long (nullable = true)
def ser_to_ser_pandas_udf_example(spark):
import pandas as pd
from pyspark.sql.functions import col, pandas_udf
from pyspark.sql.types import LongType
# Declare the function and create the UDF
def multiply_func(a: pd.Series, b: pd.Series) -> pd.Series:
return a * b
multiply = pandas_udf(multiply_func, returnType=LongType())
# The function for a pandas_udf should be able to execute with local Pandas data
x = pd.Series([1, 2, 3])
print(multiply_func(x, x))
# 0 1
# 1 4
# 2 9
# dtype: int64
# Create a Spark DataFrame, 'spark' is an existing SparkSession
df = spark.createDataFrame(pd.DataFrame(x, columns=["x"]))
# Execute function as a Spark vectorized UDF
df.select(multiply(col("x"), col("x"))).show()
# +-------------------+
# |multiply_func(x, x)|
# +-------------------+
# | 1|
# | 4|
# | 9|
# +-------------------+
def iter_ser_to_iter_ser_pandas_udf_example(spark):
from typing import Iterator
import pandas as pd
from pyspark.sql.functions import pandas_udf
pdf = pd.DataFrame([1, 2, 3], columns=["x"])
df = spark.createDataFrame(pdf)
# Declare the function and create the UDF
@pandas_udf("long")
def plus_one(iterator: Iterator[pd.Series]) -> Iterator[pd.Series]:
for x in iterator:
yield x + 1
df.select(plus_one("x")).show()
# +-----------+
# |plus_one(x)|
# +-----------+
# | 2|
# | 3|
# | 4|
# +-----------+
def iter_sers_to_iter_ser_pandas_udf_example(spark):
from typing import Iterator, Tuple
import pandas as pd
from pyspark.sql.functions import pandas_udf
pdf = pd.DataFrame([1, 2, 3], columns=["x"])
df = spark.createDataFrame(pdf)
# Declare the function and create the UDF
@pandas_udf("long")
def multiply_two_cols(
iterator: Iterator[Tuple[pd.Series, pd.Series]]) -> Iterator[pd.Series]:
for a, b in iterator:
yield a * b
df.select(multiply_two_cols("x", "x")).show()
# +-----------------------+
# |multiply_two_cols(x, x)|
# +-----------------------+
# | 1|
# | 4|
# | 9|
# +-----------------------+
def ser_to_scalar_pandas_udf_example(spark):
import pandas as pd
from pyspark.sql.functions import pandas_udf
from pyspark.sql import Window
df = spark.createDataFrame(
[(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
("id", "v"))
# Declare the function and create the UDF
@pandas_udf("double")
def mean_udf(v: pd.Series) -> float:
return v.mean()
df.select(mean_udf(df['v'])).show()
# +-----------+
# |mean_udf(v)|
# +-----------+
# | 4.2|
# +-----------+
df.groupby("id").agg(mean_udf(df['v'])).show()
# +---+-----------+
# | id|mean_udf(v)|
# +---+-----------+
# | 1| 1.5|
# | 2| 6.0|
# +---+-----------+
w = Window \
.partitionBy('id') \
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
df.withColumn('mean_v', mean_udf(df['v']).over(w)).show()
# +---+----+------+
# | id| v|mean_v|
# +---+----+------+
# | 1| 1.0| 1.5|
# | 1| 2.0| 1.5|
# | 2| 3.0| 6.0|
# | 2| 5.0| 6.0|
# | 2|10.0| 6.0|
# +---+----+------+
def grouped_apply_in_pandas_example(spark):
df = spark.createDataFrame(
[(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
("id", "v"))
def subtract_mean(pdf):
# pdf is a pandas.DataFrame
v = pdf.v
return pdf.assign(v=v - v.mean())
df.groupby("id").applyInPandas(subtract_mean, schema="id long, v double").show()
# +---+----+
# | id| v|
# +---+----+
# | 1|-0.5|
# | 1| 0.5|
# | 2|-3.0|
# | 2|-1.0|
# | 2| 4.0|
# +---+----+
def map_in_pandas_example(spark):
df = spark.createDataFrame([(1, 21), (2, 30)], ("id", "age"))
def filter_func(iterator):
for pdf in iterator:
yield pdf[pdf.id == 1]
df.mapInPandas(filter_func, schema=df.schema).show()
# +---+---+
# | id|age|
# +---+---+
# | 1| 21|
# +---+---+
def cogrouped_apply_in_pandas_example(spark):
import pandas as pd
df1 = spark.createDataFrame(
[(20000101, 1, 1.0), (20000101, 2, 2.0), (20000102, 1, 3.0), (20000102, 2, 4.0)],
("time", "id", "v1"))
df2 = spark.createDataFrame(
[(20000101, 1, "x"), (20000101, 2, "y")],
("time", "id", "v2"))
def asof_join(l, r):
return pd.merge_asof(l, r, on="time", by="id")
df1.groupby("id").cogroup(df2.groupby("id")).applyInPandas(
asof_join, schema="time int, id int, v1 double, v2 string").show()
# +--------+---+---+---+
# | time| id| v1| v2|
# +--------+---+---+---+
# |20000101| 1|1.0| x|
# |20000102| 1|3.0| x|
# |20000101| 2|2.0| y|
# |20000102| 2|4.0| y|
# +--------+---+---+---+
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("Python Arrow-in-Spark example") \
.getOrCreate()
print("Running Pandas to/from conversion example")
dataframe_with_arrow_example(spark)
print("Running pandas_udf example: Series to Frame")
ser_to_frame_pandas_udf_example(spark)
print("Running pandas_udf example: Series to Series")
ser_to_ser_pandas_udf_example(spark)
print("Running pandas_udf example: Iterator of Series to Iterator of Series")
iter_ser_to_iter_ser_pandas_udf_example(spark)
print("Running pandas_udf example: Iterator of Multiple Series to Iterator of Series")
iter_sers_to_iter_ser_pandas_udf_example(spark)
print("Running pandas_udf example: Series to Scalar")
ser_to_scalar_pandas_udf_example(spark)
print("Running pandas function example: Grouped Map")
grouped_apply_in_pandas_example(spark)
print("Running pandas function example: Map")
map_in_pandas_example(spark)
print("Running pandas function example: Co-grouped Map")
cogrouped_apply_in_pandas_example(spark)
spark.stop()
|
apache-2.0
|
hoechenberger/psychopy
|
psychopy/demos/coder/timing/timeByFrames.py
|
1
|
2409
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The most accurate way to time your stimulus presentation is to
present for a certain number of frames. For that to work you need
your window flips to synchronize to the monitor and not to drop
any frames. This script examines the precision of your frame flips.
Shut down as many applications as possible, especially those that
might try to update the display while this script runs.
"""
from __future__ import division
from __future__ import print_function
from builtins import range
from psychopy import visual, logging, core, event
visual.useFBO = True # if available (try without for comparison)
import matplotlib
matplotlib.use('Qt4Agg') # change this to control the plotting 'back end'
import pylab
nIntervals = 500
win = visual.Window([1280, 1024], fullscr=True, allowGUI=False, waitBlanking=True)
progBar = visual.GratingStim(win, tex=None, mask=None,
size=[0, 0.05], color='red', pos=[0, -0.9], autoLog=False)
myStim = visual.GratingStim(win, tex='sin', mask='gauss',
size=300, sf=0.05, units='pix', autoLog=False)
# logging.console.setLevel(logging.INFO)# uncomment to log every frame
win.recordFrameIntervals = True
for frameN in range(nIntervals + 1):
progBar.setSize([2.0 * frameN/nIntervals, 0.05])
progBar.draw()
myStim.setPhase(0.1, '+')
myStim.draw()
if event.getKeys():
print('stopped early')
break
win.logOnFlip(msg='frame=%i' %frameN, level=logging.EXP)
win.flip()
win.fullscr = False
win.close()
# calculate some values
intervalsMS = pylab.array(win.frameIntervals) * 1000
m = pylab.mean(intervalsMS)
sd = pylab.std(intervalsMS)
# se=sd/pylab.sqrt(len(intervalsMS)) # for CI of the mean
msg = "Mean=%.1fms, s.d.=%.2f, 99%%CI(frame)=%.2f-%.2f"
distString = msg % (m, sd, m - 2.58 * sd, m + 2.58 * sd)
nTotal = len(intervalsMS)
nDropped = sum(intervalsMS > (1.5 * m))
msg = "Dropped/Frames = %i/%i = %.3f%%"
droppedString = msg % (nDropped, nTotal, 100 * nDropped / float(nTotal))
# plot the frameintervals
pylab.figure(figsize=[12, 8])
pylab.subplot(1, 2, 1)
pylab.plot(intervalsMS, '-')
pylab.ylabel('t (ms)')
pylab.xlabel('frame N')
pylab.title(droppedString)
pylab.subplot(1, 2, 2)
pylab.hist(intervalsMS, 50, normed=0, histtype='stepfilled')
pylab.xlabel('t (ms)')
pylab.ylabel('n frames')
pylab.title(distString)
pylab.show()
win.close()
core.quit()
# The contents of this file are in the public domain.
|
gpl-3.0
|
adamrvfisher/TechnicalAnalysisLibrary
|
SpeedDaterPriceRelativeMovAvg.py
|
1
|
8135
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 19:07:37 2017
@author: AmatVictoriaCuramIII
"""
import numpy as np
import random as rand
import pandas as pd
import time as t
#from DatabaseGrabber import DatabaseGrabber
from YahooGrabber import YahooGrabber
from ListPairs import ListPairs
Empty = []
Start = t.time()
Counter = 0
Counter2 = 0
iterations = range(0, 10000)
Dataset2 = pd.DataFrame()
#Input
tickers = ('TLT', 'SPY', 'TMF', 'AAPL', 'PBF', 'UVXY', '^VIX', 'GLD', 'SLV',
'JO','CORN', 'DBC', 'SOYB')
#Make all pairs in final list
MajorList = ListPairs(tickers)
#Here we go
#Brute Force Optimization
for m in MajorList:
Dataset = pd.DataFrame()
Ticker1 = m[0]
Ticker2 = m[1]
TAG = m[0] + '/' + m[1]
Dataset = pd.DataFrame()
Portfolio = pd.DataFrame()
#pull online data, change to local for testing
Asset1 = YahooGrabber(Ticker1)
Asset2 = YahooGrabber(Ticker2)
#get log returns
Asset1['LogRet'] = np.log(Asset1['Adj Close']/Asset1['Adj Close'].shift(1))
Asset1['LogRet'] = Asset1['LogRet'].fillna(0)
Asset2['LogRet'] = np.log(Asset2['Adj Close']/Asset2['Adj Close'].shift(1))
Asset2['LogRet'] = Asset2['LogRet'].fillna(0)
#Match lengths
trim = abs(len(Asset1) - len(Asset2))
if len(Asset1) == len(Asset2):
pass
else:
if len(Asset1) > len(Asset2):
Asset1 = Asset1[trim:]
else:
Asset2 = Asset2[trim:]
#
for i in iterations:
Counter = Counter + 1
aa = rand.random() * 2 #uniformly distributed random number 0 to 2
a = aa - 1 #a > 1 indicating long position in a
bb = rand.random()
if bb >= .5:
bb = 1
else:
bb = -1
b = bb * (1 - abs(a))
#you can change c and d to 0 by default if you want to just go flat
cc = rand.random() * 2 #uniformly distributed random number 0 to 2
c = cc - 1 #cc > 1 indicating long position in c
dd = rand.random() * 2
if dd >= 1:
edd = 1
else:
edd = -1
d = (dd - 1)
if abs(c) + abs(d) > 1:
continue
e = rand.randint(3,20)
window = int(e)
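        # Illustrative note (not in the original script): a/b are the weights
        # held in Asset1/Asset2 while the price relative sits at or below its
        # moving average, c/d are the weights once it rises above it, and e is
        # the look-back window (in trading days) for that moving average.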
#
Asset1['PriceRelative'] = Asset1['Adj Close']/Asset2['Adj Close']
Asset1['PRMA'] = Asset1['PriceRelative'].rolling(window=window, center=False).mean()
Asset1['Position'] = a
Asset1['Position'] = np.where(Asset1['PriceRelative'].shift(1) > Asset1['PRMA'].shift(1),
c,a)
Asset1['Pass'] = (Asset1['LogRet'] * Asset1['Position'])
Asset2['Position'] = b
Asset2['Position'] = np.where(Asset1['PriceRelative'].shift(1) > Asset1['PRMA'].shift(1),
d,b)
Asset2['Pass'] = (Asset2['LogRet'] * Asset2['Position'])
Portfolio['Asset1Pass'] = (Asset1['Pass']) #* (-1) #Pass a short position?
Portfolio['Asset2Pass'] = (Asset2['Pass']) #* (-1) #Pass a short position?
Portfolio['LongShort'] = Portfolio['Asset1Pass'] + Portfolio['Asset2Pass']
if Portfolio['LongShort'].std() == 0:
continue
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
MaxDD = max(drawdown)
if MaxDD > float(.51):
continue
dailyreturn = Portfolio['LongShort'].mean()
if dailyreturn < .0003:
continue
dailyvol = Portfolio['LongShort'].std()
sharpe =(dailyreturn/dailyvol)
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
MaxDD = max(drawdown)
print(Counter)
Empty.append(a)
Empty.append(b)
Empty.append(c)
Empty.append(d)
Empty.append(e)
Empty.append(sharpe)
Empty.append(sharpe/MaxDD)
Empty.append(dailyreturn/MaxDD)
Empty.append(MaxDD)
Emptyseries = pd.Series(Empty)
Dataset[0] = Emptyseries.values
Dataset[i] = Emptyseries.values
Empty[:] = []
#find optimal parameters from pair
z1 = Dataset.iloc[6]
w1 = np.percentile(z1, 80)
v1 = [] #this variable stores the Nth percentile of top performers
DS1W = pd.DataFrame() #this variable stores your financial advisors for specific dataset
for h in z1:
if h > w1:
v1.append(h)
for j in v1:
r = Dataset.columns[(Dataset == j).iloc[6]]
DS1W = pd.concat([DS1W,Dataset[r]], axis = 1)
y = max(z1)
k = Dataset.columns[(Dataset == y).iloc[6]] #this is the column number
kfloat = float(k[0])
End = t.time()
print(End-Start, 'seconds later')
Dataset[TAG] = Dataset[kfloat]
Dataset2[TAG] = Dataset[TAG]
Dataset2 = Dataset2.rename(columns = {Counter2:TAG})
Counter2 = Counter2 + 1
# print(Dataset[TAG])
Portfolio2 = pd.DataFrame()
#find some winning parameters
z1 = Dataset2.iloc[6]
w1 = np.percentile(z1, 99)
v1 = [] #this variable stores the Nth percentile of top performers
winners = pd.DataFrame() #this variable stores your financial advisors for specific dataset
for h in z1:
if h > w1:
v1.append(h)
for j in v1:
r = Dataset2.columns[(Dataset2 == j).iloc[6]]
winners = pd.concat([winners,Dataset2[r]], axis = 1)
y = max(z1)
k = Dataset2.columns[(Dataset2 == y).iloc[6]] #this is the name of the pair
kfloat = str(k[0])
#most likely, you will want to export to csv for further future investigation
#print(Dataset[TAG])
num = kfloat.find('/')
num2 = num + 1
#you will need to re-call the Asset1 and Asset2 time series and log returns start here!!!
Asset3 = YahooGrabber(kfloat[:num])
Asset4 = YahooGrabber(kfloat[num2:])
trim = abs(len(Asset3) - len(Asset4))
if len(Asset3) == len(Asset4):
pass
else:
if len(Asset3) > len(Asset4):
Asset3 = Asset3[trim:]
else:
Asset4 = Asset4[trim:]
#get log returns
Asset3['LogRet'] = np.log(Asset3['Adj Close']/Asset3['Adj Close'].shift(1))
Asset3['LogRet'] = Asset3['LogRet'].fillna(0)
Asset4['LogRet'] = np.log(Asset4['Adj Close']/Asset4['Adj Close'].shift(1))
Asset4['LogRet'] = Asset4['LogRet'].fillna(0)
window = int((Dataset2[kfloat][4]))
Asset3['PriceRelative'] = Asset3['Adj Close']/Asset4['Adj Close']
Asset3['PRMA'] = Asset3['PriceRelative'].rolling(window=window, center=False).mean()
Asset3['Position'] = (Dataset2[k[0]][0])
Asset3['Position'] = np.where(Asset3['PriceRelative'].shift(1) > Asset3['PRMA'].shift(1),
Dataset2[k[0]][2],Dataset2[k[0]][0])
Asset3['Pass'] = (Asset3['LogRet'] * Asset3['Position'])
Asset4['Position'] = (Dataset2[kfloat][1])
Asset4['Position'] = np.where(Asset3['PriceRelative'].shift(1) > Asset3['PRMA'].shift(1),
Dataset2[k[0]][3],Dataset2[k[0]][1])
Asset4['Pass'] = (Asset4['LogRet'] * Asset4['Position'])
#
Portfolio2['Asset3Pass'] = Asset3['Pass'] #* (-1)
Portfolio2['Asset4Pass'] = Asset4['Pass'] #* (-1)
Portfolio2['LongShort'] = Portfolio2['Asset3Pass'] + Portfolio2['Asset4Pass']
Portfolio2['LongShort'][:].cumsum().apply(np.exp).plot(grid=True,
figsize=(8,5))
dailyreturn = Portfolio2['LongShort'].mean()
dailyvol = Portfolio2['LongShort'].std()
sharpe =(dailyreturn/dailyvol)
Portfolio2['Multiplier'] = Portfolio2['LongShort'].cumsum().apply(np.exp)
drawdown2 = 1 - Portfolio2['Multiplier'].div(Portfolio2['Multiplier'].cummax())
#conversionfactor = Portfolio['PriceRelative'][-1]
print(kfloat)
print('--------')
print(Dataset2[kfloat])
print('Max Drawdown is ',max(drawdown2),'See Dataset2')
##pd.to_pickle(Portfolio, 'VXX:UVXY')
|
apache-2.0
|
meizhoubao/pyimagesearch
|
compare-histograms/comphis.py
|
1
|
3329
|
from scipy.spatial import distance as dist
import matplotlib.pyplot as plt
import numpy as np
import argparse
import glob
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
help="Path to the directory of images")
args = vars(ap.parse_args())
index = {}
images = {}
for imagePath in glob.glob(args["dataset"] + "/*.png"):
filename = imagePath[imagePath.rfind("/") + 1:]
image = cv2.imread(imagePath)
images[filename] = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
hist = cv2.calcHist([image], [0, 1, 2], None, [8, 8, 8],
[0, 256, 0, 256, 0, 256])
hist = cv2.normalize(hist).flatten()
index[filename] = hist
# METHOD 1
OPENCV_METHODS = (("Correlation", cv2.cv.CV_COMP_CORREL),
("Chi-Squared", cv2.cv.CV_COMP_CHISQR),
("Intersection", cv2.cv.CV_COMP_INTERSECT),
("Hellinger", cv2.cv.CV_COMP_BHATTACHARYYA))
for (methodName, method) in OPENCV_METHODS:
results = {}
reverse = False
if methodName in ("Correlation", "Intersection"):
reverse = True
for (k, hist) in index.items():
d = cv2.compareHist(index["doge.png"], hist, method)
results[k] = d
results = sorted([(v, k) for (k, v) in results.items()], reverse=reverse)
fig = plt.figure("Query")
ax = fig.add_subplot(1, 1, 1)
ax.imshow(images["doge.png"])
plt.axis("off")
    fig = plt.figure("Results: %s" % (methodName,))
    fig.suptitle(methodName, fontsize=20)
    for (i, (v, k)) in enumerate(results):
ax = fig.add_subplot(1, len(images), i + 1)
ax.set_title("%s: %.2f" % (k, v))
plt.imshow(images[k])
plt.axis("off")
plt.show()
# METHOD 2
SCIPY_METHODS = (("Euclidean", dist.euclidean),
("Manhanttan", dist.cityblock),
("Chebysev", dist.chebyshev))
for (methodName, method) in SCIPY_METHODS:
results = {}
for (k, hist) in index.items():
d = method(index["doge.png"], hist)
results[k] = d
results = sorted([(v, k) for (k, v) in results.items()])
fig = plt.figure("Query")
ax = fig.add_subplot(1, 1, 1)
ax.imshow(images["doge.png"])
plt.axis("off")
fig = plt.figure("Results: %s" (methodName))
fig.subtitle(methodName, fontsize=20)
for (i, (v, k)) in enumerate(results):
ax = fig.add_subplot(1, len(images), i+1)
ax.set_title("%s: %.2f" % (k, v))
        plt.imshow(images[k])
plt.axis("off")
plt.show()
# METHOD 3
def chi2_distance(histA, histB, eps=1e-10):
d = 0.5 * np.sum([((a - b) ** 2) / (a + b + eps) for (a, b) in zip(histA, histB)])
return d
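# Illustrative note (not in the original script): chi2_distance returns a
# non-negative dissimilarity, e.g. chi2_distance(np.array([0.2, 0.8]),
# np.array([0.3, 0.7])) is roughly 0.013, and identical histograms give 0.0.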
# initialize the results dictionary
results = {}
# loop over the index
for (k, hist) in index.items():
d = chi2_distance(index["doge.png"], hist)
results[k] = d
# sort the results
results = sorted([(v, k) for (k, v) in results.items()])
# show the query image
fig = plt.figure("Query")
ax = fig.add_subplot(1, 1, 1)
ax.imshow(images["doge.png"])
plt.axis("off")
# initialize the results figure
fig = plt.figure("Results: Custom Chi-Squared")
fig.suptitle("Custom Chi-Squared", fontsize=20)
# loop over the results
for (i, (v, k)) in enumerate(results):
ax = fig.add_subplot(1, len(images), i + 1)
ax.set_title("%s: %.2f" % (k, v))
plt.imshow(images[k])
plt.axis("off")
# show the custom method
plt.show()
|
gpl-3.0
|
bhargav/scikit-learn
|
sklearn/feature_extraction/text.py
|
15
|
50250
|
# -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
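    # Illustrative note (not in the original source): with ngram_range=(2, 3)
    # the word "cat" is padded to " cat " and _char_wb_ngrams yields
    # [' c', 'ca', 'at', 't ', ' ca', 'cat', 'at '] -- character n-grams
    # never cross the padded word boundary.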
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg),
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can
        be analyzed directly, as either strings or bytes.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
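# Illustrative sketch (not part of the original module): a minimal, hedged
# usage of HashingVectorizer showing the stateless behaviour described in the
# docstring above; the corpus and n_features below are made up.
def _hashing_vectorizer_usage_sketch():
    docs = ["the quick brown fox", "jumps over the lazy dog"]
    # No fitting is required because no vocabulary is stored.
    hv = HashingVectorizer(n_features=2 ** 8, norm=None)
    X = hv.transform(docs)
    return X.shape  # (2, 256) sparse matrix of (possibly signed) token counts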
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can
        be analyzed directly, as either strings or bytes.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df or min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
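# Illustrative sketch (not part of the original module): a minimal, hedged
# usage of CountVectorizer; the two-document corpus below is made up.
def _count_vectorizer_usage_sketch():
    docs = ["the cat sat", "the cat sat on the mat"]
    cv = CountVectorizer()
    X = cv.fit_transform(docs)
    # The learned vocabulary is sorted alphabetically:
    # ['cat', 'mat', 'on', 'sat', 'the']
    return cv.get_feature_names(), X.toarray()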
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
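# A small numeric sketch (illustrative only; the helper name and the toy count
# matrix are made up) of the smoothed idf computed in TfidfTransformer.fit
# above: with smooth_idf=True both the document count and the document
# frequencies are incremented by one before taking the log, and 1 is added so
# that terms occurring in every document are not zeroed out.
def _example_smoothed_idf():
    counts = np.array([[3, 0, 1],
                       [2, 0, 0],
                       [3, 0, 0],
                       [4, 0, 0],
                       [3, 2, 0],
                       [3, 0, 2]], dtype=np.float64)
    n_samples, _ = counts.shape
    df = (counts > 0).sum(axis=0)                        # document frequency per term
    idf = np.log((n_samples + 1.0) / (df + 1.0)) + 1.0   # smoothed idf, as in fit()
    return idf                                           # approximately [1.0, 2.2528, 1.8473]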
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
        If float, the parameter represents a proportion of documents;
        if integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
        If float, the parameter represents a proportion of documents;
        if integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
        Tokenize the documents and count the occurrences of tokens and return
        them as a sparse matrix.
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
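# A minimal equivalence sketch (illustrative only; the helper name and the toy
# corpus are made up): as documented above, TfidfVectorizer behaves like a
# CountVectorizer followed by a TfidfTransformer, and the function below just
# makes that two-step pipeline explicit.
def _example_tfidf_vectorizer_equivalence(docs=("the cat sat", "the dog sat")):
    docs = list(docs)
    direct = TfidfVectorizer().fit_transform(docs)
    counts = CountVectorizer().fit_transform(docs)
    two_step = TfidfTransformer().fit_transform(counts)
    # Both routes give the same tf-idf matrix (up to floating point error).
    return np.abs(direct.toarray() - two_step.toarray()).max()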
|
bsd-3-clause
|
trachelr/mne-python
|
examples/inverse/plot_covariance_whitening_dspm.py
|
7
|
6368
|
# doc:slow-example
"""
===================================================
Demonstrate impact of whitening on source estimates
===================================================
This example demonstrates the relationship between the noise covariance
estimate and the MNE / dSPM source amplitudes. It computes source estimates for
the SPM faces data and compares proper regularization with insufficient
regularization based on the methods described in [1]. The example demonstrates
that improper regularization can lead to overestimation of source amplitudes.
This example makes use of the previous, non-optimized code path that was used
before implementing the suggestions presented in [1]. Please do not copy the
patterns presented here for your own analysis, as this example is purely
illustrative.
Note that this example does quite a bit of processing, so even on a
fast machine it can take a couple of minutes to complete.
References
----------
[1] Engemann D. and Gramfort A. (2015) Automated model selection in covariance
estimation and spatial whitening of MEG and EEG signals, vol. 108,
328-342, NeuroImage.
"""
# Author: Denis A. Engemann <[email protected]>
#
# License: BSD (3-clause)
import os
import os.path as op
import numpy as np
from scipy.misc import imread
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import spm_face
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.cov import compute_covariance
print(__doc__)
##############################################################################
# Get data
data_path = spm_face.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D_raw.fif'
raw = io.Raw(raw_fname % 1, preload=True) # Take first run
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(1, 30, method='iir', n_jobs=1)
events = mne.find_events(raw, stim_channel='UPPT001')
event_ids = {"faces": 1, "scrambled": 2}
tmin, tmax = -0.2, 0.5
baseline = None # no baseline as high-pass is applied
reject = dict(mag=3e-12)
# Make source space
trans = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces1_3D_raw-trans.fif'
src = mne.setup_source_space('spm', spacing='oct6', subjects_dir=subjects_dir,
overwrite=True, add_dist=False)
bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
forward = mne.make_forward_solution(raw.info, trans, src, bem)
forward = mne.convert_forward_solution(forward, surf_ori=True)
# inverse parameters
conditions = 'faces', 'scrambled'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = 'dSPM'
clim = dict(kind='value', lims=[0, 2.5, 5])
###############################################################################
# Estimate covariance and show resulting source estimates
method = 'empirical', 'shrunk'
best_colors = 'steelblue', 'red'
samples_epochs = 5, 15,
fig, (axes1, axes2) = plt.subplots(2, 3, figsize=(9.5, 6))
def brain_to_mpl(brain):
"""convert image to be usable with matplotlib"""
tmp_path = op.abspath(op.join(op.curdir, 'my_tmp'))
brain.save_imageset(tmp_path, views=['ven'])
im = imread(tmp_path + '_ven.png')
os.remove(tmp_path + '_ven.png')
return im
for n_train, (ax_stc_worst, ax_dynamics, ax_stc_best) in zip(samples_epochs,
(axes1, axes2)):
# estimate covs based on a subset of samples
# make sure we have the same number of conditions.
events_ = np.concatenate([events[events[:, 2] == id_][:n_train]
for id_ in [event_ids[k] for k in conditions]])
epochs_train = mne.Epochs(raw, events_, event_ids, tmin, tmax, picks=picks,
baseline=baseline, preload=True, reject=reject)
epochs_train.equalize_event_counts(event_ids, copy=False)
noise_covs = compute_covariance(epochs_train, method=method,
tmin=None, tmax=0, # baseline only
return_estimators=True) # returns list
# prepare contrast
evokeds = [epochs_train[k].average() for k in conditions]
# compute stc based on worst and best
for est, ax, kind, color in zip(noise_covs, (ax_stc_worst, ax_stc_best),
['best', 'worst'], best_colors):
# We skip empirical rank estimation that we introduced in response to
# the findings in reference [1] to use the naive code path that
# triggered the behavior described in [1]. The expected true rank is
# 274 for this dataset. Please do not do this with your data but
# rely on the default rank estimator that helps regularizing the
# covariance.
inverse_operator = make_inverse_operator(epochs_train.info, forward,
est, loose=0.2, depth=0.8,
rank=274)
stc_a, stc_b = (apply_inverse(e, inverse_operator, lambda2, "dSPM",
pick_ori=None) for e in evokeds)
stc = stc_a - stc_b
brain = stc.plot(subjects_dir=subjects_dir, hemi='both', clim=clim)
brain.set_time(175)
im = brain_to_mpl(brain)
brain.close()
ax.axis('off')
ax.get_xaxis().set_visible(False), ax.get_yaxis().set_visible(False)
ax.imshow(im)
ax.set_title('{0} ({1} epochs)'.format(kind, n_train * 2))
# plot spatial mean
stc_mean = stc.data.mean(0)
ax_dynamics.plot(stc.times * 1e3, stc_mean,
label='{0} ({1})'.format(est['method'], kind),
color=color)
# plot spatial std
stc_var = stc.data.std(0)
ax_dynamics.fill_between(stc.times * 1e3, stc_mean - stc_var,
stc_mean + stc_var, alpha=0.2, color=color)
# signal dynamics worst and best
ax_dynamics.set_title('{0} epochs'.format(n_train * 2))
ax_dynamics.set_xlabel('Time (ms)')
ax_dynamics.set_ylabel('Source Activation (dSPM)')
ax_dynamics.set_xlim(tmin * 1e3, tmax * 1e3)
ax_dynamics.set_ylim(-3, 3)
ax_dynamics.legend(loc='upper left', fontsize=10)
fig.subplots_adjust(hspace=0.4, left=0.03, right=0.98, wspace=0.07)
fig.canvas.draw()
fig.show()
|
bsd-3-clause
|
tcmoore3/mdtraj
|
mdtraj/utils/__init__.py
|
7
|
3668
|
from __future__ import print_function, division
import time
import warnings
from mdtraj.utils.delay_import import import_
from mdtraj.utils.validation import ensure_type, cast_indices, check_random_state
from mdtraj.utils.unit import in_units_of
from mdtraj.utils.rotation import rotation_matrix_from_quaternion, uniform_quaternion
from mdtraj.utils.unitcell import (lengths_and_angles_to_box_vectors,
box_vectors_to_lengths_and_angles)
from mdtraj.utils.contextmanagers import timing, enter_temp_directory
from mdtraj.utils.zipped import open_maybe_zipped
__all__ = ["ensure_type", "import_", "in_units_of",
"lengths_and_angles_to_box_vectors",
"box_vectors_to_lengths_and_angles",
"ilen", "timing", "cast_indices", "check_random_state",
"rotation_matrix_from_quaternion", "uniform_quaternion",
"enter_temp_directory", "timing", "deprecated"]
def ilen(iterable):
"""Length of an iterator. Note, this consumes the iterator
Parameters
----------
iterable : iterable
An iterable, such as a generator, list, etc.
Returns
-------
length : int
The number of elements in the iterable
"""
return sum(1 for _ in iterable)
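# For example, ilen(x for x in range(10) if x % 2 == 0) returns 5; because the
# generator is consumed, calling ilen again on the same generator object would
# return 0.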
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
    and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Copied from scikit-learn: sklearn/utils/__init__.py
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
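# A minimal usage sketch (illustrative only; the function and the message text
# below are made up): decorating a function with @deprecated() leaves its
# behavior unchanged, but every call emits a DeprecationWarning and the
# docstring gets a "DEPRECATED" prefix.
@deprecated("use a hypothetical replacement instead")
def _old_distance(a, b):
    """Absolute difference of two numbers."""
    return abs(a - b)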
|
lgpl-2.1
|
liulion/mayavi
|
docs/source/mayavi/auto/mlab_3D_to_2D.py
|
4
|
11050
|
"""
A script to calculate the projection of 3D world coordinates to
2D display coordinates (pixel coordinates) for a given scene.
The 2D pixel locations of objects in the image plane are related to their
3D world coordinates by a series of linear transformations. The specific
transformations fall under the group known as projective transformations.
This set includes pure projectivities, affine transformations,
perspective transformations, and euclidean transformations. In the case
of mlab (and most other computer visualization software), we deal with
only the perspective and euclidean cases. An overview of Projective space
can be found here: http://en.wikipedia.org/wiki/Projective_space and a
thorough treatment of projective geometry can be had in the book
"Multiple View Geometry in Computer Vision" by Richard Hartley.
The essential thing to know for this example is that points in 3-space
are related to points in 2-space through a series of multiplications of
4x4 matrices which are the perspective and euclidean transformations. The
4x4 matrices predicate the use of length 4 vectors to represent points.
This representation is known as homogeneous coordinates, and while they
appear foreign at first, they truly simplify all the mathematics
involved. In short, homogeneous coordinates are your friend, and you
should read about them here:
http://en.wikipedia.org/wiki/Homogeneous_coordinates
In the normal pinhole camera model (the ideal real world model), 3D world
points are related to 2D image points by the matrix termed the
'essential' matrix which is a combination of a perspective transformation
and a euclidean transformation. The perspective transformation is defined
by the camera intrinsics (focal length, imaging sensor offset, etc...)
and the euclidean transformation is defined by the cameras position and
orientation. In computer graphics, things are not so simple. This is
because computer graphics have the benefit of being able to do things
which are not possible in the real world: adding clipping planes, offset
projection centers, arbitrary distortions, etc... Thus, a slightly
different model is used.
What follows is the camera/view model for OpenGL and thus, VTK. I can not
guarantee that other packages follow this model.
There are 4 different transformations that are applied 3D world
coordinates to map them to 2D pixel coordinates. They are: the model
transform, the view transform, the perspective transform, and the
viewport or display transform.
In OpenGL the first two transformations are concatenated to yield the
modelview transform (called simply the view transform in VTK). The
modelview transformation applies arbitrary scaling and distortions to the
model (if they are specified) and transforms them so that the orientation
is the equivalent of looking down the negative Z axis. Imagine it's as if
you relocate your camera to look down the negative Z axis, and then move
everything in the world so that you see it now as you did before you
moved the camera. The resulting coordinates are termed "eye" coordinates
in OpenGL (I don't know that they have a name in VTK).
The perspective transformation applies the camera perspective to the eye
coordinates. This transform is what makes objects in the foreground look
bigger than equivalent objects in the background. In the pinhole camera
model, this transform is determined uniquely by the focal length of the
camera and its position in 3-space. In Vtk/OpenGL it is determined by the
frustum. A frustum is simply a pyramid with the top lopped off. The top
of the pyramid (a point) is the camera location, the base of the pyramid
is a plane (the far clipping plane) defined as normal to the principal camera
ray at distance termed the far clipping distance, the top of the frustum
(where it's lopped off) is the near clipping plane, with a definition
similar to that of the far clipping plane. The sides of the frustum are
determined by the aspect ratio of the camera (width/height) and its
field-of-view. Any points not lying within the frustum are not mapped to
the screen (as they would lie outside the viewable area). The
perspective transformation has the effect of scaling everything within
the frustum to fit within a cube defined in the range (-1,1)(-1,1)(-1,1)
as represented by homogeneous coordinates. The last phrase there is
important, the first 3 coordinates will not, in general, be within the
unity range until we divide through by the last coordinate (See the
wikipedia on homogeneous coordinates if this is confusing). The resulting
coordinates are termed (appropriately enough) normalized view
coordinates.
The last transformation (the viewport transformation) takes us from
normalized view coordinates to display coordinates. At this point, you
may be asking yourself 'why not just go directly to display coordinates,
why need normalized view coordinates at all?' The answer is that we may
want to embed more than one view in a particular window, there will
therefore be different transformations to take each view to an
appropriate position and size in the window. The normalized view
coordinates provide a nice common ground so-to-speak. At any rate, the
viewport transformation simply scales and translates the X and Y
coordinates of the normalized view coordinates to the appropriate pixel
coordinates. We don't use the Z value in our example because we don't
care about it. It is used for various other things, however.
That's all there is to it, pretty simple right? Right. Here is an overview:
Given a set of 3D world coordinates:
- Apply the modelview transformation (view transform in VTK) to get eye
coordinates
- Apply the perspective transformation to get normalized view coordinates
- Apply the viewport transformation to get display coordinates
VTK provides a nice method to retrieve a 4x4 matrix that combines the
first two operations. As far as I can tell, VTK does not export a method
to retrieve the 4x4 matrix representing the viewport transformation, so
we are on our own to create one (no worries though, it's not hard, as
you will see).
Now that the preliminaries are out of the way, let's get started.
"""
# Author: S. Chris Colbert <[email protected]>
# Copyright (c) 2009, S. Chris Colbert
# License: BSD Style
# this import is here because we need to ensure that matplotlib uses the
# wx backend and having regular code outside the main block is PyTaboo.
# It needs to be imported first, so that matplotlib can impose the
# version of Wx it requires.
import matplotlib
matplotlib.use('WXAgg')
import pylab as pl
import numpy as np
from mayavi import mlab
from mayavi.core.ui.mayavi_scene import MayaviScene
def get_world_to_view_matrix(mlab_scene):
"""returns the 4x4 matrix that is a concatenation of the modelview transform and
perspective transform. Takes as input an mlab scene object."""
if not isinstance(mlab_scene, MayaviScene):
raise TypeError('argument must be an instance of MayaviScene')
# The VTK method needs the aspect ratio and near and far clipping planes
# in order to return the proper transform. So we query the current scene
# object to get the parameters we need.
scene_size = tuple(mlab_scene.get_size())
clip_range = mlab_scene.camera.clipping_range
aspect_ratio = float(scene_size[0])/float(scene_size[1])
# this actually just gets a vtk matrix object, we can't really do anything with it yet
vtk_comb_trans_mat = mlab_scene.camera.get_composite_perspective_transform_matrix(
aspect_ratio, clip_range[0], clip_range[1])
# get the vtk mat as a numpy array
np_comb_trans_mat = vtk_comb_trans_mat.to_array()
return np_comb_trans_mat
def get_view_to_display_matrix(mlab_scene):
""" this function returns a 4x4 matrix that will convert normalized
view coordinates to display coordinates. It's assumed that the view should
take up the entire window and that the origin of the window is in the
upper left corner"""
if not (isinstance(mlab_scene, MayaviScene)):
raise TypeError('argument must be an instance of MayaviScene')
# this gets the client size of the window
x, y = tuple(mlab_scene.get_size())
# normalized view coordinates have the origin in the middle of the space
# so we need to scale by width and height of the display window and shift
# by half width and half height. The matrix accomplishes that.
view_to_disp_mat = np.array([[x/2.0, 0., 0., x/2.0],
[ 0., -y/2.0, 0., y/2.0],
[ 0., 0., 1., 0.],
[ 0., 0., 0., 1.]])
return view_to_disp_mat
def apply_transform_to_points(points, trans_mat):
"""a function that applies a 4x4 transformation matrix to an of
homogeneous points. The array of points should have shape Nx4"""
if not trans_mat.shape == (4, 4):
raise ValueError('transform matrix must be 4x4')
if not points.shape[1] == 4:
raise ValueError('point array must have shape Nx4')
return np.dot(trans_mat, points.T).T
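# A tiny standalone sketch (not used by the demo below; the function name is
# made up) of the homogeneous divide discussed in the docstring: a length-4
# homogeneous point only becomes an ordinary 3D point once every component is
# divided by the last one.
def homogeneous_to_euclidean_example():
    pt_h = np.array([2.0, 4.0, 6.0, 2.0])   # homogeneous [x, y, z, w]
    return pt_h[:3] / pt_h[3]                # -> array([1., 2., 3.])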
if __name__ == '__main__':
f = mlab.figure()
N = 4
# create a few points in 3-space
X = np.random.random_integers(-3, 3, N)
Y = np.random.random_integers(-3, 3, N)
Z = np.random.random_integers(-3, 3, N)
# plot the points with mlab
pts = mlab.points3d(X, Y, Z)
    # now we're going to create a single N x 4 array of our points
# adding a fourth column of ones expresses the world points in
    # homogeneous coordinates
W = np.ones(X.shape)
hmgns_world_coords = np.column_stack((X, Y, Z, W))
# applying the first transform will give us 'unnormalized' view
# coordinates we also have to get the transform matrix for the
# current scene view
comb_trans_mat = get_world_to_view_matrix(f.scene)
view_coords = \
apply_transform_to_points(hmgns_world_coords, comb_trans_mat)
# to get normalized view coordinates, we divide through by the fourth
# element
norm_view_coords = view_coords / (view_coords[:, 3].reshape(-1, 1))
# the last step is to transform from normalized view coordinates to
# display coordinates.
view_to_disp_mat = get_view_to_display_matrix(f.scene)
disp_coords = apply_transform_to_points(norm_view_coords, view_to_disp_mat)
    # at this point disp_coords is an Nx4 array of homogeneous coordinates
# where X and Y are the pixel coordinates of the X and Y 3D world
    # coordinates, so let's take a screenshot of the mlab view and open it
# with matplotlib so we can check the accuracy
img = mlab.screenshot()
pl.imshow(img)
for i in range(N):
print 'Point %d: (x, y) ' % i, disp_coords[:, 0:2][i]
pl.plot([disp_coords[:, 0][i]], [disp_coords[:, 1][i]], 'ro')
pl.show()
# you should check that the printed coordinates correspond to the
# proper points on the screen
mlab.show()
#EOF
|
bsd-3-clause
|
markus-antero/Stock
|
data/geographic/shapefileGeo.py
|
1
|
10384
|
'''
Created on 8.5.2017
TODO - packages not loaded but applicable
- geojson
- shapely
- seaborn as sns
- shapely.wkt, wkt = http://www.geoapi.org/3.0/javadoc/org/opengis/referencing/doc-files/WKT.html
@author: Markus.Walden
'''
#Array
from datetime import datetime
import shapefile
import geopandas as gp
from geopandas import datasets
import pandas as pd
from shapely.geometry import Point
#SQL
import sqlalchemy as sqla
from sqlalchemy.ext.declarative import declarative_base
from geoalchemy2 import Geometry
from sqlalchemy.orm import sessionmaker
#computer
import sys
from geographic import engineString
#map
import matplotlib.pyplot as plt
plt.style.use('bmh')
Base = declarative_base()
class GeographicNE(Base):
'''
classdocs
'''
__tablename__ = 'GeographicNE'
index = sqla.Column(sqla.Integer)
continent = sqla.Column(sqla.NVARCHAR(50))
gdp_md_est = sqla.Column(sqla.Float)
iso_a3 = sqla.Column(sqla.NVARCHAR(50), primary_key=True)
name = sqla.Column(sqla.NVARCHAR(50))
pop_est = sqla.Column(sqla.Float)
geometry = sqla.Column(Geometry("POLYGON"))
def __init__(self, params):
'''
Constructor
'''
def __repr__(self):
#"(id='%s', Date='%s', Type='%s', Value='%s')" % (self.id, self.Date, self.Type, self.Value)
return ""
class Cities(Base):
__tablename__ = 'cities'
name = sqla.Column(sqla.NVARCHAR(50), primary_key=True)
geometry = sqla.Column(Geometry("POINT"))
class Lake(Base):
__tablename__ = 'lakes'
# id = sqla.Column(sqla.Integer)
name = sqla.Column(sqla.NVARCHAR(50), primary_key=True)
depth = sqla.Column(sqla.Integer, default = 0)
    created = sqla.Column(sqla.DateTime, default=datetime.now)  # pass the callable so the default is evaluated at insert time
geom = sqla.Column(Geometry("POLYGON"))
def main():
'''
shapefileTest()
---------------
- test to print shapefile content
- divided to two files dbf and shp
    - uses dictionaries as resultsets to contain data related to location and the location as polygon
Using datasets geopandas for country and city statistics OR Using the gadm28 dataset
- http://stackoverflow.com/questions/31997859/bulk-insert-a-pandas-dataframe-using-sqlalchemy
crs (coordinate system )
http://stackoverflow.com/questions/3845006/database-of-countries-and-their-cities
'''
naturalEarthToCSV = False
esriShapefileToGeopandas = False
loadShapefileData = False
combineDataForCities = True
if naturalEarthToCSV:
gp_world, gp_cities = generateWorldToDB(loadCSV = True)
print ('Countries: ', gp_world)
print ('Cities: ', gp_cities)
if esriShapefileToGeopandas:
'''
'OBJECTID', 'geometry', 'UID', 'ID_0', 'ISO', 'NAME_0',
'REGION', 'VARREGION', 'Shape_Leng', 'Shape_Area'
'ID_1', 'NAME_1',
'ID_2', 'NAME_2',
'ID_3', 'NAME_3',
'ID_4', 'NAME_4',
'ID_5', 'NAME_5',
'''
shp = gp.GeoDataFrame.from_file('./gadm28/gadm28.shp')
shp_1 = shp[['OBJECTID', 'geometry']]
shp = shp[['OBJECTID', 'UID', 'ID_0', 'ISO', 'NAME_0', 'REGION',
'VARREGION', 'Shape_Leng', 'Shape_Area', 'ID_1', 'NAME_1','ID_2', 'NAME_2',
'ID_3', 'NAME_3', 'ID_4', 'NAME_4', 'ID_5', 'NAME_5']]
#save X,Y into csv file
shp.to_csv("./data/allData.csv",header=True,index=False,sep="\t")
shp_1.to_csv("./data/allData_geom.csv",header=True,index=False,sep="\t")
print (shp)
if loadShapefileData:
shapefileTest(i = 0, i_max = 50)
if combineDataForCities:
'''
cities: Country,City,AccentCity,Region,Population,Latitude,Longitude
- Country, City, Population,Latitude,Longitude - link to add iso3
countrycodes: euname,modified,linked_country,iso3,iso2,grc,isonum,country,imperitive
- country, iso3, iso2
- define datasets
- merge with country
- add geometry
- store to csv
'''
df_cities = pd.read_csv("./data/worldcitiespop.csv", sep = ',', encoding = "ISO-8859-1", header = 0,
names=['Country','City','AccentCity','Region','Population','Latitude','Longitude'])
df_cities = df_cities[['Country','City','Region','Population','Latitude','Longitude']]
df_cities.columns = ['iso2', 'City','Region','Population','Latitude','Longitude']
df_cities['iso2'] = df_cities['iso2'].str.upper()
df_cities = df_cities[df_cities['Population'] > 50000]
df_countryCodes = pd.read_csv("./data/countryISO2, 3.csv", sep = ',', header = 0,
names=['euname','modified','linked_country','iso3','iso2','grc','isonum','country','imperitive'])
df_countryCodes = df_countryCodes[['country', 'iso3', 'iso2']]
df_main = pd.merge(df_cities, df_countryCodes, on='iso2', how='inner')
geometry = [Point(xy) for xy in zip(df_main.Longitude, df_main.Latitude)]
crs = {'init': 'epsg:4326'}
df_geo = gp.GeoDataFrame(df_main, crs=crs, geometry=geometry)
print (df_geo)
df_geo.to_csv("./data/allDataCities.csv",header=True,index=False,sep=",")
def generateWorldToDB(loadCSV = False, getAsPandasDataFrame = False):
'''
- Main test method, contains two cases and main body
    - The main issue is with handling geographic data. Since the available python libraries have no support for MSSQL,
      storing the data as CSV may be the best bet.
- With conventional data the transformation works
    - The geometry type in SQL is image data with convert methods to coordinates or geometric shapes like polygon
returns: datasets for countries, cities
'''
world = gp.read_file(datasets.get_path('naturalearth_lowres'))
cities = gp.read_file(datasets.get_path('naturalearth_cities'))
if loadCSV:
world.to_csv('./data/countries.csv', sep='\t')
cities.to_csv('./data/cities.csv', sep='\t')
return world, cities
if getAsPandasDataFrame:
df_countries = pd.read_csv('./data/countries.csv',sep='\t',
index_col='iso_a3', names=['iso_a3', 'name','continent', 'gdp_md_est', 'geometry', 'pop_est'])
df_cities = pd.read_csv('./data/cities.csv',
index_col='name', names=['name', 'geometry'])
return df_countries, df_cities
else:
dbData = world.to_dict(orient = 'records')
dbData_1 = cities.to_dict(orient = 'records')
print ("original dataframe - countries: ", world)
print ("original dataframe - cities: ", cities)
tableNameA = 'GeographicNE'
print (GeographicNE.__table__)
# process for SQL
sql = sqla.create_engine(engineString)
conn = sql.connect()
metadata = sqla.schema.MetaData(bind=sql,reflect=True)
table = sqla.Table(tableNameA, metadata, autoload=True)
print (table)
# Open the session
Session= sessionmaker(bind=sql)
session = Session()
try:
conn.execute(table.insert(), dbData)
world.to_sql(tableNameA, sql, if_exists='append')
except:
print ('Exception type:', sys.exc_info()[0])
print ('Exception value:', sys.exc_info()[1])
session.commit()
session.close()
return dbData, dbData_1
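# A minimal usage sketch (illustrative only; the function name is made up and
# it assumes the geopandas-bundled naturalearth datasets used above are
# available): load the low-resolution world layer and keep only the columns
# mirrored by the GeographicNE table.
def exampleLoadNaturalEarth():
    world = gp.read_file(datasets.get_path('naturalearth_lowres'))
    return world[['iso_a3', 'name', 'continent', 'pop_est', 'gdp_md_est', 'geometry']]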
def shapefileTest(i = 0, i_max = 50):
'''
    Loads the gadm28 shapefile, containing geographical data
- files: gadm28.shp, gadm28.dbf
- fields: ['OBJECTID', 'UID', 'ID_0', 'ISO', 'NAME_0', 'ID_1', 'NAME_1', 'VARNAME_1',
'NL_NAME_1', 'HASC_1', 'CCN_1', 'CCA_1', 'TYPE_1', 'ENGTYPE_1', 'VALIDFR_1', 'VALIDTO_1',
'REMARKS_1', 'ID_2', 'NAME_2', 'VARNAME_2', 'NL_NAME_2', 'HASC_2', 'CCN_2', 'CCA_2', 'TYPE_2',
'ENGTYPE_2', 'VALIDFR_2', 'VALIDTO_2', 'REMARKS_2', 'ID_3', 'NAME_3', 'VARNAME_3', 'NL_NAME_3',
'HASC_3', 'CCN_3', 'CCA_3', 'TYPE_3', 'ENGTYPE_3', 'VALIDFR_3', 'VALIDTO_3', 'REMARKS_3', 'ID_4',
'NAME_4', 'VARNAME_4', 'CCN_4', 'CCA_4', 'TYPE_4', 'ENGTYPE_4', 'VALIDFR_4', 'VALIDTO_4', 'REMARKS_4',
'ID_5', 'NAME_5', 'CCN_5', 'CCA_5', 'TYPE_5', 'ENGTYPE_5', 'REGION', 'VARREGION', 'Shape_Leng',
'Shape_Area']
location
- geometric
- polygon + coordinates marking
'''
myshp = open("./gadm28/gadm28.shp", "rb")
mydbf = open("./gadm28/gadm28.dbf", "rb")
r = shapefile.Reader(shp=myshp, dbf=mydbf)
fields = [field[0] for field in r.fields[1:]]
print ('fields: ', fields)
for feature in r.shapeRecords():
try:
geom = feature.shape.__geo_interface__
atr = dict(zip(fields, feature.record))
print ("geo_interface: ", geom)
print ('feature record: ', atr)
except:
print ('Exception type:', sys.exc_info()[0])
print ('Exception value:', sys.exc_info()[1])
i = i + 1
        if i == i_max:
break
return r
def testSQLAlchemyORM():
'''
    - Use a dummy example, the Lake class, to test committing to the database using a native geometry type.
    - Does NOT work with MSSQL; the current implementation covers PostgreSQL with PostGIS.
'''
print (Lake.__table__)
# lake = Lake(name='Majeur')
lake = Lake(name='Majeur', geom='POLYGON((0 0,1 0,1 1,0 1,0 0))')
# print (lake.geom)
sql = sqla.create_engine(engineString)
conn = sql.connect()
Session= sessionmaker(bind=sql)
session = Session()
try:
session.add(lake)
session.commit()
except:
print ('Exception type:', sys.exc_info()[0])
print ('Exception value:', sys.exc_info()[1])
session.close()
if __name__ == "__main__":
main()
|
apache-2.0
|
jobovy/segueSelect
|
segueSelect/segueSelect.py
|
1
|
75479
|
import os, os.path
import sys
import copy
import math
import numpy
from scipy import special, interpolate, optimize, misc, stats
import pyfits
import matplotlib
try:
from galpy.util import bovy_plot
except ImportError:
import bovy_plot
try:
from galpy.util import bovy_coords
_COORDSLOADED= True
except ImportError:
_COORDSLOADED= False
########################SELECTION FUNCTION DETERMINATION#######################
_INTERPDEGREEBRIGHT= 3
_INTERPDEGREEFAINT= 3
_BINEDGES_G_FAINT= [0.,50.,70.,85.,200000000.]
_BINEDGES_G_BRIGHT= [0.,75.,150.,300.,200000000.]
###############################FILENAMES#######################################
_SEGUESELECTDIR=os.getenv('SEGUESELECTDIR')
_GDWARFALLFILE= os.path.join(_SEGUESELECTDIR,'gdwarfall_raw_nodups_ysl_nospec.fit')
_GDWARFFILE= os.path.join(_SEGUESELECTDIR,'gdwarf_raw_nodups_ysl_nospec.fit')
_KDWARFALLFILE= os.path.join(_SEGUESELECTDIR,'kdwarfall_raw_nodups_ysl_nospec.fit')
_KDWARFFILE= os.path.join(_SEGUESELECTDIR,'kdwarf_raw_nodups_ysl_nospec.fit')
#DR9
_GDWARFALLFILE_DR9= os.path.join(_SEGUESELECTDIR,'gdwarfall_dr9_nospec_wsoplate.fit')
#_GDWARFALLFILE_DR9= os.path.join(_SEGUESELECTDIR,'gdwarfall_nodups_mydr9.fit')
_ERASESTR= " "
_RESOLVEFAINTBRIGHT= True
class segueSelect:
"""Class that contains selection function for SEGUE targets"""
def __init__(self,sample='G',plates=None,
select='all',dr9=False,
type_bright='tanhrcut',dr_bright=None,
interp_type_bright='tanh',
interp_degree_bright=_INTERPDEGREEBRIGHT,
robust_bright=True,
binedges_bright=_BINEDGES_G_BRIGHT,
type_faint='tanhrcut',dr_faint=None,
interp_type_faint='tanh',
interp_degree_faint=_INTERPDEGREEFAINT,
robust_faint=True,
binedges_faint=_BINEDGES_G_FAINT,
ug=False,ri=False,sn=True,
ebv=True,
_rmax=None,_rmin=None,indiv_brightlims=False,
_program_brightlims=False,
_platephot=None,_platespec=None,_spec=None):
"""
NAME:
__init__
PURPOSE:
load the selection function for this sample
INPUT:
sample= sample to load ('G', or 'K')
select= 'all' selects all SEGUE stars in the color-range;
'program' only selects program stars
plates= if set, only consider this plate, or list of plates,
                   or 'faint'/'bright' plates only,
or plates '>1000' or '<2000'
dr9= if True, use DR9 photometry (default: false=DR7)
SELECTION FUNCTION DETERMINATION:
default: tanhrcut for both bright and faint
type_bright= type of selection function to determine
'constant' for constant per plate;
'r' universal function of r
'plateSN_r' function of r for plates in ranges in plateSN_r
'sharprcut' sharp cut in r for each plate, at the r-band mag of the faintest object on this plate
'tanhrcut' cut in r for each plate, at the r-band mag of the faintest object on this plate, with 0.1 mag tanh softening
dr_bright= when determining the selection function as a function
of r, binsize to use
interp_degree_bright= when spline-interpolating, degree to use
interp_type_bright= type of interpolation to use ('tanh' or
'spline')
robust_bright= perform any fit robustly
           type_faint=, dr_faint=, interp_degree_faint=, interp_type_faint=,
robust_faint
= same as the corresponding keywords for bright
indiv_brightlims= if True, determine the bright/faint boundary as the brightest faint-plate spectrum, or the faintest bright-plate if there is no faint plate in the pair
SPECTROSCOPIC SAMPLE SELECTION:
ug= if True, cut on u-g,
if list/array cut to ug[0] < u-g< ug[1]
ri= if True, cut on r-i,
if list/array cut to ri[0] < r-i< ri[1]
sn= if False, don't cut on SN,
if number cut on SN > the number (15)
ebv= if True, cut on E(B-V),
if number cut on EBV < the number (0.3)
OUTPUT:
object
HISTORY:
2011-07-08 - Written - Bovy@MPIA (NYU)
"""
#Set options
if dr_bright is None:
if type_bright.lower() == 'r':
dr_bright= 0.05
elif type_bright.lower() == 'platesn_r':
if sample.lower() == 'k':
dr_bright= 0.4
elif sample.lower() == 'g':
dr_bright= 0.2
if dr_faint is None:
if type_faint.lower() == 'r':
dr_faint= 0.2
elif type_faint.lower() == 'platesn_r':
if sample.lower() == 'g':
dr_faint= 0.2
elif sample.lower() == 'k':
dr_faint= 0.5
self.sample=sample.lower()
#Load plates
self.platestr= _load_fits(os.path.join(_SEGUESELECTDIR,
'segueplates.fits'))
#Add platesn_r to platestr
platesn_r= (self.platestr.sn1_1+self.platestr.sn2_1)/2.
self.platestr= _append_field_recarray(self.platestr,
'platesn_r',platesn_r)
if plates is None:
self.plates= list(self.platestr.plate)
else:
if isinstance(plates,str):
self.plates= self.platestr.plate
if plates[0] == '>':
self.plates= self.plates[(self.plates > int(plates[1:len(plates)]))]
elif plates[0] == '<':
self.plates= self.plates[(self.plates < int(plates[1:len(plates)]))]
elif plates.lower() == 'faint':
indx= ['faint' in name for name in self.platestr.programname]
indx= numpy.array(indx,dtype='bool')
self.plates= self.plates[indx]
elif plates.lower() == 'bright':
indx= [not 'faint' in name for name in self.platestr.programname]
indx= numpy.array(indx,dtype='bool')
self.plates= self.plates[indx]
else:
print "'plates=' format not understood, check documentation"
return
self.plates= list(self.plates)
elif not isinstance(plates,(list,numpy.ndarray)):
self.plates= [plates]
elif isinstance(plates,numpy.ndarray):
self.plates= list(plates)
else:
self.plates= plates
#Remove 2820 for now BOVY DEAL WITH PLATE 2820, 2560, 2799, 2550
if 2820 in self.plates:
self.plates.remove(2820)
if 2560 in self.plates:
self.plates.remove(2560)
if 2799 in self.plates:
self.plates.remove(2799)
if 2550 in self.plates:
self.plates.remove(2550)
#Remove duplicate plates
self.plates= numpy.array(sorted(list(set(self.plates))))
#Match platestr to plates again
allIndx= numpy.arange(len(self.platestr),dtype='int')
reIndx= numpy.zeros(len(self.plates),dtype='int')-1
for ii in range(len(self.plates)):
indx= (self.platestr.field('plate') == self.plates[ii])
reIndx[ii]= (allIndx[indx][0])
self.platestr= self.platestr[reIndx]
#Build bright/faint dict
self.platebright= {}
for ii in range(len(self.plates)):
p= self.plates[ii]
if 'faint' in self.platestr[ii].programname:
self.platebright[str(p)]= False
else:
self.platebright[str(p)]= True
#Also build bright/faint index
brightplateindx= numpy.empty(len(self.plates),dtype='bool') #BOVY: move this out of here
faintplateindx= numpy.empty(len(self.plates),dtype='bool')
for ii in range(len(self.plates)):
if 'faint' in self.platestr[ii].programname: #faint plate
faintplateindx[ii]= True
brightplateindx[ii]= False
else:
faintplateindx[ii]= False
brightplateindx[ii]= True
self.faintplateindx= faintplateindx
self.brightplateindx= brightplateindx
self.nbrightplates= numpy.sum(self.brightplateindx)
self.nfaintplates= numpy.sum(self.faintplateindx)
#Build plate-pair array
platemate= numpy.zeros(len(self.plates),dtype='int')
indices= numpy.arange(len(self.plates),dtype='int')
for ii in range(len(self.plates)):
plate= self.plates[ii]
#Find plate's friend
indx= (self.platestr.ra == self.platestr[ii].ra)
if numpy.sum(indx) < 2:
platemate[ii]= -1 #No friend
continue
thisplates= self.plates[indx]
jj= indices[indx][0]
kk= indices[indx][1]
if ii == kk: platemate[ii]= jj
elif ii == jj: platemate[ii]= kk
self.platemate= platemate
#Set r limits
if self.sample == 'g':
self.rmin= 14.5
self.rmax= 20.2
elif self.sample == 'k':
self.rmin= 14.5
self.rmax= 19.
if not _rmin is None: self.rmin= _rmin
if not _rmax is None: self.rmax= _rmax
#load the spectroscopic data
self.select= select
if _platespec is None:
sys.stdout.write('\r'+"Reading and parsing spectroscopic data ...\r")
sys.stdout.flush()
if sample.lower() == 'g':
if select.lower() == 'all' and not dr9:
self.spec= read_gdwarfs(ug=ug,ri=ri,sn=sn,
ebv=ebv,nocoords=True)
elif select.lower() == 'all' and dr9:
self.spec= read_gdwarfs(file=_GDWARFALLFILE_DR9,
ug=ug,ri=ri,sn=sn,
ebv=ebv,nocoords=True)
self.spec['plate']= self.spec['soplate']
elif select.lower() == 'program':
self.spec= read_gdwarfs(file=_GDWARFFILE,
ug=ug,ri=ri,sn=sn,
ebv=ebv,nocoords=True)
elif sample.lower() == 'k':
if select.lower() == 'all':
self.spec= read_kdwarfs(ug=ug,ri=ri,sn=sn,
ebv=ebv,nocoords=True)
elif select.lower() == 'program':
self.spec= read_kdwarfs(file=_KDWARFFILE,
ug=ug,ri=ri,sn=sn,
ebv=ebv,nocoords=True)
if _RESOLVEFAINTBRIGHT and sample.lower() == 'g':
#Re-assign faint stars on bright plates and vice versa
for ii in range(len(self.spec)):
try:
if (self.spec['dered_r'][ii] > 17.8 and self.platebright['%i' % self.spec['plate'][ii]]) \
or (self.spec['dered_r'][ii] < 17.8 and not self.platebright['%i' % self.spec['plate'][ii]]):
pindx= self.plates == self.spec['plate'][ii]
self.spec['plate'][ii]= self.plates[self.platemate[pindx]]
except KeyError:
pass
self.platespec= {}
for plate in self.plates:
#Find spectra for each plate
indx= (self.spec.field('plate') == plate)
self.platespec[str(plate)]= self.spec[indx]
sys.stdout.write('\r'+_ERASESTR+'\r')
sys.stdout.flush()
else:
self.platespec= _platespec
self.spec= _spec
#Set bright/faint divider
if indiv_brightlims:
if _program_brightlims and not select.lower() == 'program': #Grab the bright/faint interface from the program stars
if sample.lower() == 'g':
bfspec= read_gdwarfs(file=_GDWARFFILE,
ug=ug,ri=ri,sn=sn,
ebv=ebv,nocoords=True)
elif sample.lower() == 'k':
bfspec= read_kdwarfs(file=_KDWARFFILE,
ug=ug,ri=ri,sn=sn,
ebv=ebv,nocoords=True)
bfplatespec= {}
for plate in self.plates:
#Find spectra for each plate
indx= (bfspec.field('plate') == plate)
bfplatespec[str(plate)]= bfspec[indx]
else:
bfplatespec= self.platespec
#Use brightest faint-plate object as the bright/faint interface
faintbright= numpy.zeros(len(self.plates))
for ii in range(len(self.plates)):
#Pair?
if not self.platemate[ii] == -1:
#Which one's faint?
if faintplateindx[ii]: #First one
if len(bfplatespec[str(self.plates[ii])].r) > 0:
faintbright[ii]= numpy.amin(bfplatespec[str(self.plates[ii])].r)
elif len(bfplatespec[str(self.plates[self.platemate[ii]])].r) > 0:
faintbright[ii]= numpy.amax(bfplatespec[str(self.plates[self.platemate[ii]])].r)
else: faintbright[ii]= 17.8
elif faintplateindx[self.platemate[ii]]: #Second one
if len(bfplatespec[str(self.plates[self.platemate[ii]])].r) > 0:
faintbright[ii]= numpy.amin(bfplatespec[str(self.plates[self.platemate[ii]])].r)
elif len(bfplatespec[str(self.plates[ii])].r) > 0:
faintbright[ii]= numpy.amax(bfplatespec[str(self.plates[ii])].r)
else:
faintbright[ii]= 17.8
else:
print "Error: no faint plate found for plate-pair %i,%i ..."%(self.plates[ii],self.plates[self.platemate[ii]])
print "Returning ..."
return None
else:
if self.faintplateindx[ii]: #faint plate
faintbright[ii]= numpy.amin(bfplatespec[str(self.plates[ii])].r)
else:
faintbright[ii]= 17.8
self.faintbright= faintbright
else:
self.faintbright= numpy.zeros(len(self.plates))+17.8
#Also create faintbright dict
self.faintbrightDict= {}
for ii in range(len(self.plates)):
self.faintbrightDict[str(self.plates[ii])]= self.faintbright[ii]
#load the photometry for the SEGUE plates
if _platephot is None:
self.platephot= {}
for ii in range(len(self.plates)):
plate= self.plates[ii]
sys.stdout.write('\r'+"Loading photometry for plate %i" % plate)
sys.stdout.flush()
if dr9:
platefile= os.path.join(_SEGUESELECTDIR,'segueplates_dr9',
'%i.fit' % plate)
else:
platefile= os.path.join(_SEGUESELECTDIR,'segueplates',
'%i.fit' % plate)
self.platephot[str(plate)]= _load_fits(platefile)
#Split into bright and faint
if 'faint' in self.platestr[ii].programname:
indx= (self.platephot[str(plate)].field('r') >= self.faintbright[ii])
self.platephot[str(plate)]= self.platephot[str(plate)][indx]
else:
indx= (self.platephot[str(plate)].field('r') < self.faintbright[ii])
self.platephot[str(plate)]= self.platephot[str(plate)][indx]
sys.stdout.write('\r'+_ERASESTR+'\r')
sys.stdout.flush()
else:
self.platephot= _platephot
#Flesh out samples
for plate in self.plates:
if self.sample == 'g':
indx= ((self.platephot[str(plate)].field('g')\
-self.platephot[str(plate)].field('r')) < 0.55)\
*((self.platephot[str(plate)].field('g')\
-self.platephot[str(plate)].field('r')) > 0.48)\
*(self.platephot[str(plate)].field('r') < 20.2)\
*(self.platephot[str(plate)].field('r') > 14.5)
elif self.sample == 'k':
indx= ((self.platephot[str(plate)].field('g')\
-self.platephot[str(plate)].field('r')) > 0.55)\
*((self.platephot[str(plate)].field('g')\
-self.platephot[str(plate)].field('r')) < 0.75)\
*(self.platephot[str(plate)].field('r') < 19.)\
*(self.platephot[str(plate)].field('r') > 14.5)
self.platephot[str(plate)]= self.platephot[str(plate)][indx]
#Determine selection function
sys.stdout.write('\r'+"Determining selection function ...\r")
sys.stdout.flush()
if not numpy.sum(self.brightplateindx) == 0:
self._determine_select(bright=True,type=type_bright,dr=dr_bright,
interp_degree=interp_degree_bright,
interp_type= interp_type_bright,
robust=robust_bright,
binedges=binedges_bright)
if not numpy.sum(self.faintplateindx) == 0:
self._determine_select(bright=False,type=type_faint,dr=dr_faint,
interp_degree=interp_degree_faint,
interp_type=interp_type_faint,
robust=robust_faint,
binedges=binedges_faint)
sys.stdout.write('\r'+_ERASESTR+'\r')
sys.stdout.flush()
return None
def __call__(self,plate,r=None,gr=None):
"""
NAME:
__call__
PURPOSE:
evaluate the selection function
INPUT:
plate - plate number
r - dereddened r-band magnitude
gr - dereddened g-r color
OUTPUT:
selection function
HISTORY:
2011-07-11 - Written - Bovy@MPIA (NYU)
"""
#Handle input
if isinstance(plate,(numpy.int16,int)) \
and (isinstance(r,(int,float)) or r is None): #Scalar input
plate= [plate]
r= [r]
scalarOut= True
elif isinstance(plate,(numpy.int16,int)) \
and isinstance(r,(list,numpy.ndarray)):
#Special case this for optimization if sharprcut
bright= self.platebright[str(plate)] #Short-cut
if (bright and self.type_bright.lower() == 'sharprcut') \
or (not bright and self.type_faint.lower() == 'sharprcut'):
nout= len(r)
if isinstance(r,list): thisr= numpy.array(r)
else: thisr= r
out= numpy.zeros(nout)
if bright:
indx= (thisr >= 14.5)*(thisr <= numpy.amin([self.rcuts[str(plate)],self.faintbrightDict[str(plate)]]))
else:
indx= (thisr >= self.faintbrightDict[str(plate)])*(thisr <= numpy.amin([self.rcuts[str(plate)],self.rmax]))
if numpy.sum(indx) == 0: return out
out[indx]= self.weight[str(plate)]\
*self.rcuts_correct[str(plate)]
if isinstance(r,list): return list(out)
else: return out
elif (bright and self.type_bright.lower() == 'tanhrcut') \
or (not bright and self.type_faint.lower() == 'tanhrcut'):
nout= len(r)
if isinstance(r,list): thisr= numpy.array(r)
else: thisr= r
out= numpy.zeros(nout)
if bright:
indx= (thisr >= 14.5)*(thisr <= self.faintbrightDict[str(plate)])
else:
indx= (thisr >= self.faintbrightDict[str(plate)])*(thisr <= self.rmax)
if numpy.sum(indx) == 0: return out
out[indx]= self.weight[str(plate)]\
*self.rcuts_correct[str(plate)]\
*_sf_tanh(thisr[indx],[self.rcuts[str(plate)]-0.1,
-3.,0.])
if isinstance(r,list): return list(out)
else: return out
else:
if isinstance(r,numpy.ndarray):
plate= numpy.array([plate for ii in range(len(r))])
else:
plate= [plate for ii in range(len(r))]
scalarOut= False
else:
scalarOut= False
out= []
for ii in range(len(plate)):
p= plate[ii]
out.append(self._call_single(p,r[ii]))
if isinstance(plate,numpy.ndarray):
out= numpy.array(out)
if scalarOut:
return out[0]
else:
return out
def _call_single(self,plate,r):
"""Call the selection function for a single object"""
#First check whether this plate exists
if not plate in self.plates: return 0.
#First determine whether this is a bright or a faint plate
bright= self.platebright[str(plate)] #Short-cut
if bright:
if not self.type_bright.lower() == 'tanhrcut+brightsharprcut' and (r >= self.faintbrightDict[str(plate)] or r < self.rmin): return 0.
elif self.type_bright.lower() == 'constant':
return self.weight[str(plate)]
elif self.type_bright.lower() == 'r':
if self.interp_type_bright.lower() == 'spline':
if r < self.s_one_r_bright_minxo:
return numpy.exp(_linear_func(r,
self.s_one_r_bright_minderiv,
self.s_one_r_bright_minxo,
self.s_one_r_bright_minyo))\
*self.weight[str(plate)]
else:
soner= numpy.exp(\
interpolate.splev(r,self.s_one_r_bright_interpolate))
if soner < 0.: return 0.
else: return self.weight[str(plate)]*soner
elif self.interp_type_bright.lower() == 'tanh':
return _sf_tanh(r,self.s_one_r_tanh_params_bright)\
*self.weight[str(plate)]
elif self.type_bright.lower() == 'platesn_r':
return self.platesn_sfs_bright[self.platesn_platebin_dict_bright[str(plate)]](plate,r=r)
elif self.type_bright.lower() == 'sharprcut':
if r <= self.rcuts[str(plate)]:
return self.weight[str(plate)]\
*self.rcuts_correct[str(plate)]
else:
return 0.
elif self.type_bright.lower() == 'tanhrcut':
return self.weight[str(plate)]\
*self.rcuts_correct[str(plate)]\
*_sf_tanh(r,[self.rcuts[str(plate)]-0.1,
-3.,0.])
elif self.type_bright.lower() == 'tanhrcut+brightsharprcut':
if r <= self.rcuts_bright[str(plate)]: return 0.
return self.weight[str(plate)]\
*self.rcuts_correct[str(plate)]\
*_sf_tanh(r,[self.rcuts_faint[str(plate)]-0.1,
-3.,0.])
else:
if not self.type_faint.lower() == 'tanhrcut+brightsharprcut' and (r < self.faintbrightDict[str(plate)] or r > self.rmax): return 0.
elif self.type_faint.lower() == 'constant':
return self.weight[str(plate)]
elif self.type_faint.lower() == 'r':
if self.interp_type_faint.lower() == 'spline':
if r < self.s_one_r_faint_minxo:
return numpy.exp(_linear_func(r,
self.s_one_r_faint_minderiv,
self.s_one_r_faint_minxo,
self.s_one_r_faint_minyo))\
*self.weight[str(plate)]
else:
soner= numpy.exp(\
interpolate.splev(r,self.s_one_r_faint_interpolate))
if soner < 0.: return 0.
else: return self.weight[str(plate)]*soner
elif self.interp_type_faint.lower() == 'tanh':
return _sf_tanh(r,self.s_one_r_tanh_params_faint)\
*self.weight[str(plate)]
elif self.type_faint.lower() == 'platesn_r':
return self.platesn_sfs_faint[self.platesn_platebin_dict_faint[str(plate)]](plate,r=r)
elif self.type_faint.lower() == 'sharprcut':
if r <= self.rcuts[str(plate)]:
return self.weight[str(plate)]\
*self.rcuts_correct[str(plate)]
else:
return 0.
elif self.type_faint.lower() == 'tanhrcut':
return self.weight[str(plate)]\
*self.rcuts_correct[str(plate)]\
*_sf_tanh(r,[self.rcuts[str(plate)]-0.1,
-3.,0.])
elif self.type_faint.lower() == 'tanhrcut+brightsharprcut':
if r <= self.rcuts_bright[str(plate)]: return 0.
return self.weight[str(plate)]\
*self.rcuts_correct[str(plate)]\
*_sf_tanh(r,[self.rcuts_faint[str(plate)]-0.1,
-3.,0.])
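    #Usage sketch (hypothetical magnitudes; plate 2964 is one of the bright
    #plates used by the plot methods below): once an instance sf= segueSelect(...)
    #has been set up, the selection function is evaluated through __call__ as
    #    sf(2964,r=15.3)                        # single plate, single r
    #    sf(2964,r=numpy.linspace(15.,17.5,10)) # single plate, array of r
    #each of which dispatches to _call_single above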
def check_consistency(self,plate):
"""
NAME:
check_consistency
PURPOSE:
calculate the KS probability that this plate is consistent with
           being drawn from the underlying photometric sample using our model
for the selection function
INPUT:
           plate - plate number(s), 'all', 'bright', or 'faint'
OUTPUT:
KS probability or list/array of such numbers
HISTORY:
2011-07-21 - Written - Bovy@MPIA (NYU)
"""
#Handle input
scalarOut= False
if isinstance(plate,str) and plate.lower() == 'all':
plate= self.plates
elif isinstance(plate,str) and plate.lower() == 'bright':
plate= self.plates[self.brightplateindx]
elif isinstance(plate,str) and plate.lower() == 'faint':
plate= self.plates[self.faintplateindx]
if isinstance(plate,(numpy.int16,int)): #Scalar input
plate= [plate]
scalarOut= True
out= []
for p in plate:
out.append(self._check_consistency_single(p))
if scalarOut: return out[0]
elif isinstance(plate,numpy.ndarray): return numpy.array(out)
else: return out
def _check_consistency_single(self,plate):
"""check_consistency for a single plate"""
photr,specr,fn1,fn2= self._plate_rcdfs(plate)
if photr is None:
return -1
j1, j2, i= 0, 0, 0
id1= range(len(photr)+len(specr))
id2= range(len(photr)+len(specr))
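        #Merge the two sorted samples so that the empirical CDFs fn1 (weighted
        #photometry) and fn2 (spectroscopy) are compared at the union of their
        #r values; D below is the maximum CDF difference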
while j1 < len(photr) and j2 < len(specr):
d1= photr[j1]
d2= specr[j2]
if d1 <= d2: j1+= 1
if d2 <= d1: j2+= 1
id1[i]= j1
id2[i]= j2
i+= 1
id1= id1[0:i-1]
id2= id2[0:i-1]
D= numpy.amax(numpy.fabs(fn1[id1]-fn2[id2]))
neff= len(photr)*len(specr)/float(len(photr)+len(specr))
return stats.ksone.sf(D,neff)
def _plate_rcdfs(self,plate):
#Load photometry and spectroscopy for this plate
thisplatephot= self.platephot[str(plate)]
thisplatespec= self.platespec[str(plate)]
#Cut to bright or faint part
if self.platebright[str(plate)]:
thisplatespec= thisplatespec[(thisplatespec.dered_r < self.faintbrightDict[str(plate)])\
*(thisplatespec.dered_r > self.rmin)]
else:
thisplatespec= thisplatespec[(thisplatespec.dered_r < self.rmax)\
*(thisplatespec.dered_r >= self.faintbrightDict[str(plate)])]
if len(thisplatespec.dered_r) == 0: return (None,None,None,None)
#Calculate selection function weights for the photometry
w= numpy.zeros(len(thisplatephot.r))
for ii in range(len(w)):
w[ii]= self(plate,r=thisplatephot[ii].r)
#Calculate KS test statistic
sortindx_phot= numpy.argsort(thisplatephot.r)
sortindx_spec= numpy.argsort(thisplatespec.dered_r)
sortphot= thisplatephot[sortindx_phot]
sortspec= thisplatespec[sortindx_spec]
w= w[sortindx_phot]
fn1= numpy.cumsum(w)/numpy.sum(w)
fn2= numpy.ones(len(sortindx_spec))
fn2= numpy.cumsum(fn2)
fn2/= fn2[-1]
return (sortphot.r,sortspec.dered_r,fn1,fn2)
def plot_plate_rcdf(self,plate,overplot=False,xrange=None,yrange=None,
photcolor='k',speccolor='r'):
"""
NAME:
plot_plate_rcdf
PURPOSE:
plot the r-band magnitude CDF for the photometric sample * selection
function model and for the spectroscopic sample for a single plate
INPUT:
plate - plate to plot
           overplot= if True, overplot
xrange=, yrange=
photcolor=, speccolor= color to use
OUTPUT:
plot
HISTORY:
2011-07-21 - Written - Bovy@MPIA (NYU)
"""
photr,specr,fn1,fn2= self._plate_rcdfs(plate)
if photr is None:
print "Plate %i has no spectroscopic data ..." % plate
print "Returning ..."
return None
if xrange is None: xrange= [numpy.amin([numpy.amin(photr),numpy.amin(specr)])-0.1,
numpy.amax([numpy.amax(photr),numpy.amax(specr)])+0.1]
if yrange is None: yrange= [0.,1.1]
bovy_plot.bovy_plot(photr,fn1,photcolor+'-',overplot=overplot)
bovy_plot.bovy_plot(specr,fn2,speccolor+'-',overplot=True)
return None
def plot(self,x='r',y='sf',plate='a bright plate',overplot=False):
"""
NAME:
plot
PURPOSE:
plot the derived selection function
INPUT:
x= what to plot on the x-axis (e.g., 'r')
y= what to plot on the y-axis (default function value)
plate= plate to plot (number or 'a bright plate' (default), 'a faint plate')
overplot= if True, overplot
OUTPUT:
plot to output
HISTORY:
2011-07-18 - Written - Bovy@MPIA (NYU)
"""
_NXS= 1001
if isinstance(plate,str) and plate.lower() == 'a bright plate':
plate= 2964
elif isinstance(plate,str) and plate.lower() == 'a faint plate':
plate= 2965
if x.lower() == 'r':
xs= numpy.linspace(self.rmin,self.rmax,_NXS)
xrange= [self.rmin,self.rmax]
xlabel= r'$r_0\ [\mathrm{mag}]$'
#Evaluate selection function
zs= self(plate,r=xs)
if y.lower() == 'sf':
ys= zs
ylabel= r'$\mathrm{selection\ function}$'
yrange= [0.,1.2*numpy.amax(ys)]
bovy_plot.bovy_plot(xs,ys,'k-',xrange=xrange,yrange=yrange,
xlabel=xlabel,ylabel=ylabel,
overplot=overplot)
return None
def plot_s_one_r(self,plate='a bright plate',overplot=False,color='k',
xrange=None,yrange=None):
"""
NAME:
plot_s_one_r
PURPOSE:
plot the derived selection function s_1(r)
INPUT:
plate= plate to plot (number or 'a bright plate' (default),
'a faint plate')
overplot= if True, overplot
xrange=, yrange=
OUTPUT:
plot to output
HISTORY:
2011-07-20 - Written - Bovy@MPIA (NYU)
"""
_NXS= 1001
if isinstance(plate,str) and plate.lower() == 'a bright plate':
plate= 2964
elif isinstance(plate,str) and plate.lower() == 'a faint plate':
plate= 2965
xs= numpy.linspace(self.rmin+0.001,self.rmax-0.001,_NXS)
if xrange is None: xrange= [self.rmin,self.rmax]
xlabel= r'$r\ [\mathrm{mag}]$'
#Evaluate selection function
ys= numpy.array(self(plate,r=xs))/self.weight[str(plate)]
ylabel= r'$r\ \mathrm{dependence\ of\ selection\ function}$'
if yrange is None: yrange= [0.,1.2*numpy.amax(ys)]
bovy_plot.bovy_plot(xs,ys,color+'-',xrange=xrange,yrange=yrange,
xlabel=xlabel,ylabel=ylabel,
overplot=overplot)
pindx= (self.plates == plate)
if (self.brightplateindx[pindx][0] \
and self.type_bright.lower() != 'r')\
or (self.faintplateindx[pindx][0] \
and self.type_faint.lower() != 'r'): return
#Also plot data
from matplotlib.pyplot import errorbar
if self.platebright[str(plate)]:
bovy_plot.bovy_plot(self.s_r_plate_rs_bright,
self.s_one_r_bright,
color=color,
marker='o',ls='none',overplot=True)
errorbar(self.s_r_plate_rs_bright,
self.s_one_r_bright,
self.s_one_r_err_bright,
xerr= numpy.zeros(len(self.interp_rs_bright))+(self.interp_rs_bright[1]-self.interp_rs_bright[0])/2.,
fmt=None,ecolor=color)
else:
bovy_plot.bovy_plot(self.s_r_plate_rs_faint,
self.s_one_r_faint,
color=color,
marker='o',ls='none',overplot=True)
errorbar(self.s_r_plate_rs_faint,
self.s_one_r_faint,
self.s_one_r_err_faint,
xerr= numpy.zeros(len(self.interp_rs_faint))+(self.interp_rs_faint[1]-self.interp_rs_faint[0])/2.,
fmt=None,ecolor=color)
return None
def plotColorMag(self,x='gr',y='r',plate='all',spec=False,scatterplot=True,
bins=None,specbins=None):
"""
NAME:
plotColorMag
PURPOSE:
plot the distribution of photometric/spectroscopic objects in color
magnitude (or color-color) space
INPUT:
x= what to plot on the x-axis (combinations of ugriz as 'g',
or 'gr')
y= what to plot on the y-axis (combinations of ugriz as 'g',
or 'gr')
plate= plate(s) to plot, int or list/array, 'all', 'bright', 'faint'
spec= if True, overlay spectroscopic objects as red contours and
histograms
scatterplot= if False, regular scatterplot,
if True, hogg_scatterplot
bins= number of bins to use in the histogram(s)
           specbins= number of bins to use in histograms of spectroscopic
objects
        OUTPUT:
           plot to output
HISTORY:
2011-07-13 - Written - Bovy@MPIA (NYU)
"""
if isinstance(plate,str) and plate.lower() == 'all':
plate= self.plates
elif isinstance(plate,str) and plate.lower() == 'bright':
plate= []
for ii in range(len(self.plates)):
if not 'faint' in self.platestr[ii].programname:
plate.append(self.plates[ii])
elif isinstance(plate,str) and plate.lower() == 'faint':
plate= []
for ii in range(len(self.plates)):
if 'faint' in self.platestr[ii].programname:
plate.append(self.plates[ii])
elif isinstance(plate,(list,numpy.ndarray)):
plate=plate
else:
plate= [plate]
xs, ys= [], []
specxs, specys= [], []
for ii in range(len(plate)):
p=plate[ii]
thisplatephot= self.platephot[str(p)]
thisplatespec= self.platespec[str(p)]
if len(x) > 1: #Color
xs.extend(thisplatephot.field(x[0])\
-thisplatephot.field(x[1])) #dereddened
specxs.extend(thisplatespec.field('dered_'+x[0])\
-thisplatespec.field('dered_'+x[1]))
else:
xs.extend(thisplatephot.field(x[0]))
specxs.extend(thisplatespec.field('dered_'+x[0]))
if len(y) > 1: #Color
ys.extend(thisplatephot.field(y[0])\
-thisplatephot.field(y[1])) #dereddened
specys.extend(thisplatespec.field('dered_'+y[0])\
-thisplatespec.field('dered_'+y[1]))
else:
ys.extend(thisplatephot.field(y[0]))
specys.extend(thisplatespec.field('dered_'+y[0]))
xs= numpy.array(xs)
xs= numpy.reshape(xs,numpy.prod(xs.shape))
ys= numpy.array(ys)
ys= numpy.reshape(ys,numpy.prod(ys.shape))
specxs= numpy.array(specxs)
specxs= numpy.reshape(specxs,numpy.prod(specxs.shape))
specys= numpy.array(specys)
specys= numpy.reshape(specys,numpy.prod(specys.shape))
if len(x) > 1:
xlabel= '('+x[0]+'-'+x[1]+')_0'
else:
xlabel= x[0]+'_0'
xlabel= r'$'+xlabel+r'$'
if len(y) > 1:
ylabel= '('+y[0]+'-'+y[1]+')_0'
else:
ylabel= y[0]+'_0'
ylabel= r'$'+ylabel+r'$'
if len(x) > 1: #color
xrange= [numpy.amin(xs)-0.02,numpy.amax(xs)+0.02]
else:
xrange= [numpy.amin(xs)-0.7,numpy.amax(xs)+0.7]
if len(y) > 1: #color
yrange= [numpy.amin(ys)-0.02,numpy.amax(ys)+0.02]
else:
yrange= [numpy.amin(ys)-0.7,numpy.amax(ys)+0.7]
if bins is None:
bins= int(numpy.ceil(0.3*numpy.sqrt(len(xs))))
if specbins is None: specbins= bins
if scatterplot:
if len(xs) > 100000: symb= 'w,'
else: symb= 'k,'
if spec:
#First plot spectroscopic sample
cdict = {'red': ((.0, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ((.0, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'blue': ((.0, 1.0, 1.0),
(1.0, 1.0, 1.0))}
allwhite = matplotlib.colors.LinearSegmentedColormap('allwhite',cdict,256)
speclevels= list(special.erf(0.5*numpy.arange(1,4)))
speclevels.append(1.01)#HACK TO REMOVE OUTLIERS
bovy_plot.scatterplot(specxs,specys,symb,onedhists=True,
levels=speclevels,
onedhistec='k',
cntrcolors='w',
onedhistls='dashed',
onedhistlw=1.5,
cmap=allwhite,
xlabel=xlabel,ylabel=ylabel,
xrange=xrange,yrange=yrange,
bins=specbins)
bovy_plot.scatterplot(xs,ys,symb,onedhists=True,
xlabel=xlabel,ylabel=ylabel,
xrange=xrange,yrange=yrange,bins=bins,
overplot=spec)
else:
bovy_plot.bovy_plot(xs,ys,'k,',onedhists=True,
xlabel=xlabel,ylabel=ylabel,
xrange=xrange,yrange=yrange)
return None
def _determine_select(self,bright=True,type=None,dr=None,
interp_degree=_INTERPDEGREEBRIGHT,
interp_type='tanh',
robust=False,
binedges=None):
"""Function that actually determines the selection function"""
if bright:
self.type_bright= type
plateindx= self.brightplateindx
else:
self.type_faint= type
plateindx= self.faintplateindx
if type.lower() == 'platesn_r': #plateSN_r dependent r selection
#Divide up plates in bins
nbins= len(binedges)-1
plate_in_bins= [[] for ii in range(nbins)]
platebin_dict= {}
theseplates= self.plates[plateindx]
thisplatestr= self.platestr[plateindx]
for ii in range(len(theseplates)):
kk= 0
while kk < nbins \
and thisplatestr[ii].platesn_r > binedges[kk+1]:
kk+=1
plate_in_bins[kk].append(theseplates[ii])
#Also create dictionary with bin for each plate
platebin_dict[str(theseplates[ii])]= kk
#For each set of plates, instantiate new selection object
platesn_sfs= []
for kk in range(nbins):
if bright:
type_faint= 'constant'
type_bright= 'r'
else:
type_faint= 'r'
type_bright= 'constant'
platesn_sfs.append(segueSelect(sample=self.sample,
plates=plate_in_bins[kk],
select=self.select,
type_bright=type_bright,
dr_bright=dr,
interp_type_bright='tanh',
interp_degree_bright=interp_degree,
robust_bright=robust,
type_faint=type_faint,
dr_faint=dr,
interp_type_faint='tanh',
interp_degree_faint=interp_degree,
robust_faint=robust,
_platephot=copy.copy(self.platephot),
_platespec=copy.copy(self.platespec)
,_spec=copy.copy(self.spec)))
if bright:
self.platesn_plate_in_bins_bright= plate_in_bins
self.platesn_platebin_dict_bright= platebin_dict
self.platesn_sfs_bright= platesn_sfs
else:
self.platesn_plate_in_bins_faint= plate_in_bins
self.platesn_sfs_faint= platesn_sfs
self.platesn_platebin_dict_faint= platebin_dict
return None #Done here!
#First determine the total weight for each plate
if not hasattr(self,'weight'): self.weight= {}
for ii in range(len(self.plates)):
if bright and 'faint' in self.platestr[ii].programname: continue
elif not bright \
and not 'faint' in self.platestr[ii].programname: continue
plate= self.plates[ii]
self.weight[str(plate)]= len(self.platespec[str(plate)])\
/float(len(self.platephot[str(plate)]))
if type.lower() == 'constant':
return #We're done!
if type.lower() == 'sharprcut' or type.lower() == 'tanhrcut':
#For each plate cut at the location of the faintest object
if not hasattr(self,'rcuts'): self.rcuts= {}
if not hasattr(self,'rcuts_correct'): self.rcuts_correct= {}
for ii in range(len(self.plates)):
if bright and 'faint' in self.platestr[ii].programname: continue
elif not bright \
and not 'faint' in self.platestr[ii].programname: continue
p= self.plates[ii]
if self.weight[str(p)] == 0.:
self.rcuts[str(p)]= 0.
self.rcuts_correct[str(p)]= 0.
continue
self.rcuts[str(p)]= numpy.amax(self.platespec[str(p)].dered_r)
denom= float(numpy.sum((self.platephot[str(p)].r <= self.rcuts[str(p)])))
if denom == 0.: self.rcuts_correct[str(p)]= 0.
else:
self.rcuts_correct[str(p)]= \
float(len(self.platephot[str(p)]))/denom
elif type.lower() == 'tanhrcut+brightsharprcut':
#For each plate cut at the location of the brightest and faintest object
if not hasattr(self,'rcuts_faint'): self.rcuts_faint= {}
if not hasattr(self,'rcuts_bright'): self.rcuts_bright= {}
if not hasattr(self,'rcuts_correct'): self.rcuts_correct= {}
for ii in range(len(self.plates)):
if bright and 'faint' in self.platestr[ii].programname: continue
elif not bright \
and not 'faint' in self.platestr[ii].programname: continue
p= self.plates[ii]
if self.weight[str(p)] == 0.:
self.rcuts_bright[str(p)]= 0.
self.rcuts_faint[str(p)]= 0.
self.rcuts_correct[str(p)]= 0.
continue
self.rcuts_bright[str(p)]= numpy.amin(self.platespec[str(p)].dered_r)
self.rcuts_faint[str(p)]= numpy.amax(self.platespec[str(p)].dered_r)
denom= float(numpy.sum((self.platephot[str(p)].r <= self.rcuts_faint[str(p)])*(self.platephot[str(p)].r > self.rcuts_bright[str(p)])))
if denom == 0.: self.rcuts_correct[str(p)]= 0.
else:
self.rcuts_correct[str(p)]= \
float(len(self.platephot[str(p)]))/denom
elif type.lower() == 'r':
#Determine the selection function in bins in r, for bright/faint
nrbins= int(math.floor((17.8-self.rmin)/dr))+1
s_one_r= numpy.zeros((nrbins,len(self.plates)))
s_r= numpy.zeros((nrbins,len(self.plates)))
#Determine s_1(r) for each plate separately first
weights= numpy.zeros(len(self.plates))
if not bright:
thisrmin, thisrmax= 17.8, self.rmax+dr/2. #slightly further to avoid out-of-range errors
else:
thisrmin, thisrmax= self.rmin-dr/2., 17.8 #slightly further to avoid out-of-range errors
for ii in range(len(self.plates)):
plate= self.plates[ii]
if bright and 'faint' in self.platestr[ii].programname:
continue
elif not bright \
and not 'faint' in self.platestr[ii].programname:
continue
nspecr, edges = numpy.histogram(self.platespec[str(plate)].dered_r,bins=nrbins,range=[thisrmin,thisrmax])
nphotr, edges = numpy.histogram(self.platephot[str(plate)].r,
bins=nrbins,
range=[thisrmin,thisrmax])
nspecr= numpy.array(nspecr,dtype='float64')
nphotr= numpy.array(nphotr,dtype='float64')
nonzero= (nspecr > 0.)*(nphotr > 0.)
s_r[nonzero,ii]= nspecr[nonzero].astype('float64')/nphotr[nonzero]
weights[ii]= float(numpy.sum(nspecr))/float(numpy.sum(nphotr))
nspecr/= float(numpy.sum(nspecr))
nphotr/= float(numpy.sum(nphotr))
s_one_r[nonzero,ii]= nspecr[nonzero]/nphotr[nonzero]
if bright:
self.s_r_plate_rs_bright= \
numpy.linspace(self.rmin+dr/2.,17.8-dr/2.,nrbins)
self.s_r_plate_bright= s_r
self.s_one_r_plate_bright= s_one_r
else:
self.s_r_plate_rs_faint= \
numpy.linspace(17.8+dr/2.,self.rmax-dr/2.,nrbins)
self.s_r_plate_faint= s_r
self.s_one_r_plate_faint= s_one_r
s_one_r_plate= s_one_r
s_r_plate= s_r
fromIndividual= False
if fromIndividual:
#Mean or median?
median= False
if median:
s_one_r= numpy.median(s_one_r_plate[:,plateindx],axis=1)
else:
if bright:
s_one_r= numpy.sum(s_one_r_plate,axis=1)/self.nbrightplates
else:
s_one_r= numpy.sum(s_one_r_plate,axis=1)/self.nfaintplates
else:
s_one_r= \
numpy.sum(s_r_plate[:,plateindx],axis=1)\
/numpy.sum(weights)
if bright:
self.s_one_r_bright= s_one_r
self.s_r_bright= s_r
else:
self.s_one_r_faint= s_one_r
self.s_r_faint= s_r
        #Jackknife an uncertainty on the selection function
if bright: nplates= self.nbrightplates
else: nplates= self.nfaintplates
jack_samples= numpy.zeros((nplates,len(s_one_r)))
jack_s_r_plate= s_r_plate[:,plateindx]
jack_s_r_weights= weights[plateindx]
for jj in range(nplates):
boot_indx= numpy.array([True for ii in range(nplates)],\
dtype='bool')
boot_indx[jj]= False
if fromIndividual:
#Mean or median?
if median:
jack_samples[jj,:]= numpy.median(s_one_r_plate[:,plateindx[boot_indx]],
axis=1)
else:
jack_samples[jj,:]= numpy.sum(s_one_r_plate[:,plateindx[boot_indx]],
axis=1)/nplates
else:
jack_samples[jj,:]= \
numpy.sum(jack_s_r_plate[:,boot_indx],axis=1)\
/numpy.sum(jack_s_r_weights[boot_indx])
#Compute jackknife uncertainties
s_one_r_err= numpy.sqrt((nplates-1)*numpy.var(jack_samples,
axis=0))
s_one_r_err[(s_one_r_err == 0.)]= 0.01
if bright:
self.s_one_r_jack_samples_bright= jack_samples
self.s_one_r_err_bright= s_one_r_err
else:
self.s_one_r_jack_samples_faint= jack_samples
self.s_one_r_err_faint= s_one_r_err
if bright: self.interp_type_bright= interp_type
else: self.interp_type_faint= interp_type
if bright:
w= numpy.zeros(len(self.s_one_r_bright))+10000.
yfunc= numpy.zeros(len(w))-20.
nonzero= (self.s_one_r_bright > 0.)
w[nonzero]= \
self.s_one_r_bright[nonzero]/self.s_one_r_err_bright[nonzero]
yfunc[nonzero]= numpy.log(self.s_one_r_bright[nonzero])
self.interp_rs_bright= \
numpy.linspace(self.rmin+1.*dr/2.,17.8-1.*dr/2.,nrbins)
if interp_type.lower() == 'spline':
self.s_one_r_bright_interpolate= interpolate.splrep(\
self.interp_rs_bright,yfunc,
k=interp_degree,w=w)
#Continue along the derivative for out of bounds
minderiv= interpolate.splev(self.interp_rs_bright[0],
self.s_one_r_bright_interpolate,
der=1)
self.s_one_r_bright_minderiv= minderiv
self.s_one_r_bright_minxo= self.interp_rs_bright[0]
self.s_one_r_bright_minyo= yfunc[0]
elif interp_type.lower() == 'tanh':
#Fit a tanh to s_1(r)
params= numpy.array([17.7,numpy.log(0.1),
numpy.log(3.)])
params= optimize.fmin_powell(_sf_tanh_minusloglike,
params,
args=(self.interp_rs_bright,
self.s_one_r_bright,
self.s_one_r_err_bright,
numpy.zeros(len(self.interp_rs_bright))+(self.interp_rs_bright[1]-self.interp_rs_bright[0])/2.,
robust))
self.s_one_r_tanh_params_bright= params
else:
w= numpy.zeros(len(self.s_one_r_faint))+10000.
yfunc= numpy.zeros(len(w))-20.
nonzero= (self.s_one_r_faint > 0.)
w[nonzero]= \
self.s_one_r_faint[nonzero]/self.s_one_r_err_faint[nonzero]
yfunc[nonzero]= numpy.log(self.s_one_r_faint[nonzero])
self.interp_rs_faint= \
numpy.linspace(17.8+1.*dr/2.,self.rmax-dr/2.,nrbins)
if interp_type.lower() == 'spline':
self.s_one_r_faint_interpolate= interpolate.splrep(\
self.interp_rs_faint,yfunc,
k=interp_degree,w=w)
#Continue along the derivative for out of bounds
minderiv= interpolate.splev(self.interp_rs_faint[0],
self.s_one_r_faint_interpolate,
der=1)
self.s_one_r_faint_minderiv= minderiv
self.s_one_r_faint_minxo= self.interp_rs_faint[0]
self.s_one_r_faint_minyo= yfunc[0]
elif interp_type.lower() == 'tanh':
#Fit a tanh to s_1(r)
params= numpy.array([18.7,numpy.log(0.1),
numpy.log(3.)])
params= optimize.fmin_powell(_sf_tanh_minusloglike,
params,
args=(self.interp_rs_faint,
self.s_one_r_faint,
self.s_one_r_err_faint,
numpy.zeros(len(self.interp_rs_faint))+(self.interp_rs_faint[1]-self.interp_rs_faint[0])/2.,robust))
self.s_one_r_tanh_params_faint= params
return None
def _sf_tanh(r,params):
"""Tanh description of the selection,
params=[rcentral,logsigmar,logconstant]"""
return math.exp(params[2])/2.*(1.-numpy.tanh((r-params[0])/math.exp(params[1])))
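#For reference, _sf_tanh implements
#    s(r)= exp(c)/2.*(1.-tanh((r-r_c)/sigma))
#with params= [r_c,log(sigma),log(c)]; a minimal sketch with made-up numbers:
#    _sf_tanh(17.5,[17.7,numpy.log(0.1),numpy.log(1.)]) #~0.98, brightward of the cut
#    _sf_tanh(17.9,[17.7,numpy.log(0.1),numpy.log(1.)]) #~0.02, faintward of the cut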
def _sf_tanh_minusloglike(params,rs,sfs,sferrs,rerrs=None,robust=False):
#return 0.5*numpy.sum((sfs-_sf_tanh(rs,params))**2./2./sferrs**2.)
#Robust
if rerrs is None:
if robust:
return numpy.sum(numpy.fabs((sfs-_sf_tanh(rs,params))/sferrs))
else:
return numpy.sum((sfs-_sf_tanh(rs,params))**2./2./sferrs**2.)
else:
ngrid= 21
nsigma= 3.
grid= numpy.linspace(-nsigma,nsigma,ngrid)
if robust:
presum= numpy.fabs(grid)
else:
presum= grid**2./2.
out= 0.
for ii in range(len(rs)):
thisgrid= grid*rerrs[ii]+rs[ii]
if robust:
out+= misc.logsumexp(presum+numpy.fabs(sfs[ii]-_sf_tanh(thisgrid,
params))/\
sferrs[ii])
else:
out+= misc.logsumexp(presum+(sfs[ii]-_sf_tanh(thisgrid,
params))**2./2./\
sferrs[ii]**2.)
return out
def _linear_func(x,deriv,xo,yo):
"""Evaluate a linear function"""
return deriv*(x-xo)+yo
def ivezic_dist_gr(g,r,feh,dg=0.,dr=0.,dfeh=0.,return_error=False,
dmr=0.1):
"""
NAME:
ivezic_dist_gr
PURPOSE:
Ivezic et al. (2008) distances in terms of g-r for <M0 stars
INPUT:
g, r, feh - dereddened g and r and metallicity
return_error= if True, return errors
dg, dr, dfeh= uncertainties
dmr= intrinsic cmd scatter
OUTPUT:
(dist,disterr) arrays in kpc
HISTORY:
2011-07-11 - Written - Bovy@MPIA (NYU)
"""
#First distances, then uncertainties
gi= _gi_gr(g-r)
mr= _mr_gi(gi,feh)
ds= 10.**(0.2*(r-mr)-2.)
if not return_error: return (ds,0.*ds)
#Now propagate the uncertainties
dgi= numpy.sqrt(_gi_gr(g-r,dg=True)**2.*dg**2.
+_gi_gr(g-r,dr=True)**2.*dr**2.)
dmr= numpy.sqrt(_mr_gi(gi,feh,dgi=True)**2.*dgi**2.
+_mr_gi(gi,feh,dfeh=True)**2.*dfeh**2.+dmr**2.)
derrs= 0.2*numpy.log(10.)*numpy.sqrt(dmr**2.+dr**2.)*ds
return (ds,derrs)
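#Usage sketch (made-up photometry for a single G-type star):
#    ds,derrs= ivezic_dist_gr(numpy.array([15.1]),numpy.array([14.6]),
#                             numpy.array([-0.5]),dg=0.02,dr=0.02,dfeh=0.1,
#                             return_error=True) #distances and errors in kpc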
def juric_dist_gr(g,r,dg=0.,dr=0.,return_error=False,
dmr=0.3,faint=False):
"""
NAME:
juric_dist_gr
PURPOSE:
Juric et al. (2008) distances in terms of g-r for <M0 stars
INPUT:
g, r- dereddened g and r
return_error= if True, return errors
dg, dr= uncertainties
dmr= intrinsic cmd scatter
faint= if True, use faint relation, else use bright
OUTPUT:
(dist,disterr) arrays in kpc
HISTORY:
2011-08-08 - Written - Bovy (NYU)
"""
#First distances, then uncertainties
ri= _ri_gr(g-r)
if faint:
mr= _mr_ri_faint(ri)
else:
mr= _mr_ri_bright(ri)
ds= 10.**(0.2*(r-mr)-2.)
if not return_error: return (ds,0.*ds)
#Now propagate the uncertainties
dri= numpy.sqrt(_ri_gr(g-r,dg=True)**2.*dg**2.
+_ri_gr(g-r,dr=True)**2.*dr**2.)
if faint:
dmr= numpy.sqrt(_mr_ri_faint(ri,dri=True)**2.*dri**2.
+dmr**2.)
else:
dmr= numpy.sqrt(_mr_ri_bright(ri,dri=True)**2.*dri**2.
+dmr**2.)
derrs= 0.2*numpy.log(10.)*numpy.sqrt(dmr**2.+dr**2.)*ds
return (ds,derrs)
def read_gdwarfs(file=_GDWARFALLFILE,logg=False,ug=False,ri=False,sn=True,
ebv=True,nocoords=False):
"""
NAME:
read_gdwarfs
PURPOSE:
read the spectroscopic G dwarf sample
INPUT:
       logg= if True, cut on logg > 4.2; if a number, cut on logg > that number
       ug= if True, cut to 0.6 < u-g < 2.; if list/array, cut to ug[0] < u-g < ug[1]
       ri= if True, cut to -0.1 < r-i < 0.4; if list/array, cut to ri[0] < r-i < ri[1]
       sn= if False, don't cut on SN; if a number, cut on SN > that number (default: 15)
       ebv= if True, cut on E(B-V) < 0.3; if a number, cut on E(B-V) < that number
nocoords= if True, don't calculate distances or transform coordinates
OUTPUT:
cut data, returns numpy.recarray
HISTORY:
2011-07-08 - Written - Bovy@MPIA (NYU)
"""
raw= _load_fits(file)
#First cut on r
indx= (raw.field('dered_r') < 20.2)*(raw.field('dered_r') > 14.5)
raw= raw[indx]
#Then cut on g-r
indx= ((raw.field('dered_g')-raw.field('dered_r')) < 0.55)\
*((raw.field('dered_g')-raw.field('dered_r')) > .48)
raw= raw[indx]
#Cut on velocity errs
indx= (raw.field('pmra_err') > 0.)*(raw.field('pmdec_err') > 0.)\
*(raw.field('vr_err') > 0.)
raw= raw[indx]
#Cut on logg?
if (isinstance(logg,bool) and logg):
indx= (raw.field('logga') > 4.2)
raw= raw[indx]
elif not isinstance(logg,bool):
indx= (raw.field('logga') > logg)
raw= raw[indx]
if isinstance(ug,bool) and ug:
indx= ((raw.field('dered_u')-raw.field('dered_g')) < 2.)\
*((raw.field('dered_u')-raw.field('dered_g')) > .6)
raw= raw[indx]
if not isinstance(ug,bool):
indx= ((raw.field('dered_u')-raw.field('dered_g')) < ug[1])\
*((raw.field('dered_u')-raw.field('dered_g')) > ug[0])
raw= raw[indx]
if isinstance(ri,bool) and ri:
indx= ((raw.field('dered_r')-raw.field('dered_i')) < .4)\
*((raw.field('dered_r')-raw.field('dered_i')) > -.1)
raw= raw[indx]
elif not isinstance(ri,bool):
indx= ((raw.field('dered_r')-raw.field('dered_i')) < ri[1])\
*((raw.field('dered_r')-raw.field('dered_i')) > ri[0])
raw= raw[indx]
if (isinstance(sn,bool) and sn):
indx= (raw.field('sna') > 15.)
raw= raw[indx]
elif not isinstance(sn,bool):
indx= (raw.field('sna') > sn)
raw= raw[indx]
if isinstance(ebv,bool) and ebv:
indx= (raw.field('ebv') < .3)
raw= raw[indx]
elif not isinstance(ebv,bool):
indx= (raw.field('ebv') < ebv)
raw= raw[indx]
if nocoords: return raw
raw= _add_distances(raw)
raw= _add_velocities(raw)
return raw
def read_kdwarfs(file=_KDWARFALLFILE,logg=False,ug=False,ri=False,sn=True,
ebv=True,nocoords=False):
"""
NAME:
read_kdwarfs
PURPOSE:
read the spectroscopic K dwarf sample
INPUT:
logg= if True, cut on logg (default: >4.2)
ug= if True, cut on u-g
ri= if True, cut on r-i
sn= if False, don't cut on SN
ebv= if True, cut on E(B-V)
nocoords= if True, don't calculate distances or transform coordinates
OUTPUT:
cut data, returns numpy.recarray
HISTORY:
2011-07-11 - Written - Bovy@MPIA (NYU)
"""
raw= _load_fits(file)
#First cut on r
indx= (raw.field('dered_r') < 19.)*(raw.field('dered_r') > 14.5)
raw= raw[indx]
#Then cut on g-r
indx= ((raw.field('dered_g')-raw.field('dered_r')) < 0.75)\
*((raw.field('dered_g')-raw.field('dered_r')) > .55)
raw= raw[indx]
#Cut on velocity errs
indx= (raw.field('pmra_err') > 0.)*(raw.field('pmdec_err') > 0.)\
*(raw.field('vr_err') > 0.)
raw= raw[indx]
#Cut on logg?
if isinstance(logg,bool) and logg:
indx= (raw.field('logga') > 4.2)
raw= raw[indx]
elif not isinstance(logg,bool):
indx= (raw.field('logga') > logg)
raw= raw[indx]
if isinstance(ug,bool) and ug:
indx= ((raw.field('dered_u')-raw.field('dered_g')) < 2.5)\
*((raw.field('dered_u')-raw.field('dered_g')) > 1.5)
raw= raw[indx]
elif not isinstance(ug,bool):
indx= ((raw.field('dered_u')-raw.field('dered_g')) < ug[1])\
*((raw.field('dered_u')-raw.field('dered_g')) > ug[0])
raw= raw[indx]
if isinstance(ri,bool) and ri:
indx= ((raw.field('dered_r')-raw.field('dered_i')) < .7)\
*((raw.field('dered_r')-raw.field('dered_i')) > .1)
raw= raw[indx]
elif not isinstance(ri,bool):
indx= ((raw.field('dered_r')-raw.field('dered_i')) < ri[1])\
*((raw.field('dered_r')-raw.field('dered_i')) > ri[0])
raw= raw[indx]
if isinstance(sn,bool) and sn:
indx= (raw.field('sna') > 15.)
raw= raw[indx]
elif not isinstance(sn,bool):
indx= (raw.field('sna') > sn)
raw= raw[indx]
if isinstance(ebv,bool) and ebv:
indx= (raw.field('ebv') < .3)
raw= raw[indx]
elif not isinstance(ebv,bool):
indx= (raw.field('ebv') < ebv)
raw= raw[indx]
if nocoords: return raw
raw= _add_distances(raw)
raw= _add_velocities(raw)
return raw
def _add_distances(raw):
"""Add distances"""
ds,derrs= ivezic_dist_gr(raw.dered_g,raw.dered_r,raw.feh,
return_error=True,dg=raw.g_err,
dr=raw.r_err,dfeh=raw.feh_err)
raw= _append_field_recarray(raw,'dist',ds)
raw= _append_field_recarray(raw,'dist_err',derrs)
return raw
def _add_velocities(raw):
if not _COORDSLOADED:
print "galpy.util.bovy_coords failed to load ..."
print "Install galpy for coordinate transformations ..."
print "*not* adding velocities ..."
return raw
#We start from RA and Dec
lb= bovy_coords.radec_to_lb(raw.ra,raw.dec,degree=True)
XYZ= bovy_coords.lbd_to_XYZ(lb[:,0],lb[:,1],raw.dist,degree=True)
pmllpmbb= bovy_coords.pmrapmdec_to_pmllpmbb(raw.pmra,raw.pmdec,
raw.ra,raw.dec,degree=True)
#print numpy.mean(pmllpmbb[:,0]-raw.pml), numpy.std(pmllpmbb[:,0]-raw.pml)
#print numpy.mean(pmllpmbb[:,1]-raw.pmb), numpy.std(pmllpmbb[:,1]-raw.pmb)
vxvyvz= bovy_coords.vrpmllpmbb_to_vxvyvz(raw.vr,pmllpmbb[:,0],
pmllpmbb[:,1],lb[:,0],lb[:,1],
raw.dist,degree=True)
#Solar motion from Schoenrich & Binney
vxvyvz[:,0]+= -11.1
vxvyvz[:,1]+= 12.24
vxvyvz[:,2]+= 7.25
#print numpy.mean(vxvyvz[:,2]), numpy.std(vxvyvz[:,2])
#Propagate uncertainties
ndata= len(raw.ra)
cov_pmradec= numpy.zeros((ndata,2,2))
cov_pmradec[:,0,0]= raw.pmra_err**2.
cov_pmradec[:,1,1]= raw.pmdec_err**2.
cov_pmllbb= bovy_coords.cov_pmrapmdec_to_pmllpmbb(cov_pmradec,raw.ra,
raw.dec,degree=True)
cov_vxvyvz= bovy_coords.cov_dvrpmllbb_to_vxyz(raw.dist,
raw.dist_err,
raw.vr_err,
pmllpmbb[:,0],pmllpmbb[:,1],
cov_pmllbb,lb[:,0],lb[:,1],
degree=True)
#Cast
XYZ= XYZ.astype(numpy.float64)
vxvyvz= vxvyvz.astype(numpy.float64)
cov_vxvyvz= cov_vxvyvz.astype(numpy.float64)
#Append results to structure
raw= _append_field_recarray(raw,'xc',XYZ[:,0])
raw= _append_field_recarray(raw,'yc',XYZ[:,1])
raw= _append_field_recarray(raw,'zc',XYZ[:,2])
raw= _append_field_recarray(raw,'vxc',vxvyvz[:,0])
raw= _append_field_recarray(raw,'vyc',vxvyvz[:,1])
raw= _append_field_recarray(raw,'vzc',vxvyvz[:,2])
raw= _append_field_recarray(raw,'vxc_err',numpy.sqrt(cov_vxvyvz[:,0,0]))
raw= _append_field_recarray(raw,'vyc_err',numpy.sqrt(cov_vxvyvz[:,1,1]))
raw= _append_field_recarray(raw,'vzc_err',numpy.sqrt(cov_vxvyvz[:,2,2]))
raw= _append_field_recarray(raw,'vxvyc_rho',cov_vxvyvz[:,0,1]\
/numpy.sqrt(cov_vxvyvz[:,0,0])\
/numpy.sqrt(cov_vxvyvz[:,1,1]))
raw= _append_field_recarray(raw,'vxvzc_rho',cov_vxvyvz[:,0,2]\
/numpy.sqrt(cov_vxvyvz[:,0,0])\
/numpy.sqrt(cov_vxvyvz[:,2,2]))
raw= _append_field_recarray(raw,'vyvzc_rho',cov_vxvyvz[:,1,2]\
/numpy.sqrt(cov_vxvyvz[:,1,1])\
/numpy.sqrt(cov_vxvyvz[:,2,2]))
return raw
def _load_fits(file,ext=1):
"""Loads fits file's data and returns it as a numpy.recarray with lowercase field names"""
hdulist= pyfits.open(file)
out= hdulist[ext].data
hdulist.close()
return _as_recarray(out)
def _append_field_recarray(recarray, name, new):
new = numpy.asarray(new)
newdtype = numpy.dtype(recarray.dtype.descr + [(name, new.dtype)])
newrecarray = numpy.recarray(recarray.shape, dtype=newdtype)
for field in recarray.dtype.fields:
newrecarray[field] = recarray.field(field)
newrecarray[name] = new
return newrecarray
def _as_recarray(recarray):
"""go from FITS_rec to recarray"""
newdtype = numpy.dtype(recarray.dtype.descr)
newdtype.names= tuple([n.lower() for n in newdtype.names])
newrecarray = numpy.recarray(recarray.shape, dtype=newdtype)
for field in recarray.dtype.fields:
newrecarray[field.lower()] = recarray.field(field)
return newrecarray
#Ivezic and Juric distance functions
def _mr_gi(gi,feh,dgi=False,dfeh=False):
"""Ivezic+08 photometric distance"""
if dgi:
return 14.32-2.*12.97*gi+3.*6.127*gi**2.-4.*1.267*gi**3.\
+5.*0.0967*gi**4.
elif dfeh:
return -1.11-0.36*feh
else:
mro= -5.06+14.32*gi-12.97*gi**2.+6.127*gi**3.-1.267*gi**4.\
+0.0967*gi**5.
dmr= 4.5-1.11*feh-0.18*feh**2.
mr= mro+dmr
return mr
def _mr_ri_bright(ri,dri=False):
"""Juric+08 bright photometric distance"""
if dri:
return 13.3-2.*11.5*ri+3.*5.4*ri**2.-4.*0.7*ri**3.
else:
return 3.2+13.3*ri-11.5*ri**2.+5.4*ri**3.-0.7*ri**4.
def _mr_ri_faint(ri,dri=False):
"""Juric+08 faint photometric distance"""
if dri:
return 11.86-2.*10.74*ri+3.*5.99*ri**2.-4.*1.2*ri**3.
else:
return 4.+11.86*ri-10.74*ri**2.+5.99*ri**3.-1.2*ri**4.
def _gi_gr(gr,dr=False,dg=False):
"""(g-i) = (g-r)+(r-i), with Juric et al. (2008) stellar locus for g-r,
BOVY: JUST USES LINEAR APPROXIMATION VALID FOR < M0"""
if dg:
return 1.+1./2.34
elif dr:
return -1.-1./2.34
else:
ri= (gr-0.12)/2.34
return gr+ri
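#Note: _gi_gr linearizes the Juric et al. (2008) stellar locus as
#    r-i= (g-r-0.12)/2.34  ==>  g-i= (g-r)+(g-r-0.12)/2.34,
#which is why the dg/dr branches above return the constant derivatives +/-(1.+1./2.34)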
def _ri_gr(gr,dr=False,dg=False):
"""(r-i) = f(g-r), with Juric et al. (2008) stellar locus for g-r,
BOVY: JUST USES LINEAR APPROXIMATION VALID FOR < M0"""
if dg:
return 1./2.34
elif dr:
return 1./2.34
else:
ri= (gr-0.07)/2.34
return ri
############################CLEAN UP PHOTOMETRY################################
def _cleanup_photometry():
#Load plates
platestr= _load_fits(os.path.join(_SEGUESELECTDIR,
'segueplates.fits'))
plates= list(platestr.plate)
for ii in range(len(plates)):
plate= plates[ii]
platefile= os.path.join(_SEGUESELECTDIR,'segueplates',
'%i.fit' % plate)
try:
platephot= _load_fits(platefile)
except AttributeError:
continue
#Split into bright and faint
if 'faint' in platestr[ii].programname:
indx= (platephot.field('r') >= 17.8)
platephot= platephot[indx]
else:
indx= (platephot.field('r') < 17.8)
platephot= platephot[indx]
#Save
pyfits.writeto(platefile,platephot,clobber=True)
#########################ADD KS VALUES TO PLATES###############################
def _add_ks(outfile,sample='g',select='all'):
"""Add the KS probability to the segueplates file"""
#Load plates
platestr= _load_fits(os.path.join(_SEGUESELECTDIR,
'segueplates.fits'))
plates= list(platestr.plate)
#Load selection functions
sfconst= segueSelect(sn=True,sample=sample,
type_bright='constant',
type_faint='constant',select=select)
sfr= segueSelect(sn=True,sample=sample,
type_bright='r',
type_faint='r',select=select,
dr_bright=0.05,dr_faint=0.2,
robust_bright=True)
if sample.lower() == 'k' and select.lower() == 'program':
dr_bright= 0.4
dr_faint= 0.5
else:
dr_bright= 0.2
dr_faint= 0.2
sfplatesn_r= segueSelect(sn=True,sample=sample,
type_bright='platesn_r',
type_faint='platesn_r',select=select,
dr_bright=dr_bright,
dr_faint=dr_faint,
robust_bright=True)
sfsharp= segueSelect(sn=True,sample=sample,
type_bright='sharprcut',
type_faint='sharprcut',select=select)
sftanh= segueSelect(sn=True,sample=sample,
type_bright='tanhrcut',
type_faint='tanhrcut',select=select)
#Calculate KS for each plate
nplates= len(plates)
ksconst= numpy.zeros(nplates)
ksr= numpy.zeros(nplates)
ksplatesn_r= numpy.zeros(nplates)
kssharp= numpy.zeros(nplates)
kstanh= numpy.zeros(nplates)
for ii in range(nplates):
plate= plates[ii]
sys.stdout.write('\r'+"Working on plate %i" % plate)
sys.stdout.flush()
try:
ksconst[ii]= sfconst.check_consistency(plate)
except KeyError:
continue
ksr[ii]= sfr.check_consistency(plate)
ksplatesn_r[ii]= sfplatesn_r.check_consistency(plate)
kssharp[ii]= sfsharp.check_consistency(plate)
kstanh[ii]= sftanh.check_consistency(plate)
sys.stdout.write('\r'+_ERASESTR+'\r')
sys.stdout.flush()
#Add to platestr
platestr= _append_field_recarray(platestr,'ksconst_'+sample+'_'+select,
ksconst)
platestr= _append_field_recarray(platestr,'ksr_'+sample+'_'+select,
ksr)
platestr= _append_field_recarray(platestr,'ksplatesn_r_'+sample+'_'+select,
ksplatesn_r)
platestr= _append_field_recarray(platestr,'kssharp_'+sample+'_'+select,
kssharp)
platestr= _append_field_recarray(platestr,'kstanh_'+sample+'_'+select,
kstanh)
#Save
pyfits.writeto(outfile,platestr,clobber=True)
return
|
bsd-3-clause
|
perryjohnson/biplaneblade
|
sandia_blade_lib/prep_stn06_mesh.py
|
1
|
7155
|
"""Write initial TrueGrid files for one Sandia blade station.
Usage
-----
start an IPython (qt)console with the pylab flag:
$ ipython qtconsole --pylab
or
$ ipython --pylab
Then, from the prompt, run this script:
|> %run sandia_blade_lib/prep_stnXX_mesh.py
or
|> import sandia_blade_lib/prep_stnXX_mesh
Author: Perry Roth-Johnson
Last updated: April 10, 2014
"""
import matplotlib.pyplot as plt
import lib.blade as bl
import lib.poly_utils as pu
from shapely.geometry import Polygon
# SET THESE PARAMETERS -----------------
station_num = 6
# --------------------------------------
plt.close('all')
# load the Sandia blade
m = bl.MonoplaneBlade('Sandia blade SNL100-00', 'sandia_blade')
# pre-process the station dimensions
station = m.list_of_stations[station_num-1]
station.airfoil.create_polygon()
station.structure.create_all_layers()
station.structure.save_all_layer_edges()
station.structure.write_all_part_polygons()
# plot the parts
station.plot_parts()
# access the structure for this station
st = station.structure
# upper spar cap -----------------------------------------------------------
label = 'upper spar cap'
# create the bounding polygon
points_usc = [
(-0.75, 2.5),
( 0.75, 2.5),
( 0.75, 3.0),
(-0.75, 3.0)
]
bounding_polygon = Polygon(points_usc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.root_buildup, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
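# (Each structural region below repeats the same recipe: define a bounding
# polygon, plot it for visual inspection, then cut and write every affected
# layer polygon with pu.cut_plot_and_write_alt_layer.)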
# lower spar cap -----------------------------------------------------------
label = 'lower spar cap'
# create the bounding polygon
points_lsc = [
(-0.75,-3.0),
( 0.75,-3.0),
( 0.75,-2.5),
(-0.75,-2.5)
]
bounding_polygon = Polygon(points_lsc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.root_buildup, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# TE reinforcement ---------------------------------------------------------
label = 'TE reinforcement'
# create the bounding polygon
points_te = [
(1.84700000, 1.99925983),
(1.95, 2.1),
(3.0, 2.0),
(3.0,-2.0),
(1.95,-2.1),
(1.84700000, -1.99925983)
]
bounding_polygon = Polygon(points_te)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.root_buildup, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# LE panel -----------------------------------------------------------------
label = 'LE panel'
# create the bounding polygon
is1 = st.internal_surface_1.layer['resin']
points_le = [
(-3.00,-3.0),
(-0.75,-3.0),
(-0.75000000, -2.58336990),
# (-0.75994863, -2.56986995),
is1.polygon.interiors[0].coords[-1],
is1.polygon.interiors[0].coords[-31],
# (-0.75994863, 2.56986995),
(-0.75000000, 2.58336990),
(-0.75, 3.0),
(-3.00, 3.0)
]
bounding_polygon = Polygon(points_le)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.root_buildup, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# upper aft panel 1 -------------------------------------------------------
label = 'upper aft panel 1'
# create the bounding polygon
points_ur = [
(0.75, 2.8),
(2.0, 2.8),
(1.95, 2.1),
points_te[0],
(1.84700000, 1.99792189),
(1.83700000, 1.99343115),
(0.8, 2.0),
(0.75949119, 2.56768163),
(0.75000000, 2.58336990)
]
bounding_polygon = Polygon(points_ur)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.root_buildup, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# lower aft panel 1 -------------------------------------------------------
label = 'lower aft panel 1'
# create the bounding polygon
points_lr = [
(0.75, -2.8),
(2.0, -2.8),
(1.95, -2.1),
points_te[-1],
(1.84700000, -points_ur[-5][1]),
(1.83700000, -points_ur[-4][1]),
(0.8, -2.0),
(0.75949119, -2.56768163),
(0.75000000, -points_ur[-1][1])
]
bounding_polygon = Polygon(points_lr)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.root_buildup, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# show the plot
plt.show()
# write the TrueGrid input file for mesh generation ---------------------
st.write_truegrid_inputfile(
interrupt_flag=True,
additional_layers=[
st.spar_cap.layer['upper'],
st.spar_cap.layer['lower'],
st.TE_reinforcement.layer['uniax'],
st.aft_panel_1.layer['upper'],
st.aft_panel_1.layer['lower'],
st.LE_panel.layer['foam']
])
|
gpl-3.0
|
gotomypc/scikit-learn
|
sklearn/neighbors/approximate.py
|
128
|
22351
|
"""Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
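# Illustrative sketch (4-bit values instead of the real 32-bit hashes): for a
# query 0b1011 with prefix length h=2, left_mask=0b1100 and right_mask=0b0011,
# so the two searchsorted calls bracket the slice of `tree` lying in
# [0b1000, 0b1011], i.e. exactly the sorted hashes whose top two bits are 10.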
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
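# The loop above is a vectorized binary search on the prefix length: for each
# query, lo/hi bracket the largest depth at which the window returned by
# _find_matching_indices is still non-empty, and that depth is stored in res.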
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
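    # The sign pattern of the projections is packed 8 bits per byte and viewed
    # as big-endian uint32 (HASH_DTYPE), so the first random projections end up
    # in the most significant bits of each 32-bit hash.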
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
    to vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
        A value ranging from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
        # If there are not enough candidates, fill the remainder uniformly
        # from the unselected indices.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
            # n_components = hash size and n_features = n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[i], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[i], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
            New data points to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
            # find the positions at which to insert the new hashes.
positions = self.trees_[i].searchsorted(bin_X)
            # insert the hashed values into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
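# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original estimator code).  It assumes
# the class defined above is exposed as ``sklearn.neighbors.LSHForest``; the
# import path, dataset sizes and parameter values are illustrative only.
def _example_lsh_forest_usage():
    """Minimal fit / query / incremental-update round trip."""
    import numpy as np
    from sklearn.neighbors import LSHForest  # assumed public import path
    rng = np.random.RandomState(0)
    X_index = rng.rand(100, 10)   # points to index
    X_query = rng.rand(5, 10)     # query points
    lshf = LSHForest(n_estimators=10, n_candidates=50, random_state=0)
    lshf.fit(X_index)
    # cosine distances and indices of the 3 approximate nearest neighbors
    distances, indices = lshf.kneighbors(X_query, n_neighbors=3)
    # new points can be appended to the index without refitting from scratch
    lshf.partial_fit(rng.rand(10, 10))
    return distances, indices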
|
bsd-3-clause
|
rlong011/trading-with-python
|
cookbook/reconstructVXX/reconstructVXX.py
|
77
|
3574
|
# -*- coding: utf-8 -*-
"""
Reconstructing VXX from futures data
author: Jev Kuznetsov
License : BSD
"""
from __future__ import division
from pandas import *
import numpy as np
import os
class Future(object):
""" vix future class, used to keep data structures simple """
def __init__(self,series,code=None):
""" code is optional, example '2010_01' """
self.series = series.dropna() # price data
self.settleDate = self.series.index[-1]
self.dt = len(self.series) # roll period (this is default, should be recalculated)
self.code = code # string code 'YYYY_MM'
def monthNr(self):
""" get month nr from the future code """
return int(self.code.split('_')[1])
def dr(self,date):
""" days remaining before settlement, on a given date """
return(sum(self.series.index>date))
def price(self,date):
""" price on a date """
return self.series.get_value(date)
def returns(df):
""" daily return """
return (df/df.shift(1)-1)
def reconstructVXX():
"""
calculate VXX returns
needs a previously preprocessed file vix_futures.csv
"""
dataDir = os.path.expanduser('~')+'/twpData'
X = DataFrame.from_csv(dataDir+'/vix_futures.csv') # raw data table
# build end dates list & futures classes
futures = []
codes = X.columns
endDates = []
for code in codes:
f = Future(X[code],code=code)
print code,':', f.settleDate
endDates.append(f.settleDate)
futures.append(f)
endDates = np.array(endDates)
# set roll period of each future
for i in range(1,len(futures)):
futures[i].dt = futures[i].dr(futures[i-1].settleDate)
# Y is the result table
idx = X.index
Y = DataFrame(index=idx, columns=['first','second','days_left','w1','w2',
'ret','30days_avg'])
# W is the weight matrix
W = DataFrame(data = np.zeros(X.values.shape),index=idx,columns = X.columns)
# for VXX calculation see http://www.ipathetn.com/static/pdf/vix-prospectus.pdf
# page PS-20
for date in idx:
        i = np.nonzero(endDates >= date)[0][0]  # find the first non-expired future
first = futures[i] # first month futures class
second = futures[i+1] # second month futures class
        dr = first.dr(date)  # number of remaining days in the first futures contract
        dt = first.dt  # number of business days in the roll period
W.set_value(date,codes[i],100*dr/dt)
W.set_value(date,codes[i+1],100*(dt-dr)/dt)
# this is all just debug info
p1 = first.price(date)
p2 = second.price(date)
w1 = 100*dr/dt
w2 = 100*(dt-dr)/dt
Y.set_value(date,'first',p1)
Y.set_value(date,'second',p2)
Y.set_value(date,'days_left',first.dr(date))
Y.set_value(date,'w1',w1)
Y.set_value(date,'w2',w2)
Y.set_value(date,'30days_avg',(p1*w1+p2*w2)/100)
valCurr = (X*W.shift(1)).sum(axis=1) # value on day N
valYest = (X.shift(1)*W.shift(1)).sum(axis=1) # value on day N-1
Y['ret'] = valCurr/valYest-1 # index return on day N
return Y
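# Hedged worked example (illustrative only, not part of the original script):
# the front- and second-month weights used above are linear in dr, the days
# remaining before the first contract settles, relative to the roll period dt.
# The prices p1 and p2 below are made-up sample values.
def exampleRollWeights(dr=10, dt=21, p1=20.0, p2=22.0):
    """ show the roll weights and the 30-day proxy for sample inputs """
    w1 = 100 * dr / dt             # weight of the first-month future, in %
    w2 = 100 * (dt - dr) / dt      # weight of the second-month future, in %
    proxy = (p1 * w1 + p2 * w2) / 100.0   # weighted 30-day price proxy
    # with dr=10, dt=21: w1 ~ 47.6, w2 ~ 52.4, proxy ~ 21.05
    return w1, w2, proxy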
##-------------------Main script---------------------------
if __name__=="__main__":
    Y = reconstructVXX()
    print Y.head(30)
Y.to_csv('reconstructedVXX.csv')
|
bsd-3-clause
|
dingocuster/scikit-learn
|
sklearn/utils/tests/test_random.py
|
230
|
7344
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
    # This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
    # This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting the
        # number of permutations. However, it works with sampling algorithms
        # that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
    # Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
    # Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
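# Hedged usage sketch (illustrative, not an actual test case): how the sampler
# exercised above is typically called.  The population size and the method
# names simply mirror the ``methods`` tuple used in the tests.
def _example_sample_without_replacement_usage():
    # draw 5 distinct integers from range(100) with each available method
    draws = {}
    for method in ("auto", "tracking_selection", "reservoir_sampling", "pool"):
        s = sample_without_replacement(n_population=100, n_samples=5,
                                       method=method, random_state=0)
        # every draw contains 5 unique values in [0, 100)
        assert_equal(np.size(np.unique(s)), 5)
        draws[method] = s
    return draws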
|
bsd-3-clause
|
kambysese/mne-python
|
examples/time_frequency/plot_temporal_whitening.py
|
20
|
1828
|
"""
================================
Temporal whitening with AR model
================================
Here we fit an AR model to the data and use it
to temporally whiten the signals.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import fit_iir_model_raw
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_ecg-proj.fif'
raw = mne.io.read_raw_fif(raw_fname)
proj = mne.read_proj(proj_fname)
raw.info['projs'] += proj
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # mark bad channels
# Set up pick list: Gradiometers - bad channels
picks = mne.pick_types(raw.info, meg='grad', exclude='bads')
order = 5 # define model order
picks = picks[:1]
# Estimate AR models on raw data
b, a = fit_iir_model_raw(raw, order=order, picks=picks, tmin=60, tmax=180)
d, times = raw[0, 10000:20000] # look at one channel from now on
d = d.ravel() # make flat vector
innovation = signal.convolve(d, a, 'valid')
d_ = signal.lfilter(b, a, innovation) # regenerate the signal
d_ = np.r_[d_[0] * np.ones(order), d_] # dummy samples to keep signal length
###############################################################################
# Plot the different time series and PSDs
plt.close('all')
plt.figure()
plt.plot(d[:100], label='signal')
plt.plot(d_[:100], label='regenerated signal')
plt.legend()
plt.figure()
plt.psd(d, Fs=raw.info['sfreq'], NFFT=2048)
plt.psd(innovation, Fs=raw.info['sfreq'], NFFT=2048)
plt.psd(d_, Fs=raw.info['sfreq'], NFFT=2048, linestyle='--')
plt.legend(('Signal', 'Innovation', 'Regenerated signal'))
plt.show()
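###############################################################################
# Hedged synthetic sketch (illustrative, independent of the MNE sample data):
# the same whitening idea on a toy AR(2) process, using only NumPy and SciPy.
# The AR coefficients below are made up for illustration.
def _example_ar_whitening():
    rng = np.random.RandomState(42)
    a_toy = np.array([1.0, -0.75, 0.25])   # assumed AR(2) denominator
    b_toy = np.array([1.0])                # numerator of a pure AR model
    white = rng.randn(5000)                # driving white noise
    colored = signal.lfilter(b_toy, a_toy, white)        # correlated signal
    whitened = signal.convolve(colored, a_toy, 'valid')  # inverse filtering
    # the recovered innovation matches the original noise almost exactly
    return np.corrcoef(whitened, white[len(a_toy) - 1:])[0, 1]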
|
bsd-3-clause
|
karstenw/nodebox-pyobjc
|
examples/Extended Application/sklearn/examples/applications/svm_gui.py
|
124
|
11251
|
"""
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm, mainly intended for didactic
purposes. You can create data points by pointing and clicking, and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
try:
import tkinter as Tk
except ImportError:
# Backward compat for Python 2
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
        self.ax.text(-20, -60, r"RBF: $\exp (-\gamma \| u-v \|^2)$")
        self.ax.text(10, -60, r"Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
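# Hedged non-GUI sketch (illustrative only): the same decision-surface grid
# computed by Controller.decision_surface, on a tiny made-up dataset, so the
# core computation can be exercised without a Tk display.
def _example_decision_surface():
    X = np.array([[-20., -20.], [-10., -25.], [15., 20.], [25., 10.]])
    y = np.array([1, 1, -1, -1])
    clf = svm.SVC(kernel="rbf", C=1.0, gamma=0.01).fit(X, y)
    delta = 1
    xx = np.arange(x_min, x_max + delta, delta)
    yy = np.arange(y_min, y_max + delta, delta)
    X1, X2 = np.meshgrid(xx, yy)
    Z = clf.decision_function(np.c_[X1.ravel(), X2.ravel()]).reshape(X1.shape)
    return X1, X2, Z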
|
mit
|
mlperf/training_results_v0.5
|
v0.5.0/google/cloud_v2.8/gnmt-tpuv2-8/code/gnmt/model/t2t/tensor2tensor/utils/decoding.py
|
3
|
30692
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decoding utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import operator
import os
import re
import time
import numpy as np
import six
from six.moves import input # pylint: disable=redefined-builtin
from tensor2tensor.data_generators import problem as problem_lib
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import mlperf_log
from tensor2tensor.utils import registry
import tensorflow as tf
FLAGS = tf.flags.FLAGS
# Number of samples to draw for an image input (in such cases as captioning)
IMAGE_DECODE_LENGTH = 100
def decode_hparams(overrides=""):
"""Hyperparameters for decoding."""
hp = tf.contrib.training.HParams(
save_images=False,
log_results=True,
extra_length=100,
batch_size=0,
beam_size=4,
alpha=0.6,
eos_penalty=0.0,
block_size=0,
guess_and_check_top_k=0,
guess_and_check_epsilon=-1,
return_beams=False,
write_beam_scores=False,
max_input_size=-1,
identity_output=False,
num_samples=-1,
delimiter="\n",
decode_to_file=None,
decode_in_memory=False,
summaries_log_dir="decode", # Directory to write hook summaries.
shards=1, # How many shards of data to decode (treating 1 as None).
shard_id=0, # Which shard are we decoding if more than 1 above.
shards_start_offset=0, # Number of the first shard to decode.
num_decodes=1,
force_decode_length=False,
display_decoded_images=False,
# Multi-problem decoding task id.
multiproblem_task_id=-1,
# Used for video decoding.
frames_per_second=10,
skip_eos_postprocess=False,
# Creates a blue/red border covering border_percent of the frame.
border_percent=2,
# Maximum number of videos displayed.
# Total number of videos are max_display_outputs * num_decodes
max_display_outputs=10,
# Used for MLPerf compliance logging.
mlperf_decode_step=0.0,
mlperf_threshold=25.0,
mlperf_success=False)
hp.parse(overrides)
return hp
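# Hedged usage note (illustrative, not library documentation): overrides are
# parsed by HParams.parse, so callers pass a comma-separated "name=value"
# string.  The override values below are made-up examples.
def _example_decode_hparams_overrides():
  """Build decode hparams with a few illustrative overrides."""
  hp = decode_hparams("beam_size=1,alpha=0.6,batch_size=16")
  return hp.beam_size, hp.alpha, hp.batch_size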
def log_decode_results(inputs,
outputs,
problem_name,
prediction_idx,
inputs_vocab,
targets_vocab,
targets=None,
save_images=False,
output_dir=None,
identity_output=False,
log_results=True):
"""Log inference results."""
# TODO(lukaszkaiser) refactor this into feature_encoder
is_video = "video" in problem_name or "gym" in problem_name
if is_video:
def fix_and_save_video(vid, prefix):
save_path_template = os.path.join(
output_dir,
"%s_%s_%05d_{:05d}.png" % (problem_name, prefix, prediction_idx))
# this is only required for predictions
if vid.shape[-1] == 1:
vid = np.squeeze(vid, axis=-1)
save_video(vid, save_path_template)
tf.logging.info("Saving video: {}".format(prediction_idx))
fix_and_save_video(inputs, "inputs")
fix_and_save_video(outputs, "outputs")
fix_and_save_video(targets, "targets")
is_image = "image" in problem_name
is_text2class = isinstance(registry.problem(problem_name),
text_problems.Text2ClassProblem)
skip_eos_postprocess = is_image or is_text2class
decoded_inputs = None
if is_image and save_images:
save_path = os.path.join(
output_dir, "%s_prediction_%d.jpg" % (problem_name, prediction_idx))
show_and_save_image(inputs / 255., save_path)
elif inputs is not None and inputs_vocab:
if identity_output:
decoded_inputs = " ".join(map(str, inputs.flatten()))
else:
decoded_inputs = inputs_vocab.decode(_save_until_eos(
inputs, skip_eos_postprocess))
if log_results and not is_video:
tf.logging.info("Inference results INPUT: %s" % decoded_inputs)
decoded_targets = None
decoded_outputs = None
if identity_output:
decoded_outputs = " ".join(map(str, outputs.flatten()))
if targets is not None:
decoded_targets = " ".join(map(str, targets.flatten()))
else:
decoded_outputs = targets_vocab.decode(_save_until_eos(
outputs, skip_eos_postprocess))
if targets is not None and log_results:
decoded_targets = targets_vocab.decode(_save_until_eos(
targets, skip_eos_postprocess))
if not is_video:
tf.logging.info("Inference results OUTPUT: %s" % decoded_outputs)
if targets is not None and log_results and not is_video:
tf.logging.info("Inference results TARGET: %s" % decoded_targets)
return decoded_inputs, decoded_outputs, decoded_targets
def decode_from_dataset(estimator,
problem_name,
hparams,
decode_hp,
decode_to_file=None,
dataset_split=None,
checkpoint_path=None):
"""Perform decoding from dataset."""
tf.logging.info("Performing local inference from dataset for %s.",
str(problem_name))
# We assume that worker_id corresponds to shard number.
shard = decode_hp.shard_id if decode_hp.shards > 1 else None
# Setup decode output directory for any artifacts that may be written out
output_dir = os.path.join(estimator.model_dir, "decode")
tf.gfile.MakeDirs(output_dir)
# If decode_hp.batch_size is specified, use a fixed batch size
if decode_hp.batch_size:
hparams.batch_size = decode_hp.batch_size
hparams.use_fixed_batch_size = True
dataset_kwargs = {
"shard": shard,
"dataset_split": dataset_split,
"max_records": decode_hp.num_samples
}
# Build the inference input function
problem = hparams.problem
infer_input_fn = problem.make_estimator_input_fn(
tf.estimator.ModeKeys.PREDICT, hparams, dataset_kwargs=dataset_kwargs)
predictions, output_dirs = [], []
for decode_id in range(decode_hp.num_decodes):
tf.logging.info("Decoding {}".format(decode_id))
# Create decode directory if not in-memory decoding.
if not decode_hp.decode_in_memory:
output_dir = os.path.join(estimator.model_dir, "decode_%05d" % decode_id)
tf.gfile.MakeDirs(output_dir)
output_dirs.append(output_dir)
result = decode_once(estimator,
problem_name,
hparams,
infer_input_fn,
decode_hp,
decode_to_file,
output_dir,
log_results=not decode_hp.decode_in_memory,
checkpoint_path=checkpoint_path)
if decode_hp.decode_in_memory:
output_dirs = [output_dir]
predictions.append(result)
if decode_hp.decode_to_file:
decode_hp.decode_to_file = _decode_filename(
decode_hp.decode_to_file, problem_name, decode_hp)
run_postdecode_hooks(DecodeHookArgs(
estimator=estimator,
problem=problem,
output_dirs=output_dirs,
hparams=hparams,
decode_hparams=decode_hp,
predictions=predictions
), dataset_split)
return predictions
def decode_once(estimator,
problem_name,
hparams,
infer_input_fn,
decode_hp,
decode_to_file,
output_dir,
log_results=True,
checkpoint_path=None):
"""Decodes once."""
# Get the predictions as an iterable
predictions = estimator.predict(infer_input_fn,
checkpoint_path=checkpoint_path)
if not log_results:
return list(predictions)
# Prepare output file writers if decode_to_file passed
decode_to_file = decode_to_file or decode_hp.decode_to_file
if decode_to_file:
output_filepath = _decode_filename(decode_to_file, problem_name, decode_hp)
parts = output_filepath.split(".")
parts[-1] = "targets"
target_filepath = ".".join(parts)
parts[-1] = "inputs"
input_filepath = ".".join(parts)
output_file = tf.gfile.Open(output_filepath, "w")
target_file = tf.gfile.Open(target_filepath, "w")
input_file = tf.gfile.Open(input_filepath, "w")
problem_hparams = hparams.problem_hparams
# Inputs vocabulary is set to targets if there are no inputs in the problem,
# e.g., for language models where the inputs are just a prefix of targets.
has_input = "inputs" in problem_hparams.vocabulary
inputs_vocab_key = "inputs" if has_input else "targets"
inputs_vocab = problem_hparams.vocabulary[inputs_vocab_key]
targets_vocab = problem_hparams.vocabulary["targets"]
num_eval_samples = 0
for num_predictions, prediction in enumerate(predictions):
num_eval_samples += 1
num_predictions += 1
inputs = prediction.get("inputs")
targets = prediction.get("targets")
outputs = prediction.get("outputs")
# Log predictions
decoded_outputs = []
decoded_scores = []
if decode_hp.return_beams:
output_beams = np.split(outputs, decode_hp.beam_size, axis=0)
scores = None
if "scores" in prediction:
scores = np.split(prediction["scores"], decode_hp.beam_size, axis=0)
for i, beam in enumerate(output_beams):
tf.logging.info("BEAM %d:" % i)
score = scores and scores[i]
decoded = log_decode_results(
inputs,
beam,
problem_name,
num_predictions,
inputs_vocab,
targets_vocab,
save_images=decode_hp.save_images,
output_dir=output_dir,
identity_output=decode_hp.identity_output,
targets=targets,
log_results=decode_hp.log_results)
decoded_outputs.append(decoded)
if decode_hp.write_beam_scores:
decoded_scores.append(score)
else:
decoded = log_decode_results(
inputs,
outputs,
problem_name,
num_predictions,
inputs_vocab,
targets_vocab,
save_images=decode_hp.save_images,
output_dir=output_dir,
identity_output=decode_hp.identity_output,
targets=targets,
log_results=decode_hp.log_results)
decoded_outputs.append(decoded)
# Write out predictions if decode_to_file passed
if decode_to_file:
for i, (d_input, d_output, d_target) in enumerate(decoded_outputs):
# Skip if all padding
if d_input and re.match("^({})+$".format(text_encoder.PAD), d_input):
continue
beam_score_str = ""
if decode_hp.write_beam_scores:
beam_score_str = "\t%.2f" % decoded_scores[i]
output_file.write(str(d_output) + beam_score_str + decode_hp.delimiter)
target_file.write(str(d_target) + decode_hp.delimiter)
input_file.write(str(d_input) + decode_hp.delimiter)
if (decode_hp.num_samples >= 0 and
num_predictions >= decode_hp.num_samples):
break
mlperf_log.transformer_print(key=mlperf_log.EVAL_SIZE,
value=num_eval_samples,
hparams=hparams)
if decode_to_file:
output_file.close()
target_file.close()
input_file.close()
def decode_from_file(estimator,
filename,
hparams,
decode_hp,
decode_to_file=None,
checkpoint_path=None):
"""Compute predictions on entries in filename and write them out."""
if not decode_hp.batch_size:
decode_hp.batch_size = 32
tf.logging.info(
"decode_hp.batch_size not specified; default=%d" % decode_hp.batch_size)
# Inputs vocabulary is set to targets if there are no inputs in the problem,
# e.g., for language models where the inputs are just a prefix of targets.
p_hp = hparams.problem_hparams
has_input = "inputs" in p_hp.vocabulary
inputs_vocab_key = "inputs" if has_input else "targets"
inputs_vocab = p_hp.vocabulary[inputs_vocab_key]
targets_vocab = p_hp.vocabulary["targets"]
problem_name = FLAGS.problem
filename = _add_shard_to_filename(filename, decode_hp)
tf.logging.info("Performing decoding from file (%s)." % filename)
sorted_inputs, sorted_keys = _get_sorted_inputs(filename, decode_hp.delimiter)
num_decode_batches = (len(sorted_inputs) - 1) // decode_hp.batch_size + 1
def input_fn():
input_gen = _decode_batch_input_fn(
num_decode_batches, sorted_inputs,
inputs_vocab, decode_hp.batch_size,
decode_hp.max_input_size, task_id=decode_hp.multiproblem_task_id)
gen_fn = make_input_fn_from_generator(input_gen)
example = gen_fn()
return _decode_input_tensor_to_features_dict(example, hparams)
decodes = []
result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)
start_time = time.time()
total_time_per_step = 0
total_cnt = 0
def timer(gen):
while True:
try:
start_time = time.time()
item = next(gen)
elapsed_time = time.time() - start_time
yield elapsed_time, item
except StopIteration:
break
for elapsed_time, result in timer(result_iter):
if decode_hp.return_beams:
beam_decodes = []
beam_scores = []
output_beams = np.split(result["outputs"], decode_hp.beam_size, axis=0)
scores = None
if "scores" in result:
if np.isscalar(result["scores"]):
result["scores"] = result["scores"].reshape(1)
scores = np.split(result["scores"], decode_hp.beam_size, axis=0)
for k, beam in enumerate(output_beams):
tf.logging.info("BEAM %d:" % k)
score = scores and scores[k]
_, decoded_outputs, _ = log_decode_results(
result["inputs"],
beam,
problem_name,
None,
inputs_vocab,
targets_vocab,
log_results=decode_hp.log_results)
beam_decodes.append(decoded_outputs)
if decode_hp.write_beam_scores:
beam_scores.append(score)
if decode_hp.write_beam_scores:
decodes.append("\t".join([
"\t".join([d, "%.2f" % s])
for d, s in zip(beam_decodes, beam_scores)
]))
else:
decodes.append("\t".join(beam_decodes))
else:
_, decoded_outputs, _ = log_decode_results(
result["inputs"],
result["outputs"],
problem_name,
None,
inputs_vocab,
targets_vocab,
log_results=decode_hp.log_results)
decodes.append(decoded_outputs)
total_time_per_step += elapsed_time
total_cnt += result["outputs"].shape[-1]
tf.logging.info("Elapsed Time: %5.5f" % (time.time() - start_time))
tf.logging.info("Averaged Single Token Generation Time: %5.7f "
"(time %5.7f count %d)" %
(total_time_per_step / total_cnt,
total_time_per_step, total_cnt))
# Reversing the decoded inputs and outputs because they were reversed in
# _decode_batch_input_fn
sorted_inputs.reverse()
decodes.reverse()
# If decode_to_file was provided use it as the output filename without change
# (except for adding shard_id if using more shards for decoding).
# Otherwise, use the input filename plus model, hp, problem, beam, alpha.
decode_filename = decode_to_file if decode_to_file else filename
if not decode_to_file:
decode_filename = _decode_filename(decode_filename, problem_name, decode_hp)
else:
decode_filename = _add_shard_to_filename(decode_filename, decode_hp)
tf.logging.info("Writing decodes into %s" % decode_filename)
outfile = tf.gfile.Open(decode_filename, "w")
for index in range(len(sorted_inputs)):
outfile.write("%s%s" % (decodes[sorted_keys[index]], decode_hp.delimiter))
outfile.flush()
outfile.close()
output_dir = os.path.join(estimator.model_dir, "decode")
tf.gfile.MakeDirs(output_dir)
run_postdecode_hooks(DecodeHookArgs(
estimator=estimator,
problem=hparams.problem,
output_dirs=[output_dir],
hparams=hparams,
decode_hparams=decode_hp,
predictions=list(result_iter)
), None)
def _add_shard_to_filename(filename, decode_hp):
if decode_hp.shards > 1:
shard_id = decode_hp.shard_id + decode_hp.shards_start_offset
filename = filename + ("%.3d" % shard_id)
return filename
def _decode_filename(base_filename, problem_name, decode_hp):
"""Generates decode filename.
Args:
base_filename: A string, base of the decode filename.
problem_name: A string, name of the problem.
decode_hp: HParams for decoding.
Returns:
A string, produced decode filename.
"""
if decode_hp.shards > 1:
base_filename = _add_shard_to_filename(base_filename, decode_hp)
if ("beam{beam}.alpha{alpha}.decodes".format(
beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha))
in base_filename):
return base_filename
else:
return (
"{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes".format(
base=base_filename,
model=FLAGS.model,
hp=FLAGS.hparams_set,
problem=problem_name,
beam=str(decode_hp.beam_size),
alpha=str(decode_hp.alpha)))
def make_input_fn_from_generator(gen):
"""Use py_func to yield elements from the given generator."""
first_ex = six.next(gen)
flattened = tf.contrib.framework.nest.flatten(first_ex)
types = [t.dtype for t in flattened]
shapes = [[None] * len(t.shape) for t in flattened]
first_ex_list = [first_ex]
def py_func():
if first_ex_list:
example = first_ex_list.pop()
else:
example = six.next(gen)
return tf.contrib.framework.nest.flatten(example)
def input_fn():
flat_example = tf.py_func(py_func, [], types)
_ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]
example = tf.contrib.framework.nest.pack_sequence_as(first_ex, flat_example)
return example
return input_fn
def decode_interactively(estimator, hparams, decode_hp, checkpoint_path=None):
"""Interactive decoding."""
is_image = "image" in hparams.problem.name
is_text2class = isinstance(hparams.problem,
text_problems.Text2ClassProblem)
skip_eos_postprocess = (
is_image or is_text2class or decode_hp.skip_eos_postprocess)
def input_fn():
gen_fn = make_input_fn_from_generator(
_interactive_input_fn(hparams, decode_hp))
example = gen_fn()
example = _interactive_input_tensor_to_features_dict(example, hparams)
return example
result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path)
for result in result_iter:
targets_vocab = hparams.problem_hparams.vocabulary["targets"]
if decode_hp.return_beams:
beams = np.split(result["outputs"], decode_hp.beam_size, axis=0)
scores = None
if "scores" in result:
if np.isscalar(result["scores"]):
result["scores"] = result["scores"].reshape(1)
scores = np.split(result["scores"], decode_hp.beam_size, axis=0)
for k, beam in enumerate(beams):
tf.logging.info("BEAM %d:" % k)
beam_string = targets_vocab.decode(_save_until_eos(
beam, skip_eos_postprocess))
if scores is not None:
tf.logging.info("\"%s\"\tScore:%f" % (beam_string, scores[k]))
else:
tf.logging.info("\"%s\"" % beam_string)
else:
if decode_hp.identity_output:
tf.logging.info(" ".join(map(str, result["outputs"].flatten())))
else:
tf.logging.info(
targets_vocab.decode(_save_until_eos(
result["outputs"], skip_eos_postprocess)))
def _decode_batch_input_fn(num_decode_batches, sorted_inputs, vocabulary,
batch_size, max_input_size, task_id=-1):
"""Generator to produce batches of inputs."""
tf.logging.info(" batch %d" % num_decode_batches)
# First reverse all the input sentences so that if you're going to get OOMs,
# you'll see it in the first batch
sorted_inputs.reverse()
for b in range(num_decode_batches):
tf.logging.info("Decoding batch %d" % b)
batch_length = 0
batch_inputs = []
for inputs in sorted_inputs[b * batch_size:(b + 1) * batch_size]:
input_ids = vocabulary.encode(inputs)
if max_input_size > 0:
# Subtract 1 for the EOS_ID.
input_ids = input_ids[:max_input_size - 1]
final_id = text_encoder.EOS_ID if task_id < 0 else task_id
input_ids.append(final_id)
batch_inputs.append(input_ids)
if len(input_ids) > batch_length:
batch_length = len(input_ids)
final_batch_inputs = []
for input_ids in batch_inputs:
assert len(input_ids) <= batch_length
x = input_ids + [0] * (batch_length - len(input_ids))
final_batch_inputs.append(x)
yield {
"inputs": np.array(final_batch_inputs).astype(np.int32),
}
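# Hedged illustration (not used by the library code): the right-padding that
# _decode_batch_input_fn applies to a toy batch of already-encoded id lists.
# The id values are made up.
def _example_pad_batch():
  """Right-pad variable-length id lists with zeros to a common length."""
  batch_inputs = [[17, 23, 1], [4, 1], [9, 8, 7, 6, 1]]
  batch_length = max(len(ids) for ids in batch_inputs)
  padded = [ids + [0] * (batch_length - len(ids)) for ids in batch_inputs]
  # -> a dense int32 matrix of shape (3, 5), like the yielded "inputs" above
  return np.array(padded).astype(np.int32)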
def _interactive_input_fn(hparams, decode_hp):
"""Generator that reads from the terminal and yields "interactive inputs".
Due to temporary limitations in tf.learn, if we don't want to reload the
whole graph, then we are stuck encoding all of the input as one fixed-size
numpy array.
We yield int32 arrays with shape [const_array_size]. The format is:
[num_samples, decode_length, len(input ids), <input ids>, <padding>]
Args:
hparams: model hparams
decode_hp: decode hparams
Yields:
numpy arrays
Raises:
Exception: when `input_type` is invalid.
"""
num_samples = decode_hp.num_samples if decode_hp.num_samples > 0 else 1
decode_length = decode_hp.extra_length
input_type = "text"
p_hparams = hparams.problem_hparams
has_input = "inputs" in p_hparams.modality
vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"]
# This should be longer than the longest input.
const_array_size = 10000
# Import readline if available for command line editing and recall.
try:
import readline # pylint: disable=g-import-not-at-top,unused-variable
except ImportError:
pass
while True:
prompt = ("INTERACTIVE MODE num_samples=%d decode_length=%d \n"
" it=<input_type> ('text' or 'image' or 'label', default: "
"text)\n"
" ns=<num_samples> (changes number of samples, default: 1)\n"
" dl=<decode_length> (changes decode length, default: 100)\n"
" <%s> (decode)\n"
" q (quit)\n"
">" % (num_samples, decode_length, "source_string"
if has_input else "target_prefix"))
input_string = input(prompt)
if input_string == "q":
return
elif input_string[:3] == "ns=":
num_samples = int(input_string[3:])
elif input_string[:3] == "dl=":
decode_length = int(input_string[3:])
elif input_string[:3] == "it=":
input_type = input_string[3:]
else:
if input_type == "text":
input_ids = vocabulary.encode(input_string)
if has_input:
input_ids.append(text_encoder.EOS_ID)
x = [num_samples, decode_length, len(input_ids)] + input_ids
assert len(x) < const_array_size
x += [0] * (const_array_size - len(x))
features = {
"inputs": np.array(x).astype(np.int32),
}
elif input_type == "image":
input_path = input_string
img = vocabulary.encode(input_path)
features = {
"inputs": img.astype(np.int32),
}
elif input_type == "label":
input_ids = [int(input_string)]
x = [num_samples, decode_length, len(input_ids)] + input_ids
features = {
"inputs": np.array(x).astype(np.int32),
}
else:
raise Exception("Unsupported input type.")
for k, v in six.iteritems(
problem_lib.problem_hparams_to_features(p_hparams)):
features[k] = np.array(v).astype(np.int32)
yield features
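# Hedged illustration of the fixed-size interactive-input layout described in
# the docstring above.  The ids and the (shortened) array size are made up.
def _example_interactive_input_array():
  num_samples, decode_length = 1, 100
  input_ids = [17, 23, 1]        # already-encoded ids with EOS appended
  const_array_size = 16          # shortened here for readability
  x = [num_samples, decode_length, len(input_ids)] + input_ids
  x += [0] * (const_array_size - len(x))
  # -> [1, 100, 3, 17, 23, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
  return np.array(x).astype(np.int32)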
def save_video(video, save_path_template):
"""Save frames of the videos into files."""
try:
from PIL import Image # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Showing and saving an image requires PIL library to be "
"installed: %s", e)
raise NotImplementedError("Image display and save not implemented.")
for i, frame in enumerate(video):
save_path = save_path_template.format(i)
with tf.gfile.Open(save_path, "wb") as sp:
Image.fromarray(np.uint8(frame)).save(sp)
def show_and_save_image(img, save_path):
"""Shows an image using matplotlib and saves it."""
try:
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
except ImportError as e:
tf.logging.warning(
"Showing and saving an image requires matplotlib to be "
"installed: %s", e)
raise NotImplementedError("Image display and save not implemented.")
plt.imshow(img)
with tf.gfile.Open(save_path, "wb") as sp:
plt.savefig(sp)
def _get_sorted_inputs(filename, delimiter="\n"):
"""Returning inputs sorted according to length.
Args:
filename: path to file with inputs, 1 per line.
delimiter: str, delimits records in the file.
Returns:
a sorted list of inputs
"""
tf.logging.info("Getting sorted inputs")
with tf.gfile.Open(filename) as f:
text = f.read()
records = text.split(delimiter)
inputs = [record.strip() for record in records]
# Strip the last empty line.
if not inputs[-1]:
inputs.pop()
input_lens = [(i, len(line.split())) for i, line in enumerate(inputs)]
sorted_input_lens = sorted(input_lens, key=operator.itemgetter(1))
# We'll need the keys to rearrange the inputs back into their original order
sorted_keys = {}
sorted_inputs = []
for i, (index, _) in enumerate(sorted_input_lens):
sorted_inputs.append(inputs[index])
sorted_keys[index] = i
return sorted_inputs, sorted_keys
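# Hedged illustration (not used by the library code): how the sorted_keys map
# returned above restores the original order after length-sorted decoding.
# The sentences are made up.
def _example_restore_order():
  inputs = ["a much longer input sentence", "short", "medium length input"]
  input_lens = [(i, len(line.split())) for i, line in enumerate(inputs)]
  sorted_input_lens = sorted(input_lens, key=operator.itemgetter(1))
  sorted_keys = {}
  sorted_inputs = []
  for i, (index, _) in enumerate(sorted_input_lens):
    sorted_inputs.append(inputs[index])
    sorted_keys[index] = i
  # decodes come back in sorted order; index them with sorted_keys to restore
  decodes = ["decoded: " + s for s in sorted_inputs]
  return [decodes[sorted_keys[i]] for i in range(len(inputs))]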
def _save_until_eos(ids, skip=False):
"""Strips everything after the first <EOS> token, which is normally 1."""
ids = ids.flatten()
if skip:
return ids
try:
index = list(ids).index(text_encoder.EOS_ID)
return ids[0:index]
except ValueError:
# No EOS_ID: return the array as-is.
return ids
def _interactive_input_tensor_to_features_dict(feature_map, hparams):
"""Convert the interactive input format (see above) to a dictionary.
Args:
feature_map: dict with inputs.
hparams: model hyperparameters
Returns:
a features dictionary, as expected by the decoder.
"""
inputs = tf.convert_to_tensor(feature_map["inputs"])
  input_is_image = len(inputs.get_shape()) >= 3
x = inputs
if input_is_image:
x = tf.image.resize_images(x, [299, 299])
x = tf.reshape(x, [1, 299, 299, -1])
x = tf.to_int32(x)
else:
    # Unpack the flat interactive-input encoding (see _interactive_input_fn).
num_samples = x[0]
length = x[2]
x = tf.slice(x, [3], tf.to_int32([length]))
x = tf.reshape(x, [1, -1, 1, 1])
# Transform into a batch of size num_samples to get that many random
# decodes.
x = tf.tile(x, tf.to_int32([num_samples, 1, 1, 1]))
p_hparams = hparams.problem_hparams
input_space_id = tf.constant(p_hparams.input_space_id)
target_space_id = tf.constant(p_hparams.target_space_id)
features = {}
features["input_space_id"] = input_space_id
features["target_space_id"] = target_space_id
features["decode_length"] = (
IMAGE_DECODE_LENGTH if input_is_image else inputs[1])
features["inputs"] = x
return features
def _decode_input_tensor_to_features_dict(feature_map, hparams):
"""Convert the interactive input format (see above) to a dictionary.
Args:
feature_map: dict with inputs.
hparams: model hyperparameters
Returns:
a features dictionary, as expected by the decoder.
"""
inputs = tf.convert_to_tensor(feature_map["inputs"])
input_is_image = False
x = inputs
p_hparams = hparams.problem_hparams
# Add a third empty dimension
x = tf.expand_dims(x, axis=[2])
x = tf.to_int32(x)
input_space_id = tf.constant(p_hparams.input_space_id)
target_space_id = tf.constant(p_hparams.target_space_id)
features = {}
features["input_space_id"] = input_space_id
features["target_space_id"] = target_space_id
features["decode_length"] = (
IMAGE_DECODE_LENGTH if input_is_image else tf.shape(x)[1] + 50)
features["inputs"] = x
return features
def latest_checkpoint_step(ckpt_dir):
ckpt = tf.train.get_checkpoint_state(ckpt_dir)
if not ckpt:
return None
path = ckpt.model_checkpoint_path
step = int(path.split("-")[-1])
return step
class DecodeHookArgs(collections.namedtuple(
"DecodeHookArgs",
["estimator", "problem", "output_dirs", "hparams",
"decode_hparams", "predictions"])):
pass
def run_postdecode_hooks(decode_hook_args, dataset_split):
"""Run hooks after decodes have run."""
hooks = decode_hook_args.problem.decode_hooks
if not hooks:
return
global_step = latest_checkpoint_step(decode_hook_args.estimator.model_dir)
if global_step is None:
tf.logging.info(
"Skipping decode hooks because no checkpoint yet available.")
return
tf.logging.info("Running decode hooks.")
parent_dir = os.path.join(decode_hook_args.output_dirs[0], os.pardir)
child_dir = decode_hook_args.decode_hparams.summaries_log_dir
if dataset_split is not None:
child_dir += "_{}".format(dataset_split)
final_dir = os.path.join(parent_dir, child_dir)
summary_writer = tf.summary.FileWriter(final_dir)
for hook in hooks:
# Isolate each hook in case it creates TF ops
with tf.Graph().as_default():
summaries = hook(decode_hook_args)
if summaries:
summary = tf.Summary(value=list(summaries))
summary_writer.add_summary(summary, global_step)
summary_writer.close()
tf.logging.info("Decode hooks done.")
|
apache-2.0
|
jblackburne/scikit-learn
|
doc/conf.py
|
12
|
9568
|
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'numpy_ext.numpydoc',
'sphinx.ext.linkcode', 'sphinx.ext.doctest',
'sphinx_gallery.gen_gallery',
]
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2016, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
sphinx_gallery_conf = {
'doc_module': 'sklearn',
'reference_url': {
'sklearn': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
'nibabel': 'http://nipy.org/nibabel'}
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600,
'sphx_glr_plot_outlier_detection_003.png': 372,
'sphx_glr_plot_gpr_co2_001.png': 350,
'sphx_glr_plot_adaboost_twoclass_001.png': 372,
'sphx_glr_plot_compare_methods_001.png': 349}
def make_carousel_thumbs(app, exception):
"""produces the final resized carousel images"""
if exception is not None:
return
print('Preparing carousel images')
image_dir = os.path.join(app.builder.outdir, '_images')
for glr_plot, max_width in carousel_thumbs.items():
image = os.path.join(image_dir, glr_plot)
if os.path.exists(image):
c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png')
sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('build-finished', make_carousel_thumbs)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
|
bsd-3-clause
|
AlexRobson/scikit-learn
|
sklearn/ensemble/weight_boosting.py
|
97
|
40773
|
"""Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
The ensemble score after each boosting iteration.
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _check_sample_weight(self):
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The class labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
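Examples
--------
A minimal, illustrative fit/predict sketch (``make_classification`` from
``sklearn.datasets`` is assumed here purely for demonstration):
>>> from sklearn.ensemble import AdaBoostClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, random_state=0)
>>> clf = AdaBoostClassifier(n_estimators=10, random_state=0)
>>> clf.fit(X, y) # doctest: +ELLIPSIS
AdaBoostClassifier(...)
>>> y_pred = clf.predict(X[:5])
>>> len(clf.estimators_) <= 10
True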
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME.R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
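# For example, with K == 3 classes (classes_ == [0, 1, 2]) and true labels
# y == [0, 2], the coding built below is [[1., -0.5, -0.5], [-0.5, -0.5, 1.]].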
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : generator of array, shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of
outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of regressors
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
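Examples
--------
A minimal, illustrative fit/predict sketch (``make_regression`` from
``sklearn.datasets`` is assumed here purely for demonstration):
>>> from sklearn.ensemble import AdaBoostRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_samples=100, random_state=0)
>>> reg = AdaBoostRegressor(n_estimators=10, random_state=0)
>>> reg.fit(X, y) # doctest: +ELLIPSIS
AdaBoostRegressor(...)
>>> reg.predict(X[:3]).shape
(3,)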
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
|
bsd-3-clause
|
ominux/scikit-learn
|
sklearn/neighbors/classification.py
|
1
|
12134
|
"""Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
#
# License: BSD, (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import atleast2d_or_csr, deprecated
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
In this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`scipy.spatial.cKDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
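The same data with inverse-distance weighting (shown here purely as an
illustration of the ``weights`` option):
>>> neigh_d = KNeighborsClassifier(n_neighbors=2, weights='distance')
>>> neigh_d.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh_d.predict([[1.1]]))
[0]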
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
References
----------
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5,
weights='uniform',
algorithm='auto', leaf_size=30):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X: array
A 2-D array representing the test points.
Returns
-------
labels: array
List of class labels (one for each data sample).
"""
X = atleast2d_or_csr(X)
neigh_dist, neigh_ind = self.kneighbors(X)
pred_labels = self._y[neigh_ind]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
mode, _ = stats.mode(pred_labels, axis=1)
else:
mode, _ = weighted_mode(pred_labels, weights, axis=1)
return mode.flatten().astype(np.int)
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
In this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`scipy.spatial.cKDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
References
----------
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X: array
A 2-D array representing the test points.
Returns
-------
labels: array
List of class labels (one for each data sample).
"""
X = atleast2d_or_csr(X)
neigh_dist, neigh_ind = self.radius_neighbors(X)
pred_labels = [self._y[ind] for ind in neigh_ind]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
mode = np.asarray([stats.mode(pl)[0] for pl in pred_labels],
dtype=np.int)
else:
mode = np.asarray([weighted_mode(pl, w)[0]
for (pl, w) in zip(pred_labels, weights)],
dtype=np.int)
return mode.flatten().astype(np.int)
class NeighborsClassifier(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, SupervisedIntegerMixin,
ClassifierMixin):
"""Classifier implementing the nearest neighbors vote. (Deprecated)
DEPRECATED IN VERSION 0.9; WILL BE REMOVED IN VERSION 0.11
Please use :class:`KNeighborsClassifier` or
:class:`RadiusNeighborsClassifier` instead.
Samples participating in the vote are either the k-nearest neighbors
(for some k) or all neighbors within some fixed radius around the sample
to classify.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`scipy.spatial.cKDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
classification_type : {'knn_vote', 'radius_vote'}, optional
Type of fit to use: 'knn_vote' specifies a k-NN classification.
'radius_vote' specifies a r-NN classification. Default is 'knn_vote'.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import NeighborsClassifier
>>> neigh = NeighborsClassifier(n_neighbors=2)
>>> neigh.fit(X, y)
NeighborsClassifier(algorithm='auto', classification_type='knn_vote',
leaf_size=30, n_neighbors=2, radius=1.0)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
NearestNeighbors
NeighborsRegressor
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
References
----------
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30,
classification_type='knn_vote'):
if classification_type not in ('radius_vote', 'knn_vote'):
raise ValueError("classification_type not recognized")
self.classification_type = classification_type
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X: array
A 2-D array representing the test points.
Returns
-------
labels: array
List of class labels (one for each data sample).
"""
X = atleast2d_or_csr(X)
if self.classification_type == 'knn_vote':
neigh_ind = self.kneighbors(X, return_distance=False)
pred_labels = self._y[neigh_ind]
mode, _ = stats.mode(pred_labels, axis=1)
return mode.flatten().astype(np.int)
else:
neigh_ind = self.radius_neighbors(X, return_distance=False)
pred_labels = [self._y[ind] for ind in neigh_ind]
return np.asarray([stats.mode(pi) for pi in pred_labels],
dtype=np.int)
NeighborsClassifier = deprecated(
"deprecated in v0.9; will be removed in v0.11; "
"use KNeighborsClassifier or RadiusNeighborsClassifier instead")(
NeighborsClassifier)
|
bsd-3-clause
|
clingsz/GAE
|
misc/gap.py
|
1
|
4899
|
import numpy as np
from numpy import zeros
import random
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering as ag
import scipy.spatial.distance
dst = scipy.spatial.distance.euclidean
import scipy.cluster.vq
import misc.utils as utils
def agclust(data,k):
a = ag(n_clusters=k)
a.fit(data)
lbs = a.labels_
cids = []
kmc = np.zeros([k,data.shape[1]])
for i in range(k):
lst = np.where(lbs==i)[0]
kmc[i,:] = np.mean(data[lst,:],axis=0)
cids.append(lst)
return kmc,lbs
def cluster_points(X, mu):
clusters = {}
for x in X:
bestmukey = min([(i[0], np.linalg.norm(x-mu[i[0]])) \
for i in enumerate(mu)], key=lambda t:t[1])[0]
try:
clusters[bestmukey].append(x)
except KeyError:
clusters[bestmukey] = [x]
return clusters
def reevaluate_centers(mu, clusters):
newmu = []
keys = sorted(clusters.keys())
for k in keys:
newmu.append(np.mean(clusters[k], axis = 0))
return newmu
def has_converged(mu, oldmu):
return (set([tuple(a) for a in mu]) == set([tuple(a) for a in oldmu]))
def find_centers(X, K):
# Initialize to K random centers
oldmu = random.sample(X, K)
mu = random.sample(X, K)
while not has_converged(mu, oldmu):
oldmu = mu
# Assign all points in X to clusters
clusters = cluster_points(X, mu)
# Reevaluate centers
mu = reevaluate_centers(oldmu, clusters)
return(mu, clusters)
def Wk(mu, clusters):
K = len(mu)
return sum([np.linalg.norm(mu[i]-c)**2/(2*len(c)) \
for i in range(K) for c in clusters[i]])
def dist(data,K):
kmc,kml = agclust(data,K)
# kmc,kml = scipy.cluster.vq.kmeans2(data, K)
disp = sum([dst(data[m,:],kmc[kml[m],:]) for m in range(data.shape[0])])
return disp
def bounding_box(X):
# xmin, xmax = min(X,key=lambda a:a[0])[0], max(X,key=lambda a:a[0])[0]
# ymin, ymax = min(X,key=lambda a:a[1])[1], max(X,key=lambda a:a[1])[1]
xmin = X.min(axis=0)
xmax = X.max(axis=0)
return (xmin,xmax)
def gap_statistic(X,B=10,kMin=1,kMax=50):
(xmin,xmax) = bounding_box(X)
# Dispersion for real distribution
# ks = range(1,10)
Wks = []
Wkbs = []
gaps = []
sks = []
flag = False
indk = 0
k = kMin
ks = []
while not flag and k<=kMax:
print 'bootstrapping for k=',str(k),
Wk = np.log(dist(X,k))
BWkbs = zeros(B)
for i in range(B):
print '+',
Xb = np.random.uniform(low=xmin,high=xmax,size=X.shape)
BWkbs[i] = np.log(dist(Xb,k))
print 'done'
Wkb = sum(BWkbs)/B
sk = np.sqrt(sum((BWkbs-Wkb)**2)/B)
sk = sk*np.sqrt(1 + 1./B)
gap = Wkb - Wk
Wkbs.append(Wkb)
Wks.append(Wk)
sks.append(sk)
gaps.append(gap)
ks.append(k)
if indk>0:
gapinc = gaps[indk-1] - (gaps[indk]-sks[indk])
if (gapinc>=0):
print 'GapInc=%.4f, k=%d is a good cluster' % (gapinc,k-1)
flag = True
else:
print 'GapInc=%.4f, try k=%d' % (gapinc,k)
indk = indk + 1
k = k + 1
if not flag:
print 'kMax=%d still too small for a good cluster. Try a larger one.' % (kMax)
return(ks, Wks, Wkbs, sks)
def init_board_gauss(N, k):
n = float(N)/k
X = []
for i in range(k):
c = (random.uniform(-1, 1), random.uniform(-1, 1))
s = random.uniform(0.05,0.05)
x = []
while len(x) < n:
a, b = np.array([np.random.normal(c[0], s), np.random.normal(c[1], s)])
# Continue drawing points from the distribution in the range [-1,1]
if abs(a) < 1 and abs(b) < 1:
x.append([a,b])
X.extend(x)
X = np.array(X)[:N]
return X
def analyze_gap_result(B,PLOT=False):
ks,logWks,logWkbs,sk = utils.loadobj('temp/gapstats_B'+str(B)+'.pkl')
gaps = np.asarray(logWkbs) - np.asarray(logWks)
gapinc = gaps[:-1]-gaps[1:]+sk[1:]
lst = np.where(gapinc>=0)[0]
best_cluster_number = ks[lst[0]]
print 'best cluster number is:', ks[lst[0]]
if PLOT:
plt.plot(ks[:-1],gapinc,'-o')
plt.plot([min(ks),max(ks)],[0,0],'k--')
plt.xlabel('Cluster number')
plt.ylabel('Decision bound (>=0)')
plt.title('BS='+str(B)+', Best Cluster Number = ' + str(ks[lst[0]]))
plt.grid()
return best_cluster_number
def fit_gap_stats(z,bootstraps=1000,kMin=1,kMax=50):
print 'Clustering for matrix observation x feature: ', z.shape
B = bootstraps
gap_result = gap_statistic(z,B,kMin,kMax)
utils.saveobj('temp/gapstats_B'+str(B)+'.pkl',gap_result)
best_cluster_number = analyze_gap_result(B)
return best_cluster_number
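# Illustrative usage sketch (assumes a 'temp/' directory exists for the
# cached results written by fit_gap_stats; the numbers are arbitrary):
#
#     import numpy as np
#     import misc.gap as gap
#     z = np.random.randn(200, 10)
#     best_k = gap.fit_gap_stats(z, bootstraps=20, kMin=1, kMax=10)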
|
gpl-3.0
|
wzbozon/statsmodels
|
statsmodels/graphics/tests/test_gofplots.py
|
27
|
6814
|
import numpy as np
from numpy.testing import dec
import statsmodels.api as sm
from statsmodels.graphics.gofplots import qqplot, qqline, ProbPlot
from scipy import stats
try:
import matplotlib.pyplot as plt
import matplotlib
have_matplotlib = True
except ImportError:
have_matplotlib = False
class BaseProbplotMixin(object):
def base_setup(self):
if have_matplotlib:
self.fig, self.ax = plt.subplots()
self.other_array = np.random.normal(size=self.prbplt.data.shape)
self.other_prbplot = sm.ProbPlot(self.other_array)
def teardown(self):
if have_matplotlib:
plt.close('all')
@dec.skipif(not have_matplotlib)
def test_qqplot(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line)
@dec.skipif(not have_matplotlib)
def test_ppplot(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line)
@dec.skipif(not have_matplotlib)
def test_probplot(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line)
@dec.skipif(not have_matplotlib)
def test_qqplot_other_array(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
other=self.other_array)
@dec.skipif(not have_matplotlib)
def test_ppplot_other_array(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
other=self.other_array)
@dec.skipif(not have_matplotlib)
def t_est_probplot_other_array(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
other=self.other_array)
@dec.skipif(not have_matplotlib)
def test_qqplot_other_prbplt(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
other=self.other_prbplot)
@dec.skipif(not have_matplotlib)
def test_ppplot_other_prbplt(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
other=self.other_prbplot)
@dec.skipif(not have_matplotlib)
def t_est_probplot_other_prbplt(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
other=self.other_prbplot)
@dec.skipif(not have_matplotlib)
def test_qqplot_custom_labels(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
xlabel='Custom X-Label',
ylabel='Custom Y-Label')
@dec.skipif(not have_matplotlib)
def test_ppplot_custom_labels(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
xlabel='Custom X-Label',
ylabel='Custom Y-Label')
@dec.skipif(not have_matplotlib)
def test_probplot_custom_labels(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
xlabel='Custom X-Label',
ylabel='Custom Y-Label')
@dec.skipif(not have_matplotlib)
def test_qqplot_pltkwargs(self):
self.fig = self.prbplt.qqplot(ax=self.ax, line=self.line,
marker='d',
markerfacecolor='cornflowerblue',
markeredgecolor='white',
alpha=0.5)
@dec.skipif(not have_matplotlib)
def test_ppplot_pltkwargs(self):
self.fig = self.prbplt.ppplot(ax=self.ax, line=self.line,
marker='d',
markerfacecolor='cornflowerblue',
markeredgecolor='white',
alpha=0.5)
@dec.skipif(not have_matplotlib)
def test_probplot_pltkwargs(self):
self.fig = self.prbplt.probplot(ax=self.ax, line=self.line,
marker='d',
markerfacecolor='cornflowerblue',
markeredgecolor='white',
alpha=0.5)
class TestProbPlotLongely(BaseProbplotMixin):
def setup(self):
np.random.seed(5)
self.data = sm.datasets.longley.load()
self.data.exog = sm.add_constant(self.data.exog, prepend=False)
self.mod_fit = sm.OLS(self.data.endog, self.data.exog).fit()
self.prbplt = sm.ProbPlot(self.mod_fit.resid, stats.t, distargs=(4,))
self.line = 'r'
self.base_setup()
class TestProbPlotRandomNormalMinimal(BaseProbplotMixin):
def setup(self):
np.random.seed(5)
self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
self.prbplt = sm.ProbPlot(self.data)
self.line = None
self.base_setup()
class TestProbPlotRandomNormalWithFit(BaseProbplotMixin):
def setup(self):
np.random.seed(5)
self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
self.prbplt = sm.ProbPlot(self.data, fit=True)
self.line = 'q'
self.base_setup()
class TestProbPlotRandomNormalLocScale(BaseProbplotMixin):
def setup(self):
np.random.seed(5)
self.data = np.random.normal(loc=8.25, scale=3.25, size=37)
self.prbplt = sm.ProbPlot(self.data, loc=8.25, scale=3.25)
self.line = '45'
self.base_setup()
class TestTopLevel(object):
def setup(self):
self.data = sm.datasets.longley.load()
self.data.exog = sm.add_constant(self.data.exog, prepend=False)
self.mod_fit = sm.OLS(self.data.endog, self.data.exog).fit()
self.res = self.mod_fit.resid
self.prbplt = sm.ProbPlot(self.mod_fit.resid, stats.t, distargs=(4,))
self.other_array = np.random.normal(size=self.prbplt.data.shape)
self.other_prbplot = sm.ProbPlot(self.other_array)
def teardown(self):
if have_matplotlib:
plt.close('all')
@dec.skipif(not have_matplotlib)
def test_qqplot(self):
fig = sm.qqplot(self.res, line='r')
@dec.skipif(not have_matplotlib)
def test_qqplot_2samples_ProbPlotObjects(self):
# also tests all values for line
for line in ['r', 'q', '45', 's']:
# test with `ProbPlot` instances
fig = sm.qqplot_2samples(self.prbplt, self.other_prbplot,
line=line)
@dec.skipif(not have_matplotlib)
def test_qqplot_2samples_arrays(self):
# also tests all values for line
for line in ['r', 'q', '45', 's']:
# test with arrays
fig = sm.qqplot_2samples(self.res, self.other_array, line=line)
|
bsd-3-clause
|
brianr747/SFC_models
|
sfc_models/examples/scripts/ex20171127_equations_state_space_models.py
|
1
|
3086
|
"""
ex20171127_equations_state_space_models.py
And now for something completely different...
This code just uses the equation parser and solver to generate the solutions for state space models.
One of the stranger applications of framework code...
"""
from sfc_models.equation_solver import EquationSolver
from sfc_models.examples.Quick2DPlot import Quick2DPlot
import matplotlib.pyplot as plt
def generate_model(eqns, use_control=False):
if use_control:
eqns += """
u = (-.7 * x + .75 * r)/.05
u(0) = 1.0
exogenous
r = [1.] * 10 + [2.]*21
"""
else:
eqns += """
exogenous
u = [1.] * 10 + [2.]*21
"""
print(eqns)
out = EquationSolver(eqns)
out.Parser.MaxTime = 30
out.SolveEquation()
return out
def generate_base_model(a, use_control):
eqns = """
x = (1-a)*lag_x + a * lag_u
y = x
lag_x = x(k-1)
lag_u = u(k-1)
x(0) = 1.
a = {0}
""".format(a)
# print(eqns)
out = generate_model(eqns, use_control)
return out
def generate_actual_model(use_control):
eqns = """
x = (1-a)*lag_x + a * lag_h
y = x
g = lag_u
h = lag_g
lag_x = x(k-1)
lag_u = u(k-1)
lag_g = g(k-1)
lag_h = h(k-1)
x(0) = 1.
g(0) = 1.
h(0) = 1.
a = .05
"""
return generate_model(eqns, use_control)
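# Note on the equations above: the base model is the scalar difference
# equation x[k] = (1 - a) * x[k-1] + a * u[k-1] (a first-order lag with unit
# DC gain), while the "actual" model inserts two extra unit delays (g, h)
# between the input u and the state x.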
def main():
mod_0 = generate_base_model(.05, use_control=False)
k = mod_0.TimeSeries['k']
x_0 = mod_0.TimeSeries['x']
mod_1 = generate_base_model(.005, use_control=False)
x_1 = mod_1.TimeSeries['x']
mod_2 = generate_base_model(.09, use_control=False)
x_2 = mod_2.TimeSeries['x']
mod_act = generate_actual_model(use_control=False)
x_act = mod_act.TimeSeries['x']
plt.plot(k, x_0, 'bo', k, x_1, 'b+', k, x_2, 'b--')
plt.title('Assumed Model Output, Plus Perturbed Systems')
plt.grid()
plt.savefig('robust_1.png', dpi=90)
plt.show()
plt.plot(k, x_act, '-ro', k, x_0, 'bo', k, x_1, 'b+', k, x_2, 'b--')
plt.title('True Model, Plus Assumed Outputs')
plt.grid()
plt.savefig('robust_2.png', dpi=90)
plt.show()
mod_0 = generate_base_model(.05, use_control=True)
k = mod_0.TimeSeries['k']
x_0 = mod_0.TimeSeries['x']
mod_1 = generate_base_model(.005, use_control=True)
x_1 = mod_1.TimeSeries['x']
mod_2 = generate_base_model(.09, use_control=True)
x_2 = mod_2.TimeSeries['x']
mod_act = generate_actual_model(use_control=True)
x_act = mod_act.TimeSeries['x']
plt.plot(k, x_0, 'bo', k, x_1, 'b+', k, x_2, 'b--')
plt.title('Assumed Feedback Control Response')
plt.grid()
plt.savefig('robust_3.png', dpi=90)
plt.show()
plt.plot(k, x_act, '-ro', k, x_0, 'bo', k, x_1, 'b+', k, x_2, 'b--')
plt.title('Actual Feedback Response')
plt.grid()
plt.savefig('robust_4.png', dpi=90)
plt.show()
# plt.plot(k, x_act, 'ro', k, x_1, 'b+', k, x_2, 'b--')
# plt.grid()
# plt.show()
#Quick2DPlot(k, x_0, 'Base Output')
if __name__ == '__main__':
main()
|
apache-2.0
|
jalanb/jab
|
ipython/profile_default/ipython_config.py
|
6
|
18065
|
# Configuration file for ipython.
# pylint: disable=E0602
c = get_config()
# -----------------------------------------------------------------------------
# InteractiveShellApp configuration
# -----------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
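# For example (illustrative only):
# c.InteractiveShellApp.exec_lines = ['import numpy as np']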
# Enable GUI event loop integration ('qt', 'wx', 'gtk', 'glut', 'pyglet',
# 'osx').
# c.InteractiveShellApp.gui = None
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an 'import *' is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = None
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = None
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# Run the module as a script.
# c.TerminalIPythonApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.TerminalIPythonApp.extra_config_file = u''
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
# The IPython profile to use.
# c.TerminalIPythonApp.profile = u'default'
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.TerminalIPythonApp.matplotlib = None
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an 'import *' is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.TerminalIPythonApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# ~/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.TerminalIPythonApp.ipython_dir = u''
# Whether to display a banner upon starting IPython.
c.TerminalIPythonApp.display_banner = False
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# Enable GUI event loop integration ('qt', 'wx', 'gtk', 'glut', 'pyglet',
# 'osx').
# c.TerminalIPythonApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# The Logging format template
# c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = True
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.TerminalInteractiveShell.ast_transformers = []
#
# c.TerminalInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# c.TerminalInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
c.TerminalInteractiveShell.colors = 'Linux'
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
#
# c.TerminalInteractiveShell.separate_in = '\n'
#
# c.TerminalInteractiveShell.separate_out = ''
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
c.TerminalInteractiveShell.autocall = 1
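# Illustrative example (assuming IPython's 'smart' autocall behaviour as
# described above): with autocall = 1, typing `len [1, 2, 3]` is rewritten to
# `len([1, 2, 3])` and evaluated, while a bare `len` with no arguments on the
# line is left untouched.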
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
c.TerminalInteractiveShell.editor = '/usr/local/bin/vim'
# Shortcut style to use at the prompt
c.TerminalInteractiveShell.editing_mode = 'vi'
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 2.7.1 (r271:86832, Aug 2 2012, 13:51:44) \nType "copyright", "credits" or "license" for more information.\n\nIPython 1.0.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
#
# c.TerminalInteractiveShell.separate_out2 = ''
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
#
# c.TerminalInteractiveShell.debug = False
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
c.TerminalInteractiveShell.confirm_exit = False
#
# c.TerminalInteractiveShell.ipython_dir = ''
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.TerminalInteractiveShell.logstart = False
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
#
# c.TerminalInteractiveShell.readline_use = True
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
# Start logging to the given file in append mode.
# c.TerminalInteractiveShell.logappend = ''
#
# c.TerminalInteractiveShell.xmode = 'Context'
#
# c.TerminalInteractiveShell.quiet = False
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
#
# c.TerminalInteractiveShell.object_info_string_level = 0
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.TerminalInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = '{my_prompt}'
# Continuation prompt.
# c.PromptManager.in2_template = '... '
#
# c.PromptManager.color_scheme = 'Linux'
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# HistoryManager will inherit config from: HistoryAccessor
#
# c.HistoryManager.db_log_output = False
#
# c.HistoryManager.db_cache_size = 0
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryManager.hist_file = u''
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryManager.connection_options = {}
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryManager.enabled = True
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
#
# c.PlainTextFormatter.type_printers = {}
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.deferred_printers = {}
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.singleton_printers = {}
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
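# Illustrative example (hypothetical setting, not enabled here): with
# c.IPCompleter.greedy = True
# completing on `mylist[0].<TAB>` actually evaluates `mylist[0]` in order to
# list its attributes, which is why greedy completion is disabled by default.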
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_path
# c.ScriptMagics.script_magics = []
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_path = {}
|
mit
|
arjoly/scikit-learn
|
examples/cluster/plot_agglomerative_clustering.py
|
343
|
2931
|
"""
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 30 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First, clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. A larger number of neighbors
# will give more homogeneous clusters at the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
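# A small follow-up sketch (not part of the original example): the docstring
# above suggests decreasing the number of neighbors in kneighbors_graph; with
# a very sparse graph the imposed geometry approaches single linkage and the
# percolation effect becomes more visible.
sparse_graph = kneighbors_graph(X, 5, include_self=False)
sparse_model = AgglomerativeClustering(linkage='complete',
                                       connectivity=sparse_graph,
                                       n_clusters=30).fit(X)
print("cluster sizes with a 5-neighbor graph: %s"
      % np.bincount(sparse_model.labels_))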
|
bsd-3-clause
|
zhuangfangwang/gmmreg
|
Python/_plotting.py
|
14
|
2435
|
#!/usr/bin/env python
#coding=utf-8
##====================================================
## $Author$
## $Date$
## $Revision$
##====================================================
from pylab import *
from configobj import ConfigObj
import matplotlib.pyplot as plt
def display2Dpointset(A):
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.grid(True)
ax.plot(A[:,0],A[:,1],'yo',markersize=8,mew=1)
labels = plt.getp(plt.gca(), 'xticklabels')
plt.setp(labels, color='k', fontweight='bold')
labels = plt.getp(plt.gca(), 'yticklabels')
plt.setp(labels, color='k', fontweight='bold')
for i,x in enumerate(A):
ax.annotate('%d'%(i+1), xy = x, xytext = x + 0)
ax.set_axis_off()
#fig.show()
def display2Dpointsets(A, B, ax = None):
""" display a pair of 2D point sets """
if not ax:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(A[:,0],A[:,1],'yo',markersize=8,mew=1)
ax.plot(B[:,0],B[:,1],'b+',markersize=8,mew=1)
#pylab.setp(pylab.gca(), 'xlim', [-0.15,0.6])
labels = plt.getp(plt.gca(), 'xticklabels')
plt.setp(labels, color='k', fontweight='bold')
labels = plt.getp(plt.gca(), 'yticklabels')
plt.setp(labels, color='k', fontweight='bold')
def display3Dpointsets(A,B,ax):
#ax.plot3d(A[:,0],A[:,1],A[:,2],'yo',markersize=10,mew=1)
#ax.plot3d(B[:,0],B[:,1],B[:,2],'b+',markersize=10,mew=1)
ax.scatter(A[:,0],A[:,1],A[:,2], c = 'y', marker = 'o')
ax.scatter(B[:,0],B[:,1],B[:,2], c = 'b', marker = '+')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
from mpl_toolkits.mplot3d import Axes3D
def displayABC(A,B,C):
fig = plt.figure()
dim = A.shape[1]
if dim==2:
ax = plt.subplot(121)
display2Dpointsets(A, B, ax)
ax = plt.subplot(122)
display2Dpointsets(C, B, ax)
if dim==3:
plot1 = plt.subplot(1,2,1)
ax = Axes3D(fig, rect = plot1.get_position())
display3Dpointsets(A,B,ax)
plot2 = plt.subplot(1,2,2)
ax = Axes3D(fig, rect = plot2.get_position())
display3Dpointsets(C,B,ax)
plt.show()
def display_pts(f_config):
config = ConfigObj(f_config)
file_section = config['FILES']
mf = file_section['model']
sf = file_section['scene']
tf = file_section['transformed_model']
m = np.loadtxt(mf)
s = np.loadtxt(sf)
t = np.loadtxt(tf)
displayABC(m,s,t)
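# A minimal example (hypothetical paths) of the ini-style config that
# display_pts expects; only the [FILES] section shown here is read:
#
# [FILES]
# model = ./data/model.txt
# scene = ./data/scene.txt
# transformed_model = ./data/transformed_model.txt
#
# display_pts('./example.ini')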
|
gpl-3.0
|
Sentient07/scikit-learn
|
benchmarks/bench_plot_nmf.py
|
28
|
15630
|
"""
Benchmarks of Non-Negative Matrix Factorization
"""
# Authors: Tom Dupre la Tour (benchmark)
# Chih-Jen Linn (original projected gradient NMF implementation)
# Anthony Di Franco (projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import warnings
import numbers
import numpy as np
import matplotlib.pyplot as plt
import pandas
from sklearn.utils.testing import ignore_warnings
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition.nmf import NMF
from sklearn.decomposition.nmf import _initialize_nmf
from sklearn.decomposition.nmf import _beta_divergence
from sklearn.decomposition.nmf import INTEGER_TYPES, _check_init
from sklearn.externals.joblib import Memory
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import fast_dot, safe_sparse_dot, squared_norm
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted, check_non_negative
mem = Memory(cachedir='.', verbose=0)
###################
# Start of _PGNMF #
###################
# This class implements a projected gradient solver for the NMF.
# The projected gradient solver was removed from scikit-learn in version 0.19,
# and a simplified copy is used here for comparison purpose only.
# It is not tested, and it may change or disappear without notice.
def _norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return np.sqrt(squared_norm(x))
def _nls_subproblem(X, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the projected
gradient descent algorithm.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Constant matrix.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
        Larger values make it possible to find a better step size, but lead to
        a longer line search. 0.1 is a commonly used value in the optimization
        literature.
Returns
-------
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtX = safe_sparse_dot(W.T, X)
WtW = fast_dot(W.T, W)
# values justified in the paper (alpha is renamed gamma)
gamma = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtX
if alpha > 0 and l1_ratio == 1.:
grad += alpha
elif alpha > 0:
grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if _norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(20):
# Gradient step.
Hn = H - gamma * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_gamma = not suff_decr
if decr_gamma:
if suff_decr:
H = Hn
break
else:
gamma *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
gamma /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.",
ConvergenceWarning)
return H, grad, n_iter
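# A tiny usage sketch of the subproblem solver above (synthetic data,
# illustration only; not executed as part of the benchmark):
#
# rng = np.random.RandomState(0)
# X_demo = np.abs(rng.randn(20, 15))
# W_demo = np.abs(rng.randn(20, 5))
# H0 = np.abs(rng.randn(5, 15))
# H_sol, grad, n_it = _nls_subproblem(X_demo, W_demo, H0, tol=1e-4,
#                                     max_iter=200)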
def _fit_projected_gradient(X, W, H, tol, max_iter, nls_max_iter, alpha,
l1_ratio):
gradW = (np.dot(W, np.dot(H, H.T)) -
safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H) -
safe_sparse_dot(W.T, X, dense_output=True))
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
# max(0.001, tol) to force alternating minimizations of W and H
tolW = max(0.001, tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, max_iter + 1):
# stopping condition as discussed in paper
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
break
# update W
Wt, gradWt, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
W, gradW = Wt.T, gradWt.T
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0 # fix up negative zeros
if n_iter == max_iter:
Wt, _, _ = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
W = Wt.T
return W, H, n_iter
class _PGNMF(NMF):
"""Non-Negative Matrix Factorization (NMF) with projected gradient solver.
This class is private and for comparison purpose only.
It may change or disappear without notice.
"""
def __init__(self, n_components=None, solver='pg', init=None,
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., nls_max_iter=10):
self.nls_max_iter = nls_max_iter
self.n_components = n_components
self.init = init
self.solver = solver
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
def fit(self, X, y=None, **params):
self.fit_transform(X, **params)
return self
def transform(self, X):
check_is_fitted(self, 'components_')
H = self.components_
W, _, self.n_iter_ = self._fit_transform(X, H=H, update_H=False)
return W
def inverse_transform(self, W):
check_is_fitted(self, 'components_')
return np.dot(W, self.components_)
def fit_transform(self, X, y=None, W=None, H=None):
W, H, self.n_iter = self._fit_transform(X, W=W, H=H, update_H=True)
self.components_ = H
return W
def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, "NMF (input X)")
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
if (not isinstance(n_components, INTEGER_TYPES) or
n_components <= 0):
raise ValueError("Number of components must be a positive integer;"
" got (n_components=%r)" % n_components)
if not isinstance(self.max_iter, INTEGER_TYPES) or self.max_iter < 0:
raise ValueError("Maximum number of iterations must be a positive "
"integer; got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
# check W and H, or initialize them
if self.init == 'custom' and update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=self.init,
random_state=self.random_state)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(
X, W, H, self.tol, self.max_iter, self.nls_max_iter,
self.alpha, self.l1_ratio)
else: # transform
Wt, _, n_iter = _nls_subproblem(X.T, H.T, W.T, self.tol,
self.nls_max_iter,
alpha=self.alpha,
l1_ratio=self.l1_ratio)
W = Wt.T
if n_iter == self.max_iter and self.tol > 0:
warnings.warn("Maximum number of iteration %d reached. Increase it"
" to improve convergence." % self.max_iter,
ConvergenceWarning)
return W, H, n_iter
#################
# End of _PGNMF #
#################
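# A minimal usage sketch of the private _PGNMF estimator defined above
# (random non-negative data, illustration only; not executed as part of the
# benchmark):
#
# rng = np.random.RandomState(0)
# X_demo = np.abs(rng.randn(50, 20))
# pg = _PGNMF(n_components=5, tol=1e-4, max_iter=200)
# W_demo = pg.fit_transform(X_demo)
# H_demo = pg.components_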
def plot_results(results_df, plot_name):
if results_df is None:
return None
plt.figure(figsize=(16, 6))
colors = 'bgr'
markers = 'ovs'
ax = plt.subplot(1, 3, 1)
for i, init in enumerate(np.unique(results_df['init'])):
plt.subplot(1, 3, i + 1, sharex=ax, sharey=ax)
for j, method in enumerate(np.unique(results_df['method'])):
mask = np.logical_and(results_df['init'] == init,
results_df['method'] == method)
selected_items = results_df[mask]
plt.plot(selected_items['time'], selected_items['loss'],
color=colors[j % len(colors)], ls='-',
marker=markers[j % len(markers)],
label=method)
plt.legend(loc=0, fontsize='x-small')
plt.xlabel("Time (s)")
plt.ylabel("loss")
plt.title("%s" % init)
plt.suptitle(plot_name, fontsize=16)
@ignore_warnings(category=ConvergenceWarning)
# use joblib to cache the results.
# X_shape is specified in the arguments to avoid hashing X
@mem.cache(ignore=['X', 'W0', 'H0'])
def bench_one(name, X, W0, H0, X_shape, clf_type, clf_params, init,
n_components, random_state):
W = W0.copy()
H = H0.copy()
clf = clf_type(**clf_params)
st = time()
W = clf.fit_transform(X, W=W, H=H)
end = time()
H = clf.components_
this_loss = _beta_divergence(X, W, H, 2.0, True)
duration = end - st
return this_loss, duration
def run_bench(X, clfs, plot_name, n_components, tol, alpha, l1_ratio):
start = time()
results = []
for name, clf_type, iter_range, clf_params in clfs:
print("Training %s:" % name)
for rs, init in enumerate(('nndsvd', 'nndsvdar', 'random')):
print(" %s %s: " % (init, " " * (8 - len(init))), end="")
W, H = _initialize_nmf(X, n_components, init, 1e-6, rs)
for max_iter in iter_range:
clf_params['alpha'] = alpha
clf_params['l1_ratio'] = l1_ratio
clf_params['max_iter'] = max_iter
clf_params['tol'] = tol
clf_params['random_state'] = rs
clf_params['init'] = 'custom'
clf_params['n_components'] = n_components
this_loss, duration = bench_one(name, X, W, H, X.shape,
clf_type, clf_params,
init, n_components, rs)
init_name = "init='%s'" % init
results.append((name, this_loss, duration, init_name))
# print("loss: %.6f, time: %.3f sec" % (this_loss, duration))
print(".", end="")
sys.stdout.flush()
print(" ")
    # Use a pandas dataframe to organize the results
results_df = pandas.DataFrame(results,
columns="method loss time init".split())
print("Total time = %0.3f sec\n" % (time() - start))
# plot the results
plot_results(results_df, plot_name)
return results_df
def load_20news():
print("Loading 20 newsgroups dataset")
print("-----------------------------")
from sklearn.datasets import fetch_20newsgroups
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data)
return tfidf
def load_faces():
print("Loading Olivetti face dataset")
print("-----------------------------")
from sklearn.datasets import fetch_olivetti_faces
faces = fetch_olivetti_faces(shuffle=True)
return faces.data
def build_clfs(cd_iters, pg_iters, mu_iters):
clfs = [("Coordinate Descent", NMF, cd_iters, {'solver': 'cd'}),
("Projected Gradient", _PGNMF, pg_iters, {'solver': 'pg'}),
("Multiplicative Update", NMF, mu_iters, {'solver': 'mu'}),
]
return clfs
if __name__ == '__main__':
alpha = 0.
l1_ratio = 0.5
n_components = 10
tol = 1e-15
# first benchmark on 20 newsgroup dataset: sparse, shape(11314, 39116)
plot_name = "20 Newsgroups sparse dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 6)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_20news = load_20news()
run_bench(X_20news, clfs, plot_name, n_components, tol, alpha, l1_ratio)
# second benchmark on Olivetti faces dataset: dense, shape(400, 4096)
plot_name = "Olivetti Faces dense dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 12)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_faces = load_faces()
run_bench(X_faces, clfs, plot_name, n_components, tol, alpha, l1_ratio,)
plt.show()
|
bsd-3-clause
|
petosegan/scikit-learn
|
examples/cluster/plot_kmeans_assumptions.py
|
270
|
2040
|
"""
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
|
bsd-3-clause
|
ELind77/gensim
|
gensim/sklearn_integration/sklearn_wrapper_gensim_atmodel.py
|
1
|
5466
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit learn interface for gensim for easy use of gensim with scikit-learn
Follows scikit-learn API conventions
"""
import numpy as np
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim.sklearn_integration import BaseSklearnWrapper
class SklATModel(BaseSklearnWrapper, TransformerMixin, BaseEstimator):
"""
Base AuthorTopic module
"""
def __init__(self, num_topics=100, id2word=None, author2doc=None, doc2author=None,
chunksize=2000, passes=1, iterations=50, decay=0.5, offset=1.0,
alpha='symmetric', eta='symmetric', update_every=1, eval_every=10,
gamma_threshold=0.001, serialized=False, serialization_path=None,
minimum_probability=0.01, random_state=None):
"""
Sklearn wrapper for AuthorTopic model. Class derived from gensim.models.AuthorTopicModel
"""
self.gensim_model = None
self.num_topics = num_topics
self.id2word = id2word
self.author2doc = author2doc
self.doc2author = doc2author
self.chunksize = chunksize
self.passes = passes
self.iterations = iterations
self.decay = decay
self.offset = offset
self.alpha = alpha
self.eta = eta
self.update_every = update_every
self.eval_every = eval_every
self.gamma_threshold = gamma_threshold
self.serialized = serialized
self.serialization_path = serialization_path
self.minimum_probability = minimum_probability
self.random_state = random_state
def get_params(self, deep=True):
"""
Returns all parameters as dictionary.
"""
return {"num_topics": self.num_topics, "id2word": self.id2word,
"author2doc": self.author2doc, "doc2author": self.doc2author, "chunksize": self.chunksize,
"passes": self.passes, "iterations": self.iterations, "decay": self.decay,
"offset": self.offset, "alpha": self.alpha, "eta": self.eta, "update_every": self.update_every,
"eval_every": self.eval_every, "gamma_threshold": self.gamma_threshold,
"serialized": self.serialized, "serialization_path": self.serialization_path,
"minimum_probability": self.minimum_probability, "random_state": self.random_state}
def set_params(self, **parameters):
"""
Set all parameters.
"""
super(SklATModel, self).set_params(**parameters)
return self
def fit(self, X, y=None):
"""
Fit the model according to the given training data.
Calls gensim.models.AuthorTopicModel
"""
self.gensim_model = models.AuthorTopicModel(corpus=X, num_topics=self.num_topics, id2word=self.id2word,
author2doc=self.author2doc, doc2author=self.doc2author, chunksize=self.chunksize, passes=self.passes,
iterations=self.iterations, decay=self.decay, offset=self.offset, alpha=self.alpha, eta=self.eta,
update_every=self.update_every, eval_every=self.eval_every, gamma_threshold=self.gamma_threshold, serialized=self.serialized,
serialization_path=self.serialization_path, minimum_probability=self.minimum_probability, random_state=self.random_state)
return self
def transform(self, author_names):
"""
        Return the topic distribution for the input authors as an array of
        shape (num_authors, num_topics) containing topic probabilities.
        """
        # The input may be a single author name or a list of author names
if self.gensim_model is None:
raise NotFittedError("This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method.")
check = lambda x: [x] if not isinstance(x, list) else x
author_names = check(author_names)
X = [[] for _ in range(0, len(author_names))]
for k, v in enumerate(author_names):
transformed_author = self.gensim_model[v]
probs_author = list(map(lambda x: x[1], transformed_author))
# Everything should be equal in length
if len(probs_author) != self.num_topics:
probs_author.extend([1e-12] * (self.num_topics - len(probs_author)))
X[k] = probs_author
return np.reshape(np.array(X), (len(author_names), self.num_topics))
def partial_fit(self, X, author2doc=None, doc2author=None):
"""
Train model over X.
"""
if self.gensim_model is None:
self.gensim_model = models.AuthorTopicModel(corpus=X, num_topics=self.num_topics, id2word=self.id2word,
author2doc=self.author2doc, doc2author=self.doc2author, chunksize=self.chunksize, passes=self.passes,
iterations=self.iterations, decay=self.decay, offset=self.offset, alpha=self.alpha, eta=self.eta,
update_every=self.update_every, eval_every=self.eval_every, gamma_threshold=self.gamma_threshold, serialized=self.serialized,
serialization_path=self.serialization_path, minimum_probability=self.minimum_probability, random_state=self.random_state)
self.gensim_model.update(corpus=X, author2doc=author2doc, doc2author=doc2author)
return self
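# A minimal usage sketch (toy corpus and author mapping made up for
# illustration; documents follow gensim's bag-of-words format):
#
# corpus = [[(0, 1), (1, 1)], [(1, 1), (2, 2)]]   # two BoW documents
# author2doc = {'alice': [0], 'bob': [1]}
# model = SklATModel(num_topics=2, author2doc=author2doc)
# model.fit(corpus)
# print(model.transform(['alice', 'bob']))   # array of shape (2, 2)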
|
lgpl-2.1
|
chrisburr/scikit-learn
|
sklearn/feature_extraction/text.py
|
7
|
50272
|
# -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
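    # Worked example (illustrative): for ngram_range=(2, 2) the word "cat" is
    # padded to ' cat ' and yields the character 2-grams
    # [' c', 'ca', 'at', 't '], i.e. word boundaries are marked by the added
    # spaces.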
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
        check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can
        be of type string or bytes, which are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
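# A short usage sketch of HashingVectorizer (toy documents, illustration only):
#
# vec = HashingVectorizer(n_features=2 ** 8, norm='l2')
# X_demo = vec.transform(["the cat sat", "the dog barked"])
# X_demo.shape == (2, 256)   # sparse matrix; no vocabulary is stored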
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can
        be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents; if
integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents; if
integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
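Examples
--------
A minimal, illustrative sketch (hypothetical two-document corpus; the exact
vocabulary and matrix shape depend on the input)::
    >>> corpus = ['the cat sat on the mat', 'the dog sat']
    >>> vectorizer = CountVectorizer()
    >>> X = vectorizer.fit_transform(corpus)    # sparse count matrix
    >>> names = vectorizer.get_feature_names()  # sorted vocabulary terms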
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that appear in more documents than ``high`` or in fewer
documents than ``low``, modifying the vocabulary and restricting it to
at most the ``limit`` most frequent features.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
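Examples
--------
A minimal, illustrative sketch using a hypothetical count matrix. With the
default ``smooth_idf=True``, a term seen in ``df`` of ``n`` documents gets
the weight ``tf * (log((1 + n) / (1 + df)) + 1)`` before l2 normalization::
    >>> import numpy as np
    >>> counts = np.array([[3, 0, 1],
    ...                    [2, 0, 0],
    ...                    [3, 0, 0],
    ...                    [4, 0, 0]])
    >>> transformer = TfidfTransformer(smooth_idf=True)
    >>> tfidf = transformer.fit_transform(counts)  # sparse, shape (4, 3)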
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# in-place '*=' is not supported for this sparse product, so rebind X
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can
be of type string or bytes, which will be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents; if
integer, absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents; if
integer, absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents, count the occurrences of each token and return
them as a sparse matrix.
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
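Examples
--------
A minimal, illustrative sketch (hypothetical corpus; equivalent to running
CountVectorizer followed by TfidfTransformer)::
    >>> corpus = ['the cat sat on the mat', 'the dog sat']
    >>> vectorizer = TfidfVectorizer()
    >>> X = vectorizer.fit_transform(corpus)  # l2-normalized tf-idf matrix
    >>> idf = vectorizer.idf_                 # learned idf weights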
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
|
bsd-3-clause
|
Chandra-MARX/marx-test
|
tests/positions.py
|
1
|
21371
|
'''
In every |marx| simulation, one or more sources are placed at some sky position.
|marx| simulates photons coming from that position, traces them through the
mirror and gratings and finally places them on the chip. With a known
aspect solution, chip coordinates can then be transformed back to sky
coordinates. In general, this will not recover the exact sky position where a
photon started out. A big part of that is scatter in the mirrors, which blurs
the image (see :ref:`sect-tests.PSF` for tests of the PSF).
However, with a large number of photons, we can fit the average position which
should be close to the real sky position.
In real observations, other factors contribute, such as the finite
resolution of the detectors (|marx| usually takes that into account, but it can
be switched off through the ``--pixadj="EXACT"`` switch in :marxtool:`marx2fits`)
and the uncertainty of the aspect solution.
Within a single observation, positions will be less certain for fainter sources
(due to Poisson statistics) and for sources at larger off-axis angles (due to the
larger PSF).
'''
import shutil
import subprocess
import os
from collections import OrderedDict
from marxtest import base
from marxtest.process_utils import marxpars_from_asol
title = 'Coordinates on the sky and the chip'
tests = ['ONC', 'RegularGrid', 'RegularGridHRCI']
class ONC(base.MarxTest):
'''The `Orion Nebula Cluster (ONC) <http://simbad.u-strasbg.fr/simbad/sim-basic?Ident=onc>`_
is a dense star forming region with about 1600 X-ray sources observed
in the COUP survey by
`Getman et al (2005) <http://adsabs.harvard.edu/abs/2005ApJS..160..319G>`_ .
We simulate this field with |marx| and then run a source detection to check
how well we recover the input coordinates. This will depend on the number
of counts detected and the position in the field.
To simplify the simulation input, we assume that all sources have flat
lightcurves and are
monoenergetic at the observed mean energy (the energy matters because
the effective area is energy dependent and so is the PSF).
We write a short C code that reads an input coordinate list and generates
the photons in this manner. We compile
the code, and call it as a :ref:`sect-usersource`.
'''
title = 'Chandra Orion Ultradeep project'
obsid = 3744
figures = OrderedDict([('ds9', {'alternative': '',
'caption': '`ds9`_ image of the observed data (left) and simulation (right). The sources detected in the simulation are overlayed. There are few cases where the read-out streak is identified as source or where two close sources are detected as one larger resolved source. The COUP catalog used as input is based on much longer merged observations and has been checked against optical and IR observations to remove such spurious detections.'}),
('dist', {'alternative': 'Scatter plot with distance from aimpoint vs coordinate error in the fit.',
'caption': 'Apart from a few outliers close to the aimpoint (mostly confused sources, see above), the distribution of coordinate errors follows spreads out with increasing distance, i.e. size of the PSF.'})
])
summary='For this field, we know the true input coordinates so we can check how well |marx| reproduces those. In the center of the field (about one arcmin) the coordinate error is less than the size of an ACIS pixel for all sources and the average error never grows much beyond 1 ACIS pixel even for far off-axis sources. The upper envelope of the distribution of errors is approximately linear and reaches 1 arcsec at a distance of 200 arcsec. No strong correlation of coordinate error and count rate of the source is apparent, indicating that the dominant error is not just due to Poisson counting statistics.'
@base.Python
def step_2(self):
'''Make input coordinate table
Coordinates are relative to pointing direction in arcmin'''
import os
from astropy.table import Table
from astropy.io import fits
asolfile = self.get_data_file('asol')
asol = fits.getheader(asolfile, 1)
coup = Table.read(os.path.join(self.pkg_data, 'COUP.tsv'),
format='ascii.fast_tab')
tab = Table()
tab['RA'] = (coup['RAJ2000'] - asol['RA_NOM']) * 60
tab['DEC'] = (coup['DEJ2000'] - asol['DEC_NOM']) * 60
tab['weight'] = 10**(coup['Lt'] - 27)
tab['energy'] = coup['<E>']
tab.write('coup.marxin', format='ascii.no_header', overwrite=True)
@base.CCode
def step_5(self):
'''C code for a grid of sources.
(``user.h`` and ``jdmath.h`` are shipped with |marx|.)'''
ccode=r'''
#include <stdio.h>
#include <stdlib.h>
#include <jdmath.h>
#include "user.h"
/* This user source implements many point sources via a file that
* specifies the source positions and energies. The current implementation
* assumes the format:
* RA Dec weight energy
* Here RA, Dec specify the source position, weight specifies the strength
* of the source in relation to the others.
*/
typedef struct
{
double cosx, cosy, cosz;
double weight;
double energy;
}
Point_Source_Type;
static unsigned int Num_Points;
static Point_Source_Type *Point_Sources;
static unsigned int Max_Num_Points;
static char *do_realloc (char *p, unsigned int len)
{
if (p == NULL)
p = malloc (len);
else
p = realloc (p, len);
if (p == NULL)
fprintf (stderr, "Not enough memory\n");
return p;
}
static void free_sources (void)
{
if (Point_Sources == NULL)
return;
free ((char *) Point_Sources);
Point_Sources = NULL;
}
static int add_source (double ra, double dec, double weight, double energy)
{
Point_Source_Type *p;
double cosx, cosy, cosz;
/* Convert to God's units from arc-min */
ra = ra * (PI/(180.0 * 60.0));
dec = dec * (PI/(180.0 * 60.0));
if (Max_Num_Points == Num_Points)
{
Max_Num_Points += 32;
p = (Point_Source_Type *)do_realloc ((char *)Point_Sources, Max_Num_Points * sizeof (Point_Source_Type));
if (p == NULL)
{
free_sources ();
return -1;
}
Point_Sources = p;
}
p = Point_Sources + Num_Points;
/* Note that the minus sign is to generate a vector pointing from the
* source to the origin
*/
p->cosx = -cos (dec) * cos (ra);
p->cosy = -cos (dec) * sin(ra);
p->cosz = -sin (dec);
p->weight = weight;
p->energy = energy;
Num_Points += 1;
return 0;
}
static void normalize_sources (void)
{
double total;
unsigned int i;
total = 0;
for (i = 0; i < Num_Points; i++)
{
Point_Sources[i].weight += total;
total = Point_Sources[i].weight;
}
for (i = 0; i < Num_Points; i++)
Point_Sources[i].weight /= total;
/* Make sure no round-off error affects the weight of the last point */
Point_Sources[Num_Points - 1].weight = 1.0;
}
int user_open_source (char **argv, int argc, double area,
double cosx, double cosy, double cosz)
{
FILE *fp;
char line[1024];
char *file;
unsigned int linenum;
file = argv[0];
if (file == NULL)
{
fprintf (stderr, "UserSource Model requires FILE as argument\n");
return -1;
}
fp = fopen (file, "r");
if (fp == NULL)
{
fprintf (stderr, "Unable to open %s\n", file);
return -1;
}
linenum = 0;
while (NULL != fgets (line, sizeof (line), fp))
{
double ra, dec, weight, energy;
linenum++;
if (4 != sscanf (line, "%lf %lf %lf %lf", &ra, &dec, &weight, &energy))
continue;
if (weight <= 0.0)
{
fprintf (stderr, "weight on line %d of %s must be positive\n",
linenum, file);
free_sources ();
return -1;
}
if (-1 == add_source (ra, dec, weight, energy))
{
fclose (fp);
return -1;
}
}
fclose (fp);
if (Num_Points == 0)
{
fprintf (stderr, "%s contains no sources\n", file);
return -1;
}
normalize_sources ();
return 0;
}
void user_close_source (void)
{
free_sources ();
}
int user_create_ray (double *delta_t, double *energy,
double *cosx, double *cosy, double *cosz)
{
double r;
Point_Source_Type *p;
p = Point_Sources;
r = JDMrandom ();
while (r > p->weight)
p++;
*delta_t = -1.0;
*energy = p->energy;
*cosx = p->cosx;
*cosy = p->cosy;
*cosz = p->cosz;
return 0;
}
int main (int a, char **b)
{
(void) a;
(void) b;
return 1;
}
'''
return 'pnts.c', ccode
@base.Python
def step_6(self):
'''compile USER code
|marx| ships with a few examples of user sources. We pick one
of them, copy it to the right directory and compile it with gcc.
'''
marxpath = self.conf.get('marx', 'path')
src = os.path.join(marxpath,
'share', 'doc', 'marx', 'examples', 'user-source')
shutil.copy(os.path.join(src, 'user.h'),
os.path.join(self.basepath, 'user.h'))
jdmath_h = os.path.join(marxpath, 'include')
jdmath_a = os.path.join(marxpath, 'lib', 'libjdmath.a')
subprocess.call(['gcc',
'-shared', 'pnts.c', '-o', 'pnts.so', '-fPIC',
'-I' + jdmath_h, jdmath_a])
@base.Shell
def step_7(self):
'''Unzip fits file.
MARX cannot read zipped fits files, so we need to unzip the .fits.gz asol
files that we downloaded from the archive. On the other hand, `CIAO`_
tools work on both zipped and unzipped files, so there is no need to
unzip all of them, just the files that MARX reads as input.
'''
asol = self.get_data_file('asol')
return [f'gunzip -f {asol}']
@base.Marx
def step_8(self):
'''run marx USER source matching observation'''
asol = self.get_data_file('asol')
evt = self.get_data_file('evt2')
pars = marxpars_from_asol(self.conf, asol, evt)
pars['OutputDir'] = 'COUP'
pars['SourceType'] = 'USER'
pars['UserSourceFile'] = os.path.join(self.basepath, 'pnts.so')
pars['UserSourceArgs'] = os.path.join(self.basepath, 'coup.marxin')
return pars
@base.Marx2fits
def step_9(self):
'turn into fits file'
return '--pixadj=EDSER', 'COUP', 'COUP.fits'
@base.Ciao
def step_10(self):
'''ds9 image of the PSF
In the observation, the brightest sources are piled-up. We don't bother
simulating this here, so we just set the scaling limits to bring out
the fainter details and ignore the bright peaks.
'''
return ['''ds9 -log -cmap heat {0} COUP.fits -scale limits 0 2000 -frame 1 -regions command 'text 5:35:15 -5:22:09 # text=Observation font="helvetica 24"' -frame 2 -regions command 'text 5:35:15 -5:22:09 # text=MARX font="helvetica 24"' -region load src.fits -saveimage {1} -exit'''.format(self.get_data_file("evt2"), self.figpath(list(self.figures.keys())[0]))]
@base.Ciao
def step_11(self):
'''Source detection'''
out = ['dmcopy "COUP.fits[EVENTS][bin x=2500:5500:2,y=2500:5500:2]" im.fits option=image clobber=yes',
'mkpsfmap im.fits psf.map 1.4 ecf=0.5',
'celldetect im.fits src.fits psffile=psf.map clobber=yes'
]
return out
@base.Python
def step_15(self):
'''Check position of detected sources'''
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy.io import fits
src = Table.read('src.fits')
srcin = Table.read(os.path.join(self.pkg_data, 'COUP.tsv'),
format='ascii.fast_tab')
src_co = SkyCoord(src['RA'], src['DEC'], unit='deg')
srcin_co = SkyCoord(srcin['RAJ2000'], srcin['DEJ2000'], unit='deg')
idx, d2d, d3d = src_co.match_to_catalog_sky(srcin_co)
asolfile = self.get_data_file('asol')
asol = fits.getheader(asolfile, 1)
cen = SkyCoord(asol['RA_NOM'], asol['DEC_NOM'], unit='deg')
d = cen.separation(src_co).arcsec
fig = plt.figure()
ax1 = plt.subplot(111)
scat1 = ax1.scatter(d, d2d.arcsec, c=np.log10(src['NET_COUNTS']), lw=1)
ax1.set_xlabel('distance from aimpoint [arcsec]')
ax1.set_ylabel('coordinate error [arcsec]')
ax1.set_xlim([0, 350])
ax1.set_ylim([0, 2])
cbar1 = fig.colorbar(scat1, ax=ax1)
cbar1.set_label('log(net counts per source)')
fig.savefig(self.figpath(list(self.figures.keys())[1]))
class RegularGrid(base.MarxTest):
'''In this example we place a radial grid of sources on the sky. Each source
emits an equal number of photons (exactly, no Poisson statistics) so that
we can compare the accuracy of the position we recover. Note that the
*detected* number of photons will be smaller for off-axis sources because
of vignetting!
We write a short C code that generates the photons in this manner, compile
it, and call it as a :ref:`sect-usersource`.
'''
DetectorType = 'ACIS-I'
title = 'Regular Grid (ACIS)'
figures = OrderedDict([('ds9', {'alternative': 'Sources positioned like knots in a spider web.',
'caption': '`ds9`_ image of the simulation. The size of the PSF increases further away from the aimpoint.'}),
('hist', {'alternative': 'Plot is described in the caption.',
'caption': '*left*: The error in the position (measured radially to the optical axis) increases with the distance to the optical axis. One part of this is just that the effective area and thus the number of counts decreases. There is also a systematic trend where sources at larger off-axis angles are fitted too close to the center. Further investigation is necessary to check whether this is a problem related to |marx| or to :ciao:`celldetect`. In any case, the typical offset is below 0.2 arcsec, which is less than half a pixel in ACIS. *right*: Difference in position angle between input and fit. (Outliers beyond the plot range are not shown.)'})
])
summary = 'The input position is typically recovered to much better than one pixel for sources with a few hundred counts. There is a small systematic trend that needs to be studied further.'
@base.CCode
def step_5(self):
'''C code for a grid of sources.
(``user.h`` is shipped with |marx|.)'''
ccode='''
#include <stdio.h>
#include <math.h>
#include "user.h"
static double Source_CosX;
static double Source_CosY;
static double Source_CosZ;
int user_open_source (char **argv, int argc, double area,
double cosx, double cosy, double cosz)
{
return 0;
}
void user_close_source (void)
{
}
static double To_Radians = (M_PI / 180.0 / 3600.0);
#define ARC_SECONDS_PER_CELL 50
#define ANGULAR_STEPS 16
int user_create_ray (double *delta_t, double *energy,
double *cosx, double *cosy, double *cosz)
{
static int last_i = 0;
static int last_j = 0;
double theta, phi;
double cos_theta, sin_theta;
if (last_j == ANGULAR_STEPS){
last_j = 0;
last_i++;
}
if (last_i == 20) last_i = 0;
theta = To_Radians * last_i * ARC_SECONDS_PER_CELL;
phi = (10. /180 * M_PI) + last_j * 2 * M_PI / ANGULAR_STEPS;
sin_theta = sin(theta);
*cosx = -cos (theta);
*cosy = sin_theta * cos (phi);
*cosz = sin_theta * sin (phi);
*delta_t = -1.0;
*energy = -1.0;
if (last_i ==0){
last_i++;
}
else {
last_j++;
}
return 0;
}
int main (int a, char **b)
{
(void) a;
(void) b;
return 1;
}'''
return 'radialgrid.c', ccode
@base.Python
def step_6(self):
'''compile USER code'''
marxpath = self.conf.get('marx', 'path')
src = os.path.join(marxpath, 'share', 'doc', 'marx', 'examples',
'user-source', 'user.h')
shutil.copy(os.path.join(src),
os.path.join(self.basepath, 'user.h'))
subprocess.call(['gcc', '-lm', '-fPIC',
'-shared', 'radialgrid.c', '-o', 'radialgrid.so'])
@base.Marx
def step_7(self):
'''run USER source'''
return {'SourceType': 'USER', 'OutputDir': 'points',
'GratingType': 'NONE',
'SourceRA': 90., 'SourceDEC': 0.,
'RA_Nom': 90., 'Dec_Nom': 0, 'Roll_Nom': 0,
'DetectorType': self.DetectorType,
'UserSourceFile': os.path.join(self.basepath, 'radialgrid.so'),
'NumRays': -100000, 'ExposureTime': 0}
@base.Marx2fits
def step_8(self):
'turn into fits file'
return '--pixadj=EDSER', 'points', 'points.fits'
@base.Ciao
def step_10(self):
'''ds9 image of the PSF'''
return ['''ds9 -width 500 -height 500 -log -cmap heat points.fits -pan to 4097 4097 physical -zoom 0.5 -bin factor 2 -grid -saveimage {0} -exit'''.format(self.figpath(list(self.figures.keys())[0]))]
@base.Ciao
def step_11(self):
'''Source detection'''
out = ['dmcopy "points.fits[EVENTS][bin x=3000:5100:2,y=3000:5100:2]" im.fits option=image clobber=yes',
'mkpsfmap im.fits psf.map 1.4 ecf=0.5 clobber=yes',
'celldetect im.fits src.fits psffile=psf.map clobber=yes'
]
return out
@base.Python
def step_15(self):
'''Check position of detected sources'''
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
from astropy.coordinates import SkyCoord
src = Table.read('src.fits')
# Find distance from input position.
src['RA_INPUT'] = src['RA'] - (src['RA'] // (360./16.)) * (360./16.) - 10.
# Problem: Might expect source at 1.0,
# but measure at 0.99. In this case distance to next lower source
# is 0.99. Thus shift input by 0.005 (about 50 arcsec / 2)
# before integer division
src['DEC_INPUT'] = src['DEC'] - ((0.005 + src['DEC']) // (50./3600.)) * (50./3600.)
cen = SkyCoord(90., 0, unit='deg')
det = SkyCoord(src['RA'], src['DEC'], unit='deg')
d = cen.separation(det).arcsec
d_err = np.mod(d + 10, 50.) - 10
ang = cen.position_angle(det).degree
# Subtract offset that we placed in the C code to avoid 0./360. ambiguity
# Step width is 360./16 = 22.5 deg
# Offset is 10 deg. Complement we find here is 12.5 deg.
ang = ang - 12.5
ang_err = np.mod(ang + 2, 360. / 16.) - 2
ind = d > 10
fig = plt.figure(figsize=(8, 4))
ax1 = plt.subplot(121)
scat1 = ax1.scatter(d, d_err, c=src['NET_COUNTS'], lw=1)
ax1.set_xlabel('distance [arcsec]')
ax1.set_ylabel('distance error [arcsec]')
ax1.set_xlim([-10, 620])
ax1.set_ylim([-1, 0.5])
ax2 = plt.subplot(122)
scat2 = ax2.scatter(ang, ang_err, c=src['NET_COUNTS'], lw=1)
ax2.set_xlabel('pos ang [deg]')
ax2.set_ylabel('pos ang error [deg]')
ax2.set_xlim([-5, 350])
ax2.set_ylim([-0.3, 0.3])
cbar2 = fig.colorbar(scat2, ax=ax2)
cbar2.set_label('net counts per source')
fig.savefig(self.figpath(list(self.figures.keys())[1]))
class RegularGridHRCI(RegularGrid):
'''Same as above, but with HRC-I as a detector.
The field-of-view for the HRC-I is larger than for ACIS-I, but the PSF becomes
very large at large off-axis angles and thus the positional uncertainty
will be so large that a comparison to |marx| is no longer helpful to test
the accuracy of the |marx| simulations.
'''
figures = OrderedDict([('ds9', {'alternative': 'Sources positioned like knots in a spider web. The image is very similar to the previous ACIS example.',
'caption': '`ds9`_ image of the simulation. The size of the PSF increases further away from the aimpoint.'}),
('hist', {'alternative': 'Plot is described in the caption.',
'caption': 'See previous example. The same trends are visible with a slightly larger scatter.'})
])
summary = 'In the central few arcmin the input position is typically recovered to better than 0.2 pixels for sources with a few hundred counts.'
DetectorType = 'HRC-I'
title = 'Regular grid (HRC)'
@base.Ciao
def step_10(self):
'''ds9 image of the PSF'''
return ['''ds9 -width 500 -height 500 -log -cmap heat points.fits -pan to 16392 16392 physical -bin factor 16 -grid -saveimage {0} -exit'''.format(self.figpath(list(self.figures.keys())[0]))]
@base.Ciao
def step_11(self):
'''Source detection'''
out = ['dmcopy "points.fits[EVENTS][bin x=8500:24500:8,y=8500:24500:8]" im.fits option=image clobber=yes',
'mkpsfmap im.fits psf.map 1.4 ecf=0.5 clobber=yes',
'celldetect im.fits src.fits psffile=psf.map clobber=yes'
]
return out
|
gpl-2.0
|
velizarefremov/vascularnetworks
|
scores_srx.py
|
1
|
1651
|
from sklearn.metrics import f1_score,precision_score,recall_score,precision_recall_curve
from itkutilities import get_itk_array
import numpy as np
import sys
def call_f1scores(grdtruth,prediction,masks):
prec,rec,thres = precision_recall_curve(grdtruth.flatten(),prediction.flatten())
f1 = (2*prec*rec)/(prec+rec)
armax = np.argmax(f1)
max_thres = thres[armax]
img1 = np.asarray(prediction > max_thres,dtype=int)
print('threshold :',max_thres)
print('dice:',f1[armax])
# print 'accuracy:',1-(np.sum(np.asarray(img1!=grdtruth,dtype=int)))
if __name__ == '__main__':
scores = []#np.zeros(20)
labels = []#np.zeros(20)
preds = []#np.zeros(20)
masks = []#np.zeros(20)
for num in range(int(sys.argv[1]), int(sys.argv[2])):
d = "%01d" % num
labels.append(get_itk_array('labels_srxray/'+d+'.nii.gz'))
preds.append(get_itk_array('confmaps_matthias/'+d+'.mhd'))
# pred = np.asarray(pred/128,dtype='int32')
# pred = np.flipud(np.fliplr(pred))
# print np.unique(label),np.unique(pred)
# print np.mean(label==pred)
#print f1_score(label.flatten(),pred.flatten())
#print precision_score(label.flatten(),pred.flatten())
#print recall_score(label.flatten(),pred.flatten())
#f1 = f1_score(label.flatten(),pred.flatten(),sample_weight=mask.flatten())
#precision = precision_score(label.flatten(),pred.flatten(),sample_weight=mask.flatten())
#recall = recall_score(label.flatten(),pred.flatten(),sample_weight=mask.flatten())
#print num, " : ", f1
#scores[num-1] = f1
labels = np.array(labels)
preds = np.array(preds)
call_f1scores(labels,preds,masks)
#print scores
|
gpl-3.0
|
dkushner/zipline
|
zipline/finance/risk/period.py
|
17
|
11952
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logbook
import math
import numpy as np
import numpy.linalg as la
from six import iteritems
from zipline.finance import trading
import pandas as pd
from . import risk
from . risk import (
alpha,
check_entry,
downside_risk,
information_ratio,
sharpe_ratio,
sortino_ratio,
)
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
log = logbook.Logger('Risk Period')
choose_treasury = functools.partial(risk.choose_treasury,
risk.select_treasury_duration)
class RiskMetricsPeriod(object):
def __init__(self, start_date, end_date, returns,
benchmark_returns=None,
algorithm_leverages=None):
treasury_curves = trading.environment.treasury_curves
if treasury_curves.index[-1] >= start_date:
mask = ((treasury_curves.index >= start_date) &
(treasury_curves.index <= end_date))
self.treasury_curves = treasury_curves[mask]
else:
# our test is beyond the treasury curve history
# so we'll use the last available treasury curve
self.treasury_curves = treasury_curves[-1:]
self.start_date = start_date
self.end_date = end_date
if benchmark_returns is None:
br = trading.environment.benchmark_returns
benchmark_returns = br[(br.index >= returns.index[0]) &
(br.index <= returns.index[-1])]
self.algorithm_returns = self.mask_returns_to_period(returns)
self.benchmark_returns = self.mask_returns_to_period(benchmark_returns)
self.algorithm_leverages = algorithm_leverages
self.calculate_metrics()
def calculate_metrics(self):
self.benchmark_period_returns = \
self.calculate_period_returns(self.benchmark_returns)
self.algorithm_period_returns = \
self.calculate_period_returns(self.algorithm_returns)
if not self.algorithm_returns.index.equals(
self.benchmark_returns.index
):
message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}"
message = message.format(
bm_count=len(self.benchmark_returns),
algo_count=len(self.algorithm_returns),
start=self.start_date,
end=self.end_date
)
raise Exception(message)
self.num_trading_days = len(self.benchmark_returns)
self.trading_day_counts = pd.stats.moments.rolling_count(
self.algorithm_returns, self.num_trading_days)
self.mean_algorithm_returns = pd.Series(
index=self.algorithm_returns.index)
for dt, ret in self.algorithm_returns.iteritems():
self.mean_algorithm_returns[dt] = (
self.algorithm_returns[:dt].sum() /
self.trading_day_counts[dt])
self.benchmark_volatility = self.calculate_volatility(
self.benchmark_returns)
self.algorithm_volatility = self.calculate_volatility(
self.algorithm_returns)
self.treasury_period_return = choose_treasury(
self.treasury_curves,
self.start_date,
self.end_date
)
self.sharpe = self.calculate_sharpe()
# The consumer currently expects a 0.0 value for sharpe in period,
# this differs from cumulative which was np.nan.
# When factoring out the sharpe_ratio, the different return types
# were collapsed into `np.nan`.
# TODO: Either fix consumer to accept `np.nan` or make the
# `sharpe_ratio` return type configurable.
# In the meantime, convert nan values to 0.0
if pd.isnull(self.sharpe):
self.sharpe = 0.0
self.sortino = self.calculate_sortino()
self.information = self.calculate_information()
self.beta, self.algorithm_covariance, self.benchmark_variance, \
self.condition_number, self.eigen_values = self.calculate_beta()
self.alpha = self.calculate_alpha()
self.excess_return = self.algorithm_period_returns - \
self.treasury_period_return
self.max_drawdown = self.calculate_max_drawdown()
self.max_leverage = self.calculate_max_leverage()
def to_dict(self):
"""
Creates a dictionary representing the state of the risk report.
Returns a dict object keyed by the metric names assembled below.
"""
period_label = self.end_date.strftime("%Y-%m")
rval = {
'trading_days': self.num_trading_days,
'benchmark_volatility': self.benchmark_volatility,
'algo_volatility': self.algorithm_volatility,
'treasury_period_return': self.treasury_period_return,
'algorithm_period_return': self.algorithm_period_returns,
'benchmark_period_return': self.benchmark_period_returns,
'sharpe': self.sharpe,
'sortino': self.sortino,
'information': self.information,
'beta': self.beta,
'alpha': self.alpha,
'excess_return': self.excess_return,
'max_drawdown': self.max_drawdown,
'max_leverage': self.max_leverage,
'period_label': period_label
}
return {k: None if check_entry(k, v) else v
for k, v in iteritems(rval)}
def __repr__(self):
statements = []
metrics = [
"algorithm_period_returns",
"benchmark_period_returns",
"excess_return",
"num_trading_days",
"benchmark_volatility",
"algorithm_volatility",
"sharpe",
"sortino",
"information",
"algorithm_covariance",
"benchmark_variance",
"beta",
"alpha",
"max_drawdown",
"max_leverage",
"algorithm_returns",
"benchmark_returns",
"condition_number",
"eigen_values"
]
for metric in metrics:
value = getattr(self, metric)
statements.append("{m}:{v}".format(m=metric, v=value))
return '\n'.join(statements)
def mask_returns_to_period(self, daily_returns):
if isinstance(daily_returns, list):
returns = pd.Series([x.returns for x in daily_returns],
index=[x.date for x in daily_returns])
else: # otherwise we're receiving an index already
returns = daily_returns
trade_days = trading.environment.trading_days
trade_day_mask = returns.index.normalize().isin(trade_days)
mask = ((returns.index >= self.start_date) &
(returns.index <= self.end_date) & trade_day_mask)
returns = returns[mask]
return returns
def calculate_period_returns(self, returns):
period_returns = (1. + returns).prod() - 1
return period_returns
def calculate_volatility(self, daily_returns):
return np.std(daily_returns, ddof=1) * math.sqrt(self.num_trading_days)
def calculate_sharpe(self):
"""
http://en.wikipedia.org/wiki/Sharpe_ratio
"""
return sharpe_ratio(self.algorithm_volatility,
self.algorithm_period_returns,
self.treasury_period_return)
def calculate_sortino(self):
"""
http://en.wikipedia.org/wiki/Sortino_ratio
"""
mar = downside_risk(self.algorithm_returns,
self.mean_algorithm_returns,
self.num_trading_days)
# Hold on to downside risk for debugging purposes.
self.downside_risk = mar
return sortino_ratio(self.algorithm_period_returns,
self.treasury_period_return,
mar)
def calculate_information(self):
"""
http://en.wikipedia.org/wiki/Information_ratio
"""
return information_ratio(self.algorithm_returns,
self.benchmark_returns)
def calculate_beta(self):
"""
.. math::
\\beta_a = \\frac{\mathrm{Cov}(r_a,r_p)}{\mathrm{Var}(r_p)}
http://en.wikipedia.org/wiki/Beta_(finance)
"""
# it doesn't make much sense to calculate beta for less than two days,
# so return neutral zero/empty values instead.
if len(self.algorithm_returns) < 2:
return 0.0, 0.0, 0.0, 0.0, []
returns_matrix = np.vstack([self.algorithm_returns,
self.benchmark_returns])
C = np.cov(returns_matrix, ddof=1)
eigen_values = la.eigvals(C)
condition_number = max(eigen_values) / min(eigen_values)
algorithm_covariance = C[0][1]
benchmark_variance = C[1][1]
beta = algorithm_covariance / benchmark_variance
return (
beta,
algorithm_covariance,
benchmark_variance,
condition_number,
eigen_values
)
def calculate_alpha(self):
"""
http://en.wikipedia.org/wiki/Alpha_(investment)
"""
return alpha(self.algorithm_period_returns,
self.treasury_period_return,
self.benchmark_period_returns,
self.beta)
def calculate_max_drawdown(self):
compounded_returns = []
cur_return = 0.0
for r in self.algorithm_returns:
try:
cur_return += math.log(1.0 + r)
# this is a guard for a single day returning -100% (or worse); if a
# return is -1.0 or lower, math.log raises a ValueError because you
# cannot take the log of a non-positive number
except ValueError:
log.debug("{cur} return, zeroing the returns".format(
cur=cur_return))
cur_return = 0.0
compounded_returns.append(cur_return)
cur_max = None
max_drawdown = None
for cur in compounded_returns:
if cur_max is None or cur > cur_max:
cur_max = cur
drawdown = (cur - cur_max)
if max_drawdown is None or drawdown < max_drawdown:
max_drawdown = drawdown
if max_drawdown is None:
return 0.0
return 1.0 - math.exp(max_drawdown)
def calculate_max_leverage(self):
if self.algorithm_leverages is None:
return 0.0
else:
return max(self.algorithm_leverages)
def __getstate__(self):
state_dict = \
{k: v for k, v in iteritems(self.__dict__) if
(not k.startswith('_') and not k == 'treasury_curves')}
STATE_VERSION = 2
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 2
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("RiskMetricsPeriod saved state \
is too old.")
self.__dict__.update(state)
self.treasury_curves = trading.environment.treasury_curves
|
apache-2.0
|
TomAugspurger/pandas
|
pandas/tests/base/test_unique.py
|
1
|
3708
|
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.core.dtypes.common import is_datetime64tz_dtype, needs_i8_conversion
import pandas as pd
import pandas._testing as tm
from pandas.tests.base.common import allow_na_ops
def test_unique(index_or_series_obj):
obj = index_or_series_obj
obj = np.repeat(obj, range(1, len(obj) + 1))
result = obj.unique()
# dict.fromkeys preserves the order
unique_values = list(dict.fromkeys(obj.values))
if isinstance(obj, pd.MultiIndex):
expected = pd.MultiIndex.from_tuples(unique_values)
expected.names = obj.names
tm.assert_index_equal(result, expected)
elif isinstance(obj, pd.Index):
expected = pd.Index(unique_values, dtype=obj.dtype)
if is_datetime64tz_dtype(obj.dtype):
expected = expected.normalize()
tm.assert_index_equal(result, expected)
else:
expected = np.array(unique_values)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_unique_null(null_obj, index_or_series_obj):
obj = index_or_series_obj
if not allow_na_ops(obj):
pytest.skip("type doesn't allow for NA operations")
elif len(obj) < 1:
pytest.skip("Test doesn't make sense on empty data")
elif isinstance(obj, pd.MultiIndex):
pytest.skip(f"MultiIndex can't hold '{null_obj}'")
values = obj.values
if needs_i8_conversion(obj.dtype):
values[0:2] = iNaT
else:
values[0:2] = null_obj
klass = type(obj)
repeated_values = np.repeat(values, range(1, len(values) + 1))
obj = klass(repeated_values, dtype=obj.dtype)
result = obj.unique()
unique_values_raw = dict.fromkeys(obj.values)
# because np.nan == np.nan is False, but None == None is True
# np.nan would be duplicated, whereas None wouldn't
unique_values_not_null = [val for val in unique_values_raw if not pd.isnull(val)]
unique_values = [null_obj] + unique_values_not_null
if isinstance(obj, pd.Index):
expected = pd.Index(unique_values, dtype=obj.dtype)
if is_datetime64tz_dtype(obj.dtype):
result = result.normalize()
expected = expected.normalize()
elif isinstance(obj, pd.CategoricalIndex):
expected = expected.set_categories(unique_values_not_null)
tm.assert_index_equal(result, expected)
else:
expected = np.array(unique_values, dtype=obj.dtype)
tm.assert_numpy_array_equal(result, expected)
def test_nunique(index_or_series_obj):
obj = index_or_series_obj
obj = np.repeat(obj, range(1, len(obj) + 1))
expected = len(obj.unique())
assert obj.nunique(dropna=False) == expected
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_nunique_null(null_obj, index_or_series_obj):
obj = index_or_series_obj
if not allow_na_ops(obj):
pytest.skip("type doesn't allow for NA operations")
elif isinstance(obj, pd.MultiIndex):
pytest.skip(f"MultiIndex can't hold '{null_obj}'")
values = obj.values
if needs_i8_conversion(obj.dtype):
values[0:2] = iNaT
else:
values[0:2] = null_obj
klass = type(obj)
repeated_values = np.repeat(values, range(1, len(values) + 1))
obj = klass(repeated_values, dtype=obj.dtype)
if isinstance(obj, pd.CategoricalIndex):
assert obj.nunique() == len(obj.categories)
assert obj.nunique(dropna=False) == len(obj.categories) + 1
else:
num_unique_values = len(obj.unique())
assert obj.nunique() == max(0, num_unique_values - 1)
assert obj.nunique(dropna=False) == max(0, num_unique_values)
|
bsd-3-clause
|
dkriegner/xrayutilities
|
examples/simpack_xrd_AlGaAs.py
|
1
|
3024
|
# This file is part of xrayutilities.
#
# xrayutilities is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2016 Dominik Kriegner <[email protected]>
import xrayutilities as xu
from matplotlib.pylab import *
mpl.rcParams['font.size'] = 16.0
en = 'CuKa1'
resol = 0.0001 # resolution in qz
H, K, L = (0, 0, 4)
qz = linspace(4.40, 4.50, 2000)
sub = xu.simpack.Layer(xu.materials.GaAs, inf)
lay = xu.simpack.Layer(xu.materials.AlGaAs(0.75), 995.64, relaxation=0.0)
# pseudomorphic stack -> adjusts lattice parameters!
pls = xu.simpack.PseudomorphicStack001('AlGaAs on GaAs', sub, lay)
# calculate incidence angle for dynamical diffraction models
qx = sqrt(sub.material.Q(H, K, L)[0]**2 + sub.material.Q(H, K, L)[1]**2)
ai = xu.simpack.coplanar_alphai(qx, qz, en)
resolai = abs(xu.simpack.coplanar_alphai(qx, mean(qz) + resol, en) -
xu.simpack.coplanar_alphai(qx, mean(qz), en))
# comparison of different diffraction models
# simplest kinematical diffraction model
mk = xu.simpack.KinematicalModel(pls, energy=en, resolution_width=resol)
Ikin = mk.simulate(qz, hkl=(H, K, L), refraction=True)
# kinematic multibeam diffraction model
mk = xu.simpack.KinematicalMultiBeamModel(pls, energy=en,
surface_hkl=(0, 0, 1),
resolution_width=resol)
Imult = mk.simulate(qz, hkl=(H, K, L), refraction=True)
# simplified dynamical diffraction model
mds = xu.simpack.SimpleDynamicalCoplanarModel(pls, energy=en,
resolution_width=resolai)
Idynsub = mds.simulate(ai, hkl=(H, K, L), idxref=0)
Idynlay = mds.simulate(ai, hkl=(H, K, L), idxref=1)
# general 2-beam theory based dynamical diffraction model
md = xu.simpack.DynamicalModel(pls, energy=en, resolution_width=resolai)
Idyn = md.simulate(ai, hkl=(H, K, L))
# plot of calculated intensities
figure('XU-simpack AlGaAs')
clf()
semilogy(qz, Ikin, label='kinematical')
semilogy(qz, Imult, label='multibeam')
semilogy(xu.simpack.get_qz(qx, ai, en), Idynsub, label='simpl. dynamical(S)')
semilogy(xu.simpack.get_qz(qx, ai, en), Idynlay, label='simpl. dynamical(L)')
semilogy(xu.simpack.get_qz(qx, ai, en), Idyn, label='full dynamical')
vlines([4*2*pi/l.material.a3[-1] for l in pls], 1e-6, 1, linestyles='dashed')
legend(fontsize='small')
xlim(qz.min(), qz.max())
xlabel(r'Qz ($1/\mathrm{\AA}$)')
ylabel('Intensity (arb. u.)')
tight_layout()
show()
|
gpl-2.0
|
maxalbert/bokeh
|
bokeh/charts/attributes.py
|
2
|
13733
|
from __future__ import absolute_import
from copy import copy
from itertools import cycle
import pandas as pd
from bokeh.charts import DEFAULT_PALETTE
from bokeh.charts.properties import ColumnLabel
from bokeh.charts.utils import marker_types
from bokeh.charts.data_source import ChartDataSource
from bokeh.charts.stats import Bins
from bokeh.enums import DashPattern
from bokeh.models.sources import ColumnDataSource
from bokeh.properties import (HasProps, String, List, Instance, Either, Any, Dict,
Color, Bool, Override)
class AttrSpec(HasProps):
"""A container for assigning attributes to values and retrieving them as needed.
One convenience this provides is automatic handling of cases where the provided
iterable is shorter than the number of distinct values it must cover.
Once created as attr_spec, you can do attr_spec[data_label], where data_label must
be a one dimensional tuple of values, representing the unique group in the data.
See the :meth:`AttrSpec.setup` method for the primary way to provide an existing
AttrSpec with data and column values and update all derived property values.
"""
id = Any()
data = Instance(ColumnDataSource)
name = String(help='Name of the attribute the spec provides.')
columns = Either(ColumnLabel, List(ColumnLabel), help="""
The label or list of column labels that correspond to the columns that will be
used to find all distinct values (single column) or combination of values (
multiple columns) to then assign a unique attribute to. If not enough unique
attribute values are found, then the attribute values will be cycled.
""")
default = Any(default=None, help="""
The default value for the attribute, which is used if no column is assigned to
the attribute for plotting. If the default value is not provided, the first
value in the `iterable` property is used.
""")
attr_map = Dict(Any, Any, help="""
Created by the attribute specification when `iterable` and `data` are
available. The `attr_map` will include a mapping between the distinct value(s)
found in `columns` and the attribute value that has been assigned.
""")
items = List(Any, default=None, help="""
The attribute specification calculates this list of distinct values that are
found in `columns` of `data`.
""")
sort = Bool(default=True, help="""
A boolean flag to tell the attribute specification to sort `items`, when it is
calculated. This affects which value of `iterable` is assigned to each distinct
value in `items`.
""")
ascending = Bool(default=True, help="""
A boolean flag to tell the attribute specification how to sort `items` if the
`sort` property is set to `True`. The default setting for `ascending` is `True`.
""")
bins = Instance(Bins, help="""
If an attribute spec is binning data, so that we can map one value in the
`iterable` to one value in `items`, then this attribute will contain an instance
of the Bins stat. This is used to create unique labels for each bin, which is
then used for `items` instead of the actual unique values in `columns`.
""")
def __init__(self, columns=None, df=None, iterable=None, default=None,
items=None, **properties):
"""Create a lazy evaluated attribute specification.
Args:
columns: a list of column labels
df(:class:`~pandas.DataFrame`): the data source for the attribute spec.
iterable: an iterable of distinct attribute values
default: a value to use as the default attribute when no columns are passed
items: the distinct values in columns. If items is provided as input,
then the values provided are used instead of being calculated. This can
be used to force a specific order for assignment.
**properties: other properties to pass to parent :class:`HasProps`
"""
properties['columns'] = self._ensure_list(columns)
if df is not None:
properties['data'] = ColumnDataSource(df)
if default is None and iterable is not None:
default_iter = copy(iterable)
properties['default'] = next(iter(default_iter))
elif default is not None:
properties['default'] = default
if iterable is not None:
properties['iterable'] = iterable
if items is not None:
properties['items'] = items
super(AttrSpec, self).__init__(**properties)
@staticmethod
def _ensure_list(attr):
"""Always returns a list with the provided value. Returns the value if a list."""
if isinstance(attr, str):
return [attr]
elif isinstance(attr, tuple):
return list(attr)
else:
return attr
@staticmethod
def _ensure_tuple(attr):
"""Return tuple with the provided value. Returns the value if a tuple."""
if not isinstance(attr, tuple):
return (attr,)
else:
return attr
def _setup_default(self):
"""Stores the first value of iterable into `default` property."""
self.default = next(self._setup_iterable())
def _setup_iterable(self):
"""Default behavior is to copy and cycle the provided iterable."""
return cycle(copy(self.iterable))
def _generate_items(self, df, columns):
"""Produce list of unique tuples that identify each item."""
if self.sort:
# TODO (fpliger): this handles pandas API change so users do not experience
# the related annoying deprecation warning. This is probably worth
# removing when pandas deprecated version (0.16) is "old" enough
try:
df = df.sort_values(by=columns, ascending=self.ascending)
except AttributeError:
df = df.sort(columns=columns, ascending=self.ascending)
items = df[columns].drop_duplicates()
self.items = [tuple(x) for x in items.to_records(index=False)]
def _create_attr_map(self, df, columns):
"""Creates map between unique values and available attributes."""
self._generate_items(df, columns)
iterable = self._setup_iterable()
iter_map = {}
for item in self.items:
item = self._ensure_tuple(item)
iter_map[item] = next(iterable)
return iter_map
def set_columns(self, columns):
"""Set columns property and update derived properties as needed."""
columns = self._ensure_list(columns)
if all([col in self.data.column_names for col in columns]):
self.columns = columns
else:
# we have input values other than columns
# assume this is now the iterable at this point
self.iterable = columns
self._setup_default()
def setup(self, data=None, columns=None):
"""Set the data and update derived properties as needed."""
if data is not None:
self.data = data
if columns is not None:
self.set_columns(columns)
if self.columns is not None and self.data is not None:
self.attr_map = self._create_attr_map(self.data.to_df(), self.columns)
def __getitem__(self, item):
"""Lookup the attribute to use for the given unique group label."""
if not self.columns or not self.data or item is None:
return self.default
elif self._ensure_tuple(item) not in self.attr_map.keys():
# make sure we have attr map
self.setup()
return self.attr_map[self._ensure_tuple(item)]
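# --- Illustrative usage sketch (not part of the original module) ------------
# The AttrSpec docstring above describes the attr_spec[data_label] lookup and
# the cycling of a too-short iterable. The helper below is a hypothetical,
# minimal demonstration of that behaviour using ColorAttr (defined further
# down), since the base class leaves the `iterable` property to its
# subclasses. It assumes the legacy bokeh.charts API is importable.
def _attr_spec_lookup_example():  # pragma: no cover
    import pandas as pd
    df = pd.DataFrame({'kind': ['a', 'b', 'c', 'a']})
    spec = ColorAttr(columns='kind', df=df, palette=['red', 'green'])
    # three distinct values but only two colors: the palette is cycled,
    # so 'c' is assigned 'red' again
    return {key: spec[key] for key in ('a', 'b', 'c')}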
class ColorAttr(AttrSpec):
"""An attribute specification for mapping unique data values to colors.
.. note::
Should be expanded to support more complex coloring options.
"""
name = Override(default='color')
iterable = List(Color, default=DEFAULT_PALETTE)
bin = Bool(default=False)
def __init__(self, **kwargs):
iterable = kwargs.pop('palette', None)
if iterable is not None:
kwargs['iterable'] = iterable
super(ColorAttr, self).__init__(**kwargs)
def _generate_items(self, df, columns):
"""Produce list of unique tuples that identify each item."""
if not self.bin:
super(ColorAttr, self)._generate_items(df, columns)
else:
if len(columns) == 1 and ChartDataSource.is_number(df[columns[0]]):
self.bins = Bins(source=ColumnDataSource(df), column=columns[0],
bin_count=len(self.iterable), aggregate=False)
if self.sort:
self.bins.sort(ascending=self.ascending)
self.items = [bin.label[0] for bin in self.bins]
else:
raise ValueError('Binned colors can only be created for one column of numerical data.')
def add_bin_labels(self, data):
col = self.columns[0]
# save original values into new column
data._data[col + '_values'] = data._data[col]
for bin in self.bins:
# set all rows associated to each bin to the bin label being mapped to colors
data._data.ix[data._data[col + '_values'].isin(bin.values),
col] = bin.label[0]
data._data[col] = pd.Categorical(data._data[col], categories=list(self.items),
ordered=self.sort)
class MarkerAttr(AttrSpec):
"""An attribute specification for mapping unique data values to markers."""
name = Override(default='marker')
iterable = List(String, default=list(marker_types.keys()))
def __init__(self, **kwargs):
iterable = kwargs.pop('markers', None)
if iterable is not None:
kwargs['iterable'] = iterable
super(MarkerAttr, self).__init__(**kwargs)
dashes = DashPattern._values
class DashAttr(AttrSpec):
"""An attribute specification for mapping unique data values to line dashes."""
name = Override(default='dash')
iterable = List(String, default=dashes)
def __init__(self, **kwargs):
iterable = kwargs.pop('dash', None)
if iterable is not None:
kwargs['iterable'] = iterable
super(DashAttr, self).__init__(**kwargs)
class IdAttr(AttrSpec):
"""An attribute specification for mapping unique data values to line dashes."""
name = 'id'
def _setup_iterable(self):
return iter(range(0, len(self.items)))
class CatAttr(AttrSpec):
"""An attribute specification for mapping unique data values to labels.
.. note::
this is a special attribute specification, which is used for defining which
labels are used for one aspect of a chart (grouping) vs another (stacking or
legend)
"""
name = Override(default='nest')
def __init__(self, **kwargs):
super(CatAttr, self).__init__(**kwargs)
def _setup_iterable(self):
return iter(self.items)
def get_levels(self, columns):
"""Provides a list of levels the attribute represents."""
if self.columns is not None:
levels = [columns.index(col) for col in self.columns]
return levels
else:
return []
""" Attribute Spec Functions
Convenient functions for producing attribute specifications. These would be
the interface used by end users when providing attribute specs as inputs
to the Chart.
"""
def color(columns=None, palette=None, bin=False, **kwargs):
"""Produces a ColorAttr specification for coloring groups of data based on columns.
Args:
columns (str or list(str), optional): a column or list of columns for coloring
palette (list(str), optional): a list of colors to use for assigning to unique
values in `columns`.
**kwargs: any keyword, arg supported by :class:`AttrSpec`
Returns:
a `ColorAttr` object
"""
if palette is not None:
kwargs['palette'] = palette
kwargs['columns'] = columns
kwargs['bin'] = bin
return ColorAttr(**kwargs)
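# --- Illustrative sketch (not part of the original module) ------------------
# A hypothetical call showing how the convenience function above is meant to
# be used when composing a chart; the column name and palette below are made
# up for demonstration only.
def _color_spec_example():  # pragma: no cover
    color_spec = color(columns='species', palette=['#1f77b4', '#ff7f0e', '#2ca02c'])
    # the returned ColorAttr is lazily evaluated: it is only populated once the
    # chart builder calls color_spec.setup(data, columns) with real data
    return color_spec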
def marker(columns=None, markers=None, **kwargs):
""" Specifies detailed configuration for a marker attribute.
Args:
columns (list or str): a column or list of columns for assigning markers
markers (list(str) or str): a custom list of markers. Must exist within
:data:`marker_types`.
**kwargs: any keyword, arg supported by :class:`AttrSpec`
Returns:
a `MarkerAttr` object
"""
if markers is not None:
kwargs['markers'] = markers
kwargs['columns'] = columns
return MarkerAttr(**kwargs)
def cat(columns=None, cats=None, sort=True, ascending=True, **kwargs):
""" Specifies detailed configuration for a chart attribute that uses categoricals.
Args:
columns (list or str): the columns used to generate the categorical variable
cats (list, optional): overrides the values derived from columns
sort (bool, optional): whether to sort the categorical values (default=True)
ascending (bool, optional): whether to sort in ascending order (default=True)
**kwargs: any keyword, arg supported by :class:`AttrSpec`
Returns:
a `CatAttr` object
"""
if cats is not None:
kwargs['cats'] = cats
kwargs['columns'] = columns
kwargs['sort'] = sort
kwargs['ascending'] = ascending
return CatAttr(**kwargs)
|
bsd-3-clause
|
xia2/xia2
|
src/xia2/cli/delta_cc_half.py
|
1
|
5298
|
import logging
import matplotlib
import os
import sys
from cctbx import crystal
from iotbx.reflection_file_reader import any_reflection_file
import libtbx.phil
from dials.util import tabulate
from dials.util.filter_reflections import filtered_arrays_from_experiments_reflections
from dials.util.multi_dataset_handling import (
assign_unique_identifiers,
parse_multiple_datasets,
)
from dials.util.options import OptionParser
from dials.util.options import flatten_experiments, flatten_reflections
from dials.util.version import dials_version
import xia2.Handlers.Streams
from xia2.Handlers.Citations import Citations
from xia2.Modules.Analysis import separate_unmerged
from xia2.Modules.DeltaCcHalf import DeltaCcHalf
matplotlib.use("Agg")
logger = logging.getLogger("xia2.delta_cc_half")
phil_scope = libtbx.phil.parse(
"""
include scope xia2.Modules.DeltaCcHalf.phil_scope
group_size = None
.type = int(value_min=1)
batch
.multiple = True
{
id = None
.type = str
range = None
.type = ints(size=2, value_min=0)
}
include scope xia2.Modules.MultiCrystalAnalysis.batch_phil_scope
output {
log = xia2.delta_cc_half.log
.type = path
}
""",
process_includes=True,
)
def run(args=sys.argv[1:]):
# Create the parser
parser = OptionParser(
# usage=usage,
phil=phil_scope,
read_reflections=True,
read_experiments=True,
check_format=False,
# epilog=help_message,
)
# Parse the command line
params, options, args = parser.parse_args(
args=args, show_diff_phil=False, return_unhandled=True
)
# Configure the logging
xia2.Handlers.Streams.setup_logging(
logfile=params.output.log, verbose=options.verbose
)
logger.info(dials_version())
# Log the diff phil
diff_phil = parser.diff_phil.as_str()
if diff_phil != "":
logger.info("The following parameters have been modified:\n")
logger.info(diff_phil)
if params.unit_cell is not None:
unit_cell = params.unit_cell
crystal_symmetry = crystal.symmetry(unit_cell=unit_cell)
else:
crystal_symmetry = None
if len(params.input.experiments):
experiments = flatten_experiments(params.input.experiments)
reflections = flatten_reflections(params.input.reflections)
reflections = parse_multiple_datasets(reflections)
if len(experiments) != len(reflections):
sys.exit(
"Mismatched number of experiments and reflection tables found: %s & %s."
% (len(experiments), len(reflections))
)
experiments, reflections = assign_unique_identifiers(experiments, reflections)
# transform models into miller arrays
intensities, batches = filtered_arrays_from_experiments_reflections(
experiments,
reflections,
outlier_rejection_after_filter=False,
partiality_threshold=0.99,
return_batches=True,
)
if args and os.path.isfile(args[0]):
result = any_reflection_file(args[0])
unmerged_intensities = None
batches_all = None
for ma in result.as_miller_arrays(
merge_equivalents=False, crystal_symmetry=crystal_symmetry
):
if ma.info().labels == ["I(+)", "SIGI(+)", "I(-)", "SIGI(-)"]:
assert ma.anomalous_flag()
unmerged_intensities = ma
elif ma.info().labels == ["I", "SIGI"]:
assert not ma.anomalous_flag()
unmerged_intensities = ma
elif ma.info().labels == ["BATCH"]:
batches_all = ma
assert batches_all is not None
assert unmerged_intensities is not None
sel = unmerged_intensities.sigmas() > 0
unmerged_intensities = unmerged_intensities.select(sel).set_info(
unmerged_intensities.info()
)
batches_all = batches_all.select(sel)
id_to_batches = None
if len(params.batch) > 0:
id_to_batches = {}
for b in params.batch:
assert b.id is not None
assert b.range is not None
assert b.id not in id_to_batches, "Duplicate batch id: %s" % b.id
id_to_batches[b.id] = b.range
separate = separate_unmerged(
unmerged_intensities, batches_all, id_to_batches=id_to_batches
)
batches = list(separate.batches.values())
intensities = list(separate.intensities.values())
result = DeltaCcHalf(
intensities,
batches,
n_bins=params.n_bins,
d_min=params.d_min,
cc_one_half_method=params.cc_one_half_method,
group_size=params.group_size,
)
logger.info(tabulate(result.get_table(), headers="firstrow"))
hist_filename = "delta_cc_hist.png"
logger.info("Saving histogram to %s" % hist_filename)
result.plot_histogram(hist_filename)
normalised_scores_filename = "normalised_scores.png"
logger.info("Saving normalised scores to %s" % normalised_scores_filename)
result.plot_normalised_scores(normalised_scores_filename)
Citations.cite("delta_cc_half")
for citation in Citations.get_citations_acta():
logger.info(citation)
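# --- Illustrative invocation (assumption, not part of the original file) ----
# Assuming this module is installed as the "xia2.delta_cc_half" console
# command (matching the logger name and default log file above), typical calls
# against scaled DIALS output or an unmerged MTZ file might look like:
#
#   xia2.delta_cc_half scaled.expt scaled.refl group_size=5
#   xia2.delta_cc_half unmerged.mtz batch.id=A batch.range=1,100
#
# group_size, batch.id, batch.range and output.log are the phil parameters
# declared in phil_scope above; the file names are placeholders.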
|
bsd-3-clause
|
amolkahat/pandas
|
pandas/tests/scalar/interval/test_interval.py
|
3
|
7257
|
from __future__ import division
import numpy as np
from pandas import Interval, Timestamp, Timedelta
import pandas.core.common as com
import pytest
import pandas.util.testing as tm
@pytest.fixture
def interval():
return Interval(0, 1)
class TestInterval(object):
def test_properties(self, interval):
assert interval.closed == 'right'
assert interval.left == 0
assert interval.right == 1
assert interval.mid == 0.5
def test_repr(self, interval):
assert repr(interval) == "Interval(0, 1, closed='right')"
assert str(interval) == "(0, 1]"
interval_left = Interval(0, 1, closed='left')
assert repr(interval_left) == "Interval(0, 1, closed='left')"
assert str(interval_left) == "[0, 1)"
def test_contains(self, interval):
assert 0.5 in interval
assert 1 in interval
assert 0 not in interval
msg = "__contains__ not defined for two intervals"
with tm.assert_raises_regex(TypeError, msg):
interval in interval
interval_both = Interval(0, 1, closed='both')
assert 0 in interval_both
assert 1 in interval_both
interval_neither = Interval(0, 1, closed='neither')
assert 0 not in interval_neither
assert 0.5 in interval_neither
assert 1 not in interval_neither
def test_equal(self):
assert Interval(0, 1) == Interval(0, 1, closed='right')
assert Interval(0, 1) != Interval(0, 1, closed='left')
assert Interval(0, 1) != 0
def test_comparison(self):
with tm.assert_raises_regex(TypeError, 'unorderable types'):
Interval(0, 1) < 2
assert Interval(0, 1) < Interval(1, 2)
assert Interval(0, 1) < Interval(0, 2)
assert Interval(0, 1) < Interval(0.5, 1.5)
assert Interval(0, 1) <= Interval(0, 1)
assert Interval(0, 1) > Interval(-1, 2)
assert Interval(0, 1) >= Interval(0, 1)
def test_hash(self, interval):
# should not raise
hash(interval)
@pytest.mark.parametrize('left, right, expected', [
(0, 5, 5),
(-2, 5.5, 7.5),
(10, 10, 0),
(10, np.inf, np.inf),
(-np.inf, -5, np.inf),
(-np.inf, np.inf, np.inf),
(Timedelta('0 days'), Timedelta('5 days'), Timedelta('5 days')),
(Timedelta('10 days'), Timedelta('10 days'), Timedelta('0 days')),
(Timedelta('1H10M'), Timedelta('5H5M'), Timedelta('3H55M')),
(Timedelta('5S'), Timedelta('1H'), Timedelta('59M55S'))])
def test_length(self, left, right, expected):
# GH 18789
iv = Interval(left, right)
result = iv.length
assert result == expected
@pytest.mark.parametrize('left, right, expected', [
('2017-01-01', '2017-01-06', '5 days'),
('2017-01-01', '2017-01-01 12:00:00', '12 hours'),
('2017-01-01 12:00', '2017-01-01 12:00:00', '0 days'),
('2017-01-01 12:01', '2017-01-05 17:31:00', '4 days 5 hours 30 min')])
@pytest.mark.parametrize('tz', (None, 'UTC', 'CET', 'US/Eastern'))
def test_length_timestamp(self, tz, left, right, expected):
# GH 18789
iv = Interval(Timestamp(left, tz=tz), Timestamp(right, tz=tz))
result = iv.length
expected = Timedelta(expected)
assert result == expected
@pytest.mark.parametrize('left, right', [
('a', 'z'),
(('a', 'b'), ('c', 'd')),
(list('AB'), list('ab')),
(Interval(0, 1), Interval(1, 2))])
def test_length_errors(self, left, right):
# GH 18789
iv = Interval(left, right)
msg = 'cannot compute length between .* and .*'
with tm.assert_raises_regex(TypeError, msg):
iv.length
def test_math_add(self, closed):
interval = Interval(0, 1, closed=closed)
expected = Interval(1, 2, closed=closed)
result = interval + 1
assert result == expected
result = 1 + interval
assert result == expected
result = interval
result += 1
assert result == expected
msg = r"unsupported operand type\(s\) for \+"
with tm.assert_raises_regex(TypeError, msg):
interval + interval
with tm.assert_raises_regex(TypeError, msg):
interval + 'foo'
def test_math_sub(self, closed):
interval = Interval(0, 1, closed=closed)
expected = Interval(-1, 0, closed=closed)
result = interval - 1
assert result == expected
result = interval
result -= 1
assert result == expected
msg = r"unsupported operand type\(s\) for -"
with tm.assert_raises_regex(TypeError, msg):
interval - interval
with tm.assert_raises_regex(TypeError, msg):
interval - 'foo'
def test_math_mult(self, closed):
interval = Interval(0, 1, closed=closed)
expected = Interval(0, 2, closed=closed)
result = interval * 2
assert result == expected
result = 2 * interval
assert result == expected
result = interval
result *= 2
assert result == expected
msg = r"unsupported operand type\(s\) for \*"
with tm.assert_raises_regex(TypeError, msg):
interval * interval
msg = r"can\'t multiply sequence by non-int"
with tm.assert_raises_regex(TypeError, msg):
interval * 'foo'
def test_math_div(self, closed):
interval = Interval(0, 1, closed=closed)
expected = Interval(0, 0.5, closed=closed)
result = interval / 2.0
assert result == expected
result = interval
result /= 2.0
assert result == expected
msg = r"unsupported operand type\(s\) for /"
with tm.assert_raises_regex(TypeError, msg):
interval / interval
with tm.assert_raises_regex(TypeError, msg):
interval / 'foo'
def test_math_floordiv(self, closed):
interval = Interval(1, 2, closed=closed)
expected = Interval(0, 1, closed=closed)
result = interval // 2
assert result == expected
result = interval
result //= 2
assert result == expected
msg = r"unsupported operand type\(s\) for //"
with tm.assert_raises_regex(TypeError, msg):
interval // interval
with tm.assert_raises_regex(TypeError, msg):
interval // 'foo'
def test_constructor_errors(self):
msg = "invalid option for 'closed': foo"
with tm.assert_raises_regex(ValueError, msg):
Interval(0, 1, closed='foo')
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
Interval(1, 0)
@pytest.mark.parametrize('tz_left, tz_right', [
(None, 'UTC'), ('UTC', None), ('UTC', 'US/Eastern')])
def test_constructor_errors_tz(self, tz_left, tz_right):
# GH 18538
left = Timestamp('2017-01-01', tz=tz_left)
right = Timestamp('2017-01-02', tz=tz_right)
error = TypeError if com._any_none(tz_left, tz_right) else ValueError
with pytest.raises(error):
Interval(left, right)
|
bsd-3-clause
|
rosenbrockc/dft
|
pydft/geometry.py
|
1
|
9366
|
"""Methods and classes for storing and manipulating the global
geometry of the physical problem.
"""
import numpy as np
from pydft.base import testmode
cell = None
"""Cell: default geometry to use globally throughout the code when no other
geometry is explicitly specified.
"""
def get_cell(cell_=None):
"""Returns the cell to use for calculations.
"""
if cell_ is not None:
return cell_
else:
return cell
def set_cell(cell_):
"""Sets the global cell to an already initialized instance.
Args:
cell_ (Cell): new global cell.
"""
from pydft.bases.fourier import reset_cache
reset_cache()
global cell
cell = cell_
def set_geometry(R, S, X=None, Z=1, grid="MP", f=2):
"""Sets the global geometry that is used by default in all calculations.
Args:
R (numpy.ndarray): column lattice vectors of the unit cell for the
problem.
S (numpy.ndarray): of `int`; defines how many times to divide
each of the lattice vectors when defining the discretizing
grid.
X (numpy.ndarray): of shape (N, 3), where `N` is the number of
nuclei in the unit cell.
Z (numpy.ndarray or int): specifying the size of charge on
each nucleus in `X`.
grid (str): one of ['MP', 'BCC']; defines the type of grid to use
for sampling *real* space unit cell.
f (int): number of electrons per orbital.
"""
from pydft.bases.fourier import reset_cache
reset_cache()
global cell
cell = Cell(R, S, X, Z, grid, f=f)
return cell
class Cell(object):
"""Represents the unit cell in real space *and* the corresponding
cell in reciprocal space.
Args:
R (numpy.ndarray): column lattice vectors of the unit cell for the
problem.
S (numpy.ndarray): of `int`; defines how many times to divide
each of the lattice vectors when defining the discretizing
grid.
X (numpy.ndarray): of shape (N, 3), where `N` is the number of
nuclei in the unit cell.
Z (numpy.ndarray or int): specifying the size of charge on
each nucleus in `X`.
grid (str): one of ['MP', 'BCC']; defines the type of grid to use
for sampling *real* space unit cell.
f (int): number of electrons per orbital.
Attributes:
R (numpy.ndarray): column lattice vectors of the unit cell for the
problem.
S (numpy.ndarray): of `int`; defines how many times to divide
each of the lattice vectors when defining the discretizing
grid.
X (numpy.ndarray): of shape (N, 3), where `N` is the number of
nuclei in the unit cell.
Z (numpy.ndarray or int): specifying the size of charge on
each nucleus in `X`.
vol (float): volume of the cell in real space.
f (int): number of electrons per orbital.
"""
def __init__(self, R, S, X=None, Z=1, grid="MP", f=2):
self.R = np.array(R)
self.S = np.array(S)
self.vol = np.linalg.det(self.R)
if X is None:
self.X = np.array([[0,0,0]])
else:
self.X = np.array(X)
self.Z = np.array([Z for i in range(len(self.X))])
self.f = f
self._M = None
"""numpy.ndarray: matrix of fractions used to define the points on which
the functions are sampled in the unit cell.
"""
self._N = None
"""numpy.ndarray: matrix of integers used in computing the Fourier transform of
the unit cell sample points.
"""
self._r = None
"""numpy.ndarray: points to sample the functions at in the unit cell.
"""
self._G = None
"""numpy.ndarray: sample points in reciprocal space.
"""
self._G2 = None
"""numpy.ndarray: magnitudes of the sample point vectors in reciprocal
space.
"""
self._K = None
"""numpy.ndarray: with shape (3, 3); holds the reciprocal lattice
vectors for the problem.
"""
self._Sf = None
"""numpy.ndarray: with length `self.X.shape[0]`; structure factors for
the nuclei in the cell.
"""
self._dr = None
"""numpy.ndarray: distance from the center of the cell to each
of the sample points.
"""
if grid != "MP":
raise NotImplementedError("Haven't got BCC sampling in place yet.")
@property
def dr(self):
"""Returns a matrix of the distance from the center of the
cell to each of the sample points.
"""
if self._dr is None:
center = np.sum(self.R, axis=1)/2.
self._dr = self.r - center
return self._dr
@property
def K(self):
"""Reciprocal lattice vectors for the problem. Has shape (3, 3).
"""
if self._K is None:
b1 = 2*np.pi*np.cross(self.R[:,1], self.R[:,2])/self.vol
b2 = 2*np.pi*np.cross(self.R[:,2], self.R[:,0])/self.vol
b3 = 2*np.pi*np.cross(self.R[:,0], self.R[:,1])/self.vol
self._K = np.vstack((b1, b2, b3)).T
return self._K
@property
def Sf(self):
"""Structure factor for the nuclei in the cell.
"""
if self._Sf is None:
self._Sf = np.sum(np.exp(-1j*np.dot(self.G, self.X.T)), axis=1)
return self._Sf
@property
def r(self):
"""Points to sample the functions at in the unit cell.
"""
if self._r is None:
Sinv = np.diag(1./self.S)
self._r = np.dot(self.M, np.dot(Sinv, self.R.T))
return self._r
@property
def G(self):
"""Sample points in reciprocal space.
"""
if self._G is None:
self._G = 2*np.pi*np.dot(self.N, np.linalg.inv(self.R))
return self._G
@property
def G2(self):
"""Magnitudes of the sample point vectors in reciprocal
space.
Returns:
numpy.ndarray: of length `np.prod(S)` with magnitude of each `G`
vector.
"""
if self._G2 is None:
self._G2 = np.linalg.norm(self.G, axis=1)**2
return self._G2
@property
def M(self):
"""Returns the :math:`M` matrix of integers that determine points at which the
functions are sampled in the unit cell.
Examples:
For `S = [2, 2, 1]`, the returned matrix is:
.. code-block:: python
np.ndarray([[0,0,0],
[1,0,0],
[0,1,0],
[1,1,0]], dtype=int)
"""
if self._M is None:
ms = np.arange(np.prod(self.S, dtype=int))
m1 = np.fmod(ms, self.S[0])
m2 = np.fmod(np.floor(ms/self.S[0]), self.S[1])
m3 = np.fmod(np.floor(ms/(self.S[0]*self.S[1])), self.S[2])
#Make sure we explicitly use an integer array; it's faster.
self._M = np.asarray(np.vstack((m1, m2, m3)).T, dtype=int)
return self._M
@property
def N(self):
""""Returns the :math:`N` matrix of integers used in computing the
Fourier transform of the unit cell sample points.
"""
if self._N is None:
result = []
for i in range(3):
odd = 1 if i % 2 == 1 else 0
m = np.ma.array(self.M[:,i], mask=(self.M[:,i] <= self.S[i]/2))
result.append(m-self.S[i])
self._N = np.array(result).T
return self._N
def _latvec_plot(self, R=True, withpts=False, legend=False):
"""Plots the lattice vectors (for real or reciprocal space).
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.gca(projection='3d')
vecs = self.R if R else self.K
for i in range(3):
steps = np.linspace(0, 1, int(np.floor(10*np.linalg.norm(vecs[:,i]))))
Ri = vecs[:,i]
Ri.shape = (1, 3)
steps.shape = (len(steps), 1)
line = np.dot(steps, Ri)
ax.plot(line[:,0], line[:,1], line[:,2], label="R{0:d}".format(i+1))
if withpts:
pts = self.r if R else self.G
ax.scatter(pts[:,0], pts[:,1], pts[:,2], color='k')
if legend:
ax.legend()
return (fig, ax)
def plot(self, withpts=False):
"""Plots the unit cell.
Args:
withpts (bool): when True, the sampling points :attr:`r` are also
plotted.
"""
import matplotlib.pyplot as plt
fig, ax = self._latvec_plot(withpts=withpts)
plt.title("Real Lattice with Sampling Points")
if not testmode:
plt.show()
def gplot(self, withpts=False):
"""Plots the reciprocal lattice vectors.
Args:
withpts (bool): when True, the sampling points in reciprocal space will
also be plotted.
"""
import matplotlib.pyplot as plt
fig, ax = self._latvec_plot(R=False, withpts=withpts)
plt.title("Reciprocal Lattice with Sampling Points")
if not testmode:
plt.show()
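# --- Illustrative usage sketch (not part of the original module) ------------
# A minimal, hypothetical example of constructing a Cell for a simple cubic
# lattice and inspecting the derived sampling quantities described in the
# docstrings above. The numbers are made up and only meant to show shapes and
# conventions; it assumes pydft is importable.
def _cell_usage_example():  # pragma: no cover
    R = 6.0 * np.eye(3)      # column lattice vectors of a cubic cell, a = 6
    S = np.array([4, 4, 4])  # 4 divisions along each lattice vector
    c = Cell(R, S, X=[[0, 0, 0]], Z=1)
    assert c.r.shape == (64, 3)   # np.prod(S) real-space sample points
    assert c.G.shape == (64, 3)   # matching reciprocal-space sample points
    # for a cubic cell the reciprocal lattice is (2*pi/a) times the identity
    assert np.allclose(c.K, 2 * np.pi / 6.0 * np.eye(3))
    return c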
|
mit
|
mugizico/scikit-learn
|
sklearn/ensemble/tests/test_base.py
|
284
|
1328
|
"""
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
|
bsd-3-clause
|
meren/anvio
|
anvio/mcgclassifier.py
|
2
|
35383
|
# -*- coding: utf-8
# pylint: disable=line-too-long
"""
Classes to classify genes based on coverages across metagenomes.
anvi-mcg-classifier is the default client using this module
"""
import os
import anvio
import numpy as np
import pandas as pd
import matplotlib
# TODO: according to the warning, this call to set the backend is meaningless
# I need to experiment to see what happens if I delete it.
matplotlib.use('pdf')
import anvio.utils as utils
import matplotlib.pyplot as plt
import anvio.terminal as terminal
import anvio.filesnpaths as filesnpaths
from scipy import odr as odr
from anvio.mcgops import MCGPlots
from anvio.errors import ConfigError, FilesNPathsError
from matplotlib.backends.backend_pdf import PdfPages
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2018, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "Alon Shaiber"
__email__ = "[email protected]"
run = terminal.Run()
progress = terminal.Progress()
pp = terminal.pretty_print
columns_for_samples_coverage_stats_dict = ['non_outlier_mean_coverage', 'non_outlier_coverage_std']
class MetagenomeCentricGeneClassifier:
def __init__(self, args, run=run, progress=progress):
self.run = run
self.progress = progress
A = lambda x: args.__dict__[x] if x in args.__dict__ else None
self.output_file_prefix = A('output_file_prefix')
self.alpha = A('alpha')
self.collection_name = A('collection_name')
self.bin_id = A('bin_id')
self.bin_ids_file_path = A('bin_ids_file')
self.exclude_samples = A('exclude_samples')
self.include_samples = A('include_samples')
self.outliers_threshold = A('outliers_threshold')
self.zeros_are_outliers = A('zeros_are_outliers')
self.gen_figures = A('gen_figures')
self.overwrite_output_destinations = A('overwrite_output_destinations')
self.split_coverage_values_per_nt_dict = None
self.gene_level_coverage_stats_dict = None
self.gene_level_coverage_stats_dict_of_dataframes = None
self.profile_db = {}
self.coverage_values_per_nt = None
self.gene_coverages = {}
self.gene_detections = None
self.samples = None
self.positive_samples = []
self.number_of_positive_samples = None
self.negative_samples = {}
self.number_of_negative_samples = None
self.gene_class_df = {}
self.samples_detection_information = {}
self.gene_presence_absence_in_samples_initiated = False
self.gene_presence_absence_in_samples = None
self.additional_description = ''
self.total_length = None
self.samples_coverage_stats_dicts_was_initiated = False
self.samples_coverage_stats_dicts = {}
self.non_outlier_indices = {}
self.gene_coverage_consistency_dict = {}
self.gene_coverage_consistency_dict_initiated = False
self.samples_to_exclude = set([])
self.samples_to_include = set([])
self.write_output_to_files = None
if self.exclude_samples:
# check that there is a file like this
filesnpaths.is_file_exists(self.exclude_samples)
self.samples_to_exclude = set([l.split('\t')[0].strip() for l in open(self.exclude_samples, 'rU').readlines()])
if not self.samples_to_exclude:
raise ConfigError("You asked to exclude samples, but provided an empty list.")
run.info('Excluding Samples', 'The following samples will be excluded: %s' % self.samples_to_exclude,)
if self.include_samples:
# check that there is a file like this
filesnpaths.is_file_exists(self.include_samples)
self.samples_to_include = set([l.split('\t')[0].strip() for l in open(self.include_samples, 'rU').readlines()])
if not self.samples_to_include:
raise ConfigError("You provided an empty list of samples to include.")
run.info('Including Samples', 'The following samples will be included: %s' % self.samples_to_include,)
# run sanity check on all input arguments
self.sanity_check()
def init(self, gene_level_coverage_stats_dict=None, split_coverage_values_per_nt_dict=None, additional_description=None):
""" setting the dictionaries for gene coverage stats and for split coverage per nucleotide"""
if gene_level_coverage_stats_dict is None and split_coverage_values_per_nt_dict is None:
raise ConfigError("MCGC needs at least one of the following in order to work: "
"gene_level_coverage_stats_dict or/and split_coverage_values_per_nt_dict")
# We want to make sure these are empty in case we use "init" multiple times for different bins
self.coverage_values_per_nt = None
self.gene_class_df = {}
self.samples_detection_information = {}
self.gene_presence_absence_in_samples_initiated = False
self.gene_presence_absence_in_samples = None
self.samples_coverage_stats_dicts_was_initiated = False
self.samples_coverage_stats_dicts = {}
self.non_outlier_indices = {}
self.gene_coverage_consistency_dict = {}
self.gene_coverage_consistency_dict_initiated = False
self.gene_level_coverage_stats_dict = gene_level_coverage_stats_dict
self.split_coverage_values_per_nt_dict = split_coverage_values_per_nt_dict
if additional_description:
self.additional_description = '-' + additional_description
try:
samples = next(iter(self.gene_level_coverage_stats_dict.values())).keys()
except:
samples = next(iter(self.split_coverage_values_per_nt_dict.values())).keys()
self.init_samples(samples)
def sanity_check(self):
"""Basic sanity check for class inputs"""
if self.output_file_prefix:
filesnpaths.is_output_file_writable(self.output_file_prefix + '-additional-layers.txt', ok_if_exists=self.overwrite_output_destinations)
try:
if self.gen_figures:
plot_dir = self.output_file_prefix + '-nucleotide-coverage-distribution-plots'
os.makedirs(plot_dir, exist_ok=self.overwrite_output_destinations)
except FileExistsError as e:
raise FilesNPathsError("%s already exists, if you would like to overwrite it, then use -W (see help menu)." % plot_dir)
# checking alpha
if not isinstance(self.alpha, float):
raise ConfigError("alpha value must be a type float.")
# alpha must be a min of 0 and smaller than 0.5
if self.alpha < 0 or self.alpha >= 0.5:
raise ConfigError("alpha must be a minimum of 0 and smaller than 0.5")
if self.exclude_samples and self.include_samples:
raise ConfigError("You cannot use both --include-samples and --exclude-samples! Please choose one.")
def init_samples(self, samples_list):
""" Create the set of samples according to user input and store it in self.samples"""
# remove the samples that should be excluded
samples = set(samples_list) - self.samples_to_exclude
if self.include_samples:
samples_to_include_that_are_not_there = self.samples_to_include - samples
if samples_to_include_that_are_not_there:
raise ConfigError("You requested to include some samples that are not in the profile database. Here are the samples in the profile database: %s. "
"And here are the samples you requested, and that are not there: %s" % (samples, samples_to_include_that_are_not_there))
samples = self.samples_to_include
self.samples = samples
def init_gene_level_coverage_stats_dict_of_dataframes(self):
""" converts the dictionaries of gene_level_coverage_stats_dict to dataframes"""
self.gene_level_coverage_stats_dict_of_dataframes = {}
for key in ['mean_coverage', 'detection', 'non_outlier_mean_coverage', 'non_outlier_coverage_std']:
# Only include samples that the user want
gene_stat = utils.get_values_of_gene_level_coverage_stats_as_dict(self.gene_level_coverage_stats_dict, key, as_pandas=True, samples_of_interest=self.samples)
self.gene_level_coverage_stats_dict_of_dataframes[key] = gene_stat
for key in ['gene_coverage_values_per_nt', 'non_outlier_positions']:
gene_stat = utils.get_values_of_gene_level_coverage_stats_as_dict(self.gene_level_coverage_stats_dict, key, as_pandas=False, samples_of_interest=self.samples)
self.gene_level_coverage_stats_dict_of_dataframes[key] = gene_stat
def init_samples_coverage_stats_dict(self):
""" populate the samples_coverage_stats_dict, and determine positive, negative, and ambiguous samples with the genome detection information
(--alpha, --genome-detection-uncertainty)
The samples_coverage_stats_dict dataframe is used to calculate the gene consistency information.
It is also used for plotting purposes (both for the nucleotide-coverage-distribution plots and the gene-consistency plots).
The coverage_values_per_nt is used to calculate the detection value (portion of nucleotides
covered) for a sample. Then, a cutoff for detection values is used to determine the presence
or absence of the genome in each sample.
"""
if self.coverage_values_per_nt is None:
self.coverage_values_per_nt = get_coverage_values_per_nucleotide(self.split_coverage_values_per_nt_dict, samples=self.samples)
total_length = len(next(iter(self.coverage_values_per_nt.values())))
MCG_samples_information_table_structure = ['samples', 'presence', 'detection', 'number_of_taxon_specific_core_detected']
# create an empty dataframe
samples_information = pd.DataFrame(index=self.samples, columns=MCG_samples_information_table_structure[1:])
positive_samples = []
negative_samples = []
self.progress.new("Finding nucleotide positions in samples with outlier coverage values")
progress.update('...')
num_samples, counter = len(self.samples), 1
detection = {}
total_length = len(next(iter(self.coverage_values_per_nt.values())))
self.samples_coverage_stats_dicts = pd.DataFrame(index=self.samples, columns=columns_for_samples_coverage_stats_dict)
for sample in self.samples:
if num_samples > 100 and counter % 100 == 0:
self.progress.update('%d of %d samples...' % (counter, num_samples))
# get the non-outlier information
non_outlier_indices, self.samples_coverage_stats_dicts.loc[sample,] = get_non_outliers_information(self.coverage_values_per_nt[sample], MAD_threshold=self.outliers_threshold, zeros_are_outliers=self.zeros_are_outliers)
self.non_outlier_indices[sample] = non_outlier_indices
number_of_non_outliers = len(self.non_outlier_indices[sample])
if anvio.DEBUG:
self.run.info_single('The mean and std of non-outliers in sample %s are: %s, %s respectively' % (sample, self.samples_coverage_stats_dicts['non_outlier_mean_coverage'][sample], self.samples_coverage_stats_dicts['non_outlier_coverage_std'][sample]))
self.run.info_single('The number of non-outliers is %s of %s (%.2f%%)' % (number_of_non_outliers, total_length, 100.0 * number_of_non_outliers / total_length))
detection[sample] = np.count_nonzero(self.coverage_values_per_nt[sample]) / total_length
samples_information['presence'][sample] = get_presence_absence_information(number_of_non_outliers/total_length, self.alpha)
if detection[sample] <= 0.5:
samples_information['presence'][sample] = False
if samples_information['presence'][sample]:
positive_samples.append(sample)
elif samples_information['presence'][sample] == False:
negative_samples.append(sample)
samples_information['detection'][sample] = detection[sample]
counter += 1
self.positive_samples = positive_samples
self.number_of_positive_samples = len(self.positive_samples)
self.negative_samples = negative_samples
self.samples_detection_information = samples_information
self.run.warning('The number of positive samples is %s' % self.number_of_positive_samples)
self.run.warning('The number of negative samples is %s' % len(self.negative_samples))
self.samples_coverage_stats_dicts_was_initiated = True
self.progress.end()
def plot_nucleotide_coverage_distribution(self):
""" Creates a pdf file with the following plots for each sample the sorted nucleotide coverages \
(with the outliers in red and non-outliers in blue), and a histogram of coverages for the non-outliers"""
# Creating a dircetory for the plots. If running on bins, each bin would be in a separate sub-directory
if not self.samples_coverage_stats_dicts_was_initiated:
self.init_samples_coverage_stats_dict()
plot_dir = self.output_file_prefix + '-nucleotide-coverage-distribution-plots' + '/'
self.progress.new('Saving figures of taxon specific distributions to pdf')
progress.update('...')
number_of_finished = 0
for sample in self.positive_samples:
coverages_pdf_output = plot_dir + sample + self.additional_description + '-coverages.pdf'
pdf_output_file = PdfPages(coverages_pdf_output)
v = self.coverage_values_per_nt[sample]
# Using argsort so we can use the non_outlier indices
sorting_indices = np.argsort(v)
# we would need the reverse of the sorting of the indices to create the x axis for the non-outliers
reverse_sorted_indices = np.zeros(len(sorting_indices))
reverse_sorted_indices[sorting_indices] = range(len(reverse_sorted_indices))
# plotting the ordered coverage values (per nucleotide)
# the non-outliers are plotted in blue
# the outlier values are plotted in red
fig = plt.figure()
ax = fig.add_subplot(111, rasterized=True)
ax.set_xlabel('Nucleotide Number (ordered)')
ax.set_ylabel(r'$Nucleotide Coverage^2$')
x1 = range(len(v)) # FIXME: this shouldn't be in the loop (only here because I need to fix the mock data)
x2 = reverse_sorted_indices[self.non_outlier_indices[sample]]
#y2 = v[self.non_outlier_indices[sample]]
# plot all in red
ax.semilogy(x1,v[sorting_indices],'r.', rasterized=True)
# plot on top the non-outliers in blue
ax.semilogy(x2,v[self.non_outlier_indices[sample]],'b.', rasterized=True)
fig.suptitle("%s - sorted coverage values with outliers" % sample)
plt.savefig(pdf_output_file, format='pdf')
plt.close()
# plotting a histogram of the non-outliers
# This would allow to see if they resemble a normal distribution
hist_range = (min(v[self.non_outlier_indices[sample]]),max(v[self.non_outlier_indices[sample]]))
# computing the number of bins so that the width of a bin is ~1/4 of the standard deviation
# FIXME: need to make it so the bins are only of integers (so the smallest bin is of width 1
# and that bins are integers)
number_of_hist_bins = np.ceil((hist_range[1] - hist_range[0]) / (self.samples_coverage_stats_dicts['non_outlier_coverage_std'][sample]/4)).astype(int) # setting the histogram bins to be of the width of a quarter of std
fig = plt.figure()
ax = fig.add_subplot(111, rasterized=True)
ax.set_xlabel('Coverage')
ax.hist(v[self.non_outlier_indices[sample]], number_of_hist_bins,hist_range, rasterized=True)
fig.suptitle("%s - histogram of non-outliers" % sample)
# adding the mean and std of the non-outliers as text to the plot
text_for_hist = u'$\\mu = %d$\n $\\sigma = %d$' %\
(self.samples_coverage_stats_dicts['non_outlier_mean_coverage'][sample],\
self.samples_coverage_stats_dicts['non_outlier_coverage_std'][sample])
ax.text(0.8, 0.9, text_for_hist, ha='center', va='center', transform=ax.transAxes)
plt.savefig(pdf_output_file, format='pdf')
plt.close()
# close the pdf file
pdf_output_file.close()
number_of_finished += 1
self.progress.update("Finished %d of %d" % (number_of_finished, self.number_of_positive_samples))
self.progress.end()
def init_gene_presence_absence_in_samples(self):
""" Determining presence and absence of genes in samples according to gene detection values."""
if not self.gene_level_coverage_stats_dict:
raise ConfigError("gene presence/absence in samples cannot be determined without a gene_level_coverage_stats_dict,\
but it seems that you don't have one. maybe you should run init()?")
if self.gene_level_coverage_stats_dict_of_dataframes is None:
self.init_gene_level_coverage_stats_dict_of_dataframes()
gene_callers_id = self.gene_level_coverage_stats_dict_of_dataframes['detection'].index
self.gene_presence_absence_in_samples = pd.DataFrame(index=gene_callers_id, columns=self.samples)
T = lambda x: get_presence_absence_information(sum(x)/len(x), self.alpha)
self.progress.new('Computing gene presence/absence in samples')
progress.update('...')
genes_above_outlier_threshold = pd.DataFrame.from_dict(self.gene_level_coverage_stats_dict_of_dataframes['non_outlier_positions'], orient='index').applymap(T)
genes_with_detection_above_half = self.gene_level_coverage_stats_dict_of_dataframes['detection'].applymap(lambda x: x > 0.5)
self.gene_presence_absence_in_samples = genes_above_outlier_threshold & genes_with_detection_above_half
self.gene_presence_absence_in_samples_initiated = True
self.progress.end()
def init_gene_coverage_consistency_information(self):
""" Perform orthogonal distance regression for each gene to determine coverage consistency.
The question that we are trying to ask is:
Does the non-outlier nt coverage of the gene across samples correlate with the non-outlier
nt coverage of the genome in those samples?
The regression is performed only for positive samples.
For each gene, the regression is performed only according to samples in which
the gene is present (according to the detection criteria).
"""
if not self.samples_coverage_stats_dicts_was_initiated:
self.init_samples_coverage_stats_dict()
if not self.gene_presence_absence_in_samples_initiated:
self.init_gene_presence_absence_in_samples()
self.progress.new("Computing coverage consistency for all genes.")
progress.update('...')
gene_ids = self.gene_level_coverage_stats_dict_of_dataframes['mean_coverage'].index
num_genes, counter = len(gene_ids), 1
for gene_id in gene_ids:
if num_genes > 100 and counter % 100 == 0:
self.progress.update('%d of %d genes...' % (counter, num_genes))
# samples in which the gene is present
_samples = self.gene_presence_absence_in_samples.loc[gene_id,self.gene_presence_absence_in_samples.loc[gene_id,]==True].index
# mean and std of non-outlier nt in each sample
x = list(self.samples_coverage_stats_dicts.loc[_samples,'non_outlier_mean_coverage'].values)
if "non_outlier_coverage_std" in self.samples_coverage_stats_dicts:
# we only expect to have the sample coverage std in "full" mode
std_x = list(self.samples_coverage_stats_dicts.loc[_samples,'non_outlier_coverage_std'].values)
else:
std_x = None
if len(_samples) > 1:
# mean and std of non-outlier nt in the gene (in each sample)
y = self.gene_level_coverage_stats_dict_of_dataframes['non_outlier_mean_coverage'].loc[gene_id, _samples].values
std_y = self.gene_level_coverage_stats_dict_of_dataframes['non_outlier_coverage_std'].loc[gene_id, _samples].values
# performing the regression using ODR
_data = odr.RealData(x, y, std_x, std_y)
_model = lambda B, c: B[0] * c
_odr = odr.ODR(_data, odr.Model(_model), beta0=[3])
odr_output = _odr.run()
# store results
self.gene_coverage_consistency_dict[gene_id] = {}
self.gene_coverage_consistency_dict[gene_id]['slope'] = odr_output.beta[0]
self.gene_coverage_consistency_dict[gene_id]['slope_std'] = odr_output.sd_beta[0]
self.gene_coverage_consistency_dict[gene_id]['slope_precision'] = odr_output.sd_beta[0] / odr_output.beta[0]
# compute R squared
f = lambda b: lambda _x: b*_x
R_squered = 1 - sum((np.apply_along_axis(f(odr_output.beta[0]),0,x)-y)**2) / sum((y-np.mean(y))**2)
# Check if converged
self.gene_coverage_consistency_dict[gene_id]['R_squered'] = R_squered
if odr_output.stopreason[0] == 'Sum of squares convergence':
self.gene_coverage_consistency_dict[gene_id]['converged'] = True
else:
self.gene_coverage_consistency_dict[gene_id]['converged'] = False
self.gene_coverage_consistency_dict_initiated = True
self.progress.end()
def get_gene_specificity(self, gene_id):
""" return True for gene if it occurs in positive samples and doesn't occur in negative samples.
Ambiguous occurences are not counted as anything. This means that if a gene is ambiguously
occuring in a negative sample it could still be counted as "specific". It also means that
if a gene is only ambiguously occuring in positive samples then it would be considered
as "non-specific".
"""
if self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] > 1 and self.gene_class_df.loc[gene_id, 'occurence_in_negative_samples'] == 0:
return True
else:
return False
# TODO: if there are no occurences of the gene at all, then we should maybe return None instead of False
def get_gene_coverage_consistency(self, gene_id):
""" return true if the gene's coverage is consistent accross positive samples, False otherwise."""
# TODO: make sure coverage_consistency_dict has been initiated
if self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] == 0:
# if the gene doesn't occur in positive samples then there is no classification
return None
elif self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] == 1:
# if the gene occurs only in one positive sample then return True.
# XXX: we might prefer to return None, we should consider this in the future.
return True
elif self.gene_coverage_consistency_dict[gene_id]['converged']:
# FIXME: this is where we use an arbitrary threshold again :-(
# if the slope precision is smaller than the threshold then the regression
# fit is considered accurate enough and the gene coverage is considered consistent.
return self.gene_coverage_consistency_dict[gene_id]['slope_precision'] < 0.5
else:
# The regression didn't converge, so the coverage is probably not consistent.
return False
def determine_if_gene_is_core(self, gene_id, gene_specificity):
""" return True for core gene, False for accessory gene
If the gene is specific to positive samples, then core would be considered if it
occurs in all positive samples. Otherwise it would be considered core if it
occurs in all positive AND all negative samples.
Ambiguous occurences of a gene are not considered (i.e. they are the same as absence).
"""
if gene_specificity:
# return True if the gene occurs in all positive samples.
return self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] == len(self.positive_samples)
else:
# return True if the gene occurs in all positive AND all negative samples
return self.gene_class_df.loc[gene_id, 'occurence_in_positive_and_negative_samples'] == len(self.positive_samples) + len(self.negative_samples)
def init_gene_class_df(self):
""" generate dictionary with the class information per gene.
This dictionary could be later use to produce an additional-layer
text file for vizualization.
"""
# TODO: make sure gene presence absence was calculated
if not self.gene_coverage_consistency_dict_initiated:
self.init_gene_coverage_consistency_information()
# XXX: only negative and positive samples are used here
# ambiguous samples are ignored as if they were never
# there. This is not ideal, but is easy to do.
gene_ids = self.gene_level_coverage_stats_dict_of_dataframes['mean_coverage'].index
self.gene_class_df = pd.DataFrame(index=gene_ids)
for gene_id in gene_ids:
# determine the number of occurences in positive samples
self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] = len([s for s in self.positive_samples if self.gene_presence_absence_in_samples.loc[gene_id,s] == True])
# determine the number of occurences in negative samples
self.gene_class_df.loc[gene_id, 'occurence_in_negative_samples'] = len([s for s in self.negative_samples if self.gene_presence_absence_in_samples.loc[gene_id,s] == True])
# set the occurence_in_positive_and_negative_samples
self.gene_class_df.loc[gene_id, 'occurence_in_positive_and_negative_samples'] = self.gene_class_df.loc[gene_id, 'occurence_in_positive_samples'] + self.gene_class_df.loc[gene_id, 'occurence_in_negative_samples']
gene_specificity = self.get_gene_specificity(gene_id)
gene_coverage_consistency = self.get_gene_coverage_consistency(gene_id)
# determine core accessory
gene_is_core = self.determine_if_gene_is_core(gene_id, gene_specificity)
self.gene_class_df.loc[gene_id, 'specificity'] = gene_specificity
self.gene_class_df.loc[gene_id, 'coverage_consistency'] =gene_coverage_consistency
self.gene_class_df.loc[gene_id, 'core'] = gene_is_core
self.gene_class_df.loc[gene_id, 'MCG_class'] = get_class_string(gene_specificity, gene_coverage_consistency, gene_is_core)
def update_samples_information_from_gene_class_df(self):
# after running classification we sum up some information regarding
# the results of the classifier per sample
for sample in self.samples_detection_information:
TSC = [g for g in self.gene_class_df.index if (self.gene_class_df.loc[g,'coverage_consistency'] and \
self.gene_class_df.loc[g,'core'])]
self.samples_detection_information['number_of_taxon_specific_core_detected'] = len(TSC)
def gen_gene_consistency_plots(self):
""" generate and save the gene consistency plots for each gene."""
if not self.gene_coverage_consistency_dict_initiated:
self.init_gene_coverage_consistency_information()
gene_ids = self.gene_level_coverage_stats_dict_of_dataframes['mean_coverage'].index
num_genes, counter = len(gene_ids), 1
progress.new('Plotting gene consistency information')
progress.update('...')
for gene_id in gene_ids:
if num_genes > 100 and counter % 100 == 0:
progress.update('%d of %d genes...' % (counter, num_genes))
p = MCGPlots(self, gene_id, run=run, progress=progress)
p.plot()
progress.end()
def save_gene_class_information_in_additional_layers(self):
output_file_path = self.output_file_prefix + self.additional_description + '-additional-layers.txt'
self.gene_class_df.to_csv(output_file_path, sep='\t', index_label='gene_callers_id')
def save_samples_information(self):
samples_information_file_name = self.output_file_prefix + self.additional_description + '-samples-information.txt'
samples_information = pd.concat([self.samples_detection_information, self.samples_coverage_stats_dicts], axis=1, sort=True)
samples_information.to_csv(samples_information_file_name, sep='\t', index_label='samples')
def classify(self):
self.init_gene_class_df()
self.update_samples_information_from_gene_class_df()
if self.write_output_to_files:
self.save_gene_class_information_in_additional_layers()
self.save_samples_information()
if self.gen_figures:
# Create the plots for nucleotide-level coverage data per sample.
self.plot_nucleotide_coverage_distribution()
# generate plots for coverage consistency information for each gene.
self.gen_gene_consistency_plots()
def get_coverage_values_per_nucleotide(split_coverage_values_per_nt_dict, samples=None):
""" Helper function that accepts a split_coverage_values_per_nt_dict and returns a dictionary with
samples as keys and the concatenated coverage values for all splits as one array
"""
if not split_coverage_values_per_nt_dict:
raise ConfigError("You did not provide a split_coverage_values_per_nt_dict, and we need it...")
progress.new('Merging coverage values across splits')
progress.update('...')
d = {}
if samples is None:
samples = next(iter(split_coverage_values_per_nt_dict.values())).keys()
number_of_samples = len(samples)
number_of_finished = 0
# find the combined length of all splits first
total_length = 0
for split in split_coverage_values_per_nt_dict:
total_length += len(split_coverage_values_per_nt_dict[split][next(iter(samples))])
for sample in samples:
# create an array of zero with the total length
# this is much faster than appending the vectors of splits
d[sample] = np.zeros(total_length)
pos = 0
for split in split_coverage_values_per_nt_dict:
split_values = split_coverage_values_per_nt_dict[split][sample]
split_len = len(split_values)
d[sample][pos:pos+split_len] = split_values
pos += split_len
#d[sample] = np.array(d[sample])
number_of_finished += 1
progress.update("Finished sample %d of %d" % (number_of_finished,number_of_samples))
progress.end()
return d
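# Illustrative note (added, not from the original source): the expected input is a
# nested dict of the form {split_name: {sample_name: per-nucleotide coverage array}}.
# A hypothetical two-split, one-sample example:
#   split_cov = {'split_001': {'s1': np.array([3, 4, 5])},
#                'split_002': {'s1': np.array([0, 1])}}
#   get_coverage_values_per_nucleotide(split_cov)  # -> {'s1': array([3., 4., 5., 0., 1.])}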
def get_non_outliers_information(v, MAD_threshold=2.5, zeros_are_outliers=False):
""" returns the non-outliers for the input pandas series using MAD"""
d = pd.Series(index=columns_for_samples_coverage_stats_dict)
outliers = utils.get_list_of_outliers(v, threshold=MAD_threshold, zeros_are_outliers=zeros_are_outliers)
non_outliers = np.logical_not(outliers)
non_outlier_indices = np.where(non_outliers)[0]
if not(len(non_outlier_indices)):
non_outlier_indices = np.array([])
d['non_outlier_mean_coverage'] = 0.0
d['non_outlier_coverage_std'] = 0.0
else:
d['non_outlier_mean_coverage'] = np.mean(v[non_outlier_indices])
d['non_outlier_coverage_std'] = np.std(v[non_outlier_indices])
return non_outlier_indices, d
# The order of the strings is very important since it is used in get_class_string
class_short_names = ['NNA', 'SNA', 'NCA',\
'SCA', 'NNC', 'SNC',\
'NCC', 'SCC']
class_long_names = ['Non-specific_Non-consistent_Accessory', 'Specific_Non-consistent_Accessory', 'Non-specific_Consistent_Accessory',\
'Specific_Consistent_Accessory', 'Non-specific_Non-consistent_Core', 'Specific_Non-consistent_Core',\
'Non-specific_Consistent_Core', 'Specific_Consistent_Core']
class_short_name_long_name_dict = dict(zip(class_short_names,class_long_names))
def get_class_long_name_from_short_name(short_name):
return class_short_name_long_name_dict[short_name]
def get_class_string(gene_specificity, gene_coverage_consistency, gene_is_core):
""" Takes the values of the three categories and returns a string to represent the class."""
value_list = [gene_specificity, gene_coverage_consistency, gene_is_core]
if None in value_list:
return 'NA'
# converting the list of booleans to a number
# this solution was taken from here: https://stackoverflow.com/a/4066807/7115450
index = sum(1<<i for i, b in enumerate(value_list) if b)
return class_short_names[index]
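# Worked example (added note, not in the original source): the booleans are packed
# into a 3-bit index with specificity as bit 0, coverage consistency as bit 1, and
# core as bit 2. For instance:
#   get_class_string(True, False, True)  ->  index 1 + 4 = 5  ->  'SNC'
# which class_short_name_long_name_dict maps to 'Specific_Non-consistent_Core'.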
def get_presence_absence_information(number_of_non_outliers, alpha):
""" Helper function to determine presence/absence according to a threshold."""
##### WHAT WE SHOULD DO IN THE FUTURE #####
# Arbitrary cut-offs are terrible.
# If we assume there are no accessory genes (we will get back to this later),
# then if the genome is present, we expect ALL of it to be present. Thus,
# if we had an unlimited number of reads, we would expect detection to be 1.
# As the number of reads gets smaller, the expected detection value gets smaller.
# For a given genome size, read length, and number of reads mapped to the genome,
# we can compute the following value: "what is the probability that the detection
# value would be greater than the actual detection value". If that
# probability is high, then that is a good sign that the genome is not present
# in the sample, and that any reads we got are due to non-specific coverage.
# The same thing could be calculated for a given gene.
# We can create a measure of agreement between the mean coverage of a gene
# and the detection of the gene. It would simply be the probability that the
# coverage of the gene would co-occur with a detection that is higher than the
# actual detection of the gene. All we need for that is the read length,
# gene/genome length, and the expected genomic portion shared by two genomes that
# belong to the population in question.
if number_of_non_outliers >= 0.5 + alpha:
return True
elif number_of_non_outliers <= 0.5 - alpha:
return False
else:
return None
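# --- Illustrative sketch (added, not part of the original module) ---
# The comment block above proposes replacing the arbitrary alpha cut-off with a
# probabilistic test. Below is a minimal sketch of the central quantity under the
# simplifying assumption that reads land uniformly and independently on the genome
# (a Poisson / Lander-Waterman style approximation); the function name and the
# model are illustrative assumptions, not the authors' implementation.
def expected_detection_sketch(num_mapped_reads, read_length, genome_length):
    """Expected fraction of positions covered by at least one read (Poisson approximation)."""
    mean_coverage = num_mapped_reads * read_length / genome_length
    return 1 - np.exp(-mean_coverage)
# A gene whose observed detection falls far below this expectation for the sample's
# read count would then be a candidate for absence, without a fixed alpha threshold.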
|
gpl-3.0
|
britodasilva/pyhfo
|
pyhfo/ui/plot_spike.py
|
1
|
3272
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 2 17:11:16 2015
@author: anderson
"""
# importing modules
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from . import adjust_spines
def plot_single_spk(spk,subplot = None, spines = ['left', 'bottom'],
figure_size = (5,5),dpi=600,**kwargs):
"""
Function to plot single Spike
Parameters
----------
spk: SpikeObj
Spike object to plot
subplot: matplotlib axes
None (default) - create a new figure
ax - axes where the spike should be plotted
spines: list of str
['left', 'bottom'] (default) - plot figure with left and bottom spines only
figure_size: tuple
(5,5) (default) - Size of figure, tuple of integers with width, height in inches
dpi: int
600 (default) - DPI resolution
**kwargs: matplotlib arguments
"""
if subplot is None:
# Creating the figure
f = plt.figure(figsize=figure_size,dpi=dpi)
# creating the axes
ax = f.add_subplot(111)
else:
ax = subplot
#ax.plot(range(-20,44),spk.waveform,**kwargs)
time_vec = np.linspace(spk.time_edge[0],spk.time_edge[1],spk.waveform.shape[0],endpoint=True)*1000
ax.plot(time_vec,spk.waveform,**kwargs)
plt.xlabel('Time (ms)')
adjust_spines(ax, spines)
def plot_spk_cluster(evlist,cluster,channel,color='b',ax = None, spines = [], plot_mean = True,figure_size=(5,5),dpi=600):
"""
Function to plot cluster of spikes
Parameters
----------
evlist: EventList
EventList object to plot
cluster: int
Number of the cluster
channel:
Channel to select events from
color: str
Color of plot
ax: matplotlib axes
None (default) - create a new figure
spines: list of str
[] (default) - plot figure without spines
plot_mean: boolean
True (default) - plot mean line
figure_size: tuple
(5,5) (default) - Size of figure, tuple of integers with width, height in inches
dpi: int
600 (default) - DPI resolution
"""
if ax is None:
# Creating the figure
f = plt.figure(figsize=figure_size,dpi=dpi)
# creating the axes
ax = f.add_subplot(111)
spikes = np.array([]) # creating an empty array
objs = [x for x in evlist.event if x.cluster == cluster and x.channel == channel]
npspk, = objs[0].waveform.shape
time_vec = np.linspace(objs[0].time_edge[0],objs[0].time_edge[1],npspk,endpoint=True)
for sp in objs:
ax.plot(time_vec,sp.waveform,color=color,lw=0.5)
#ax.plot(sp.waveform,color=color,lw=0.5)
spikes = np.append(spikes, sp.waveform)
if plot_mean and len(evlist.event)>1:
spikes = spikes.reshape(len(objs),npspk)
ax.plot(time_vec,np.mean(spikes,axis=0),'k',lw=2)
ax.plot(time_vec,np.mean(spikes,axis=0)-np.std(spikes,axis=0),'k',lw=1)
ax.plot(time_vec,np.mean(spikes,axis=0)+np.std(spikes,axis=0),'k',lw=1)
plt.xlabel('Time (ms)')
#ax.plot(np.mean(spikes,axis=0),'k',lw=2)
#ax.plot(np.mean(spikes,axis=0)-np.std(spikes,axis=0),'k',lw=1)
#ax.plot(np.mean(spikes,axis=0)+np.std(spikes,axis=0),'k',lw=1)
adjust_spines(ax, spines)
|
mit
|
LohithBlaze/scikit-learn
|
examples/cluster/plot_agglomerative_clustering.py
|
343
|
2931
|
"""
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First, clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters at the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
|
bsd-3-clause
|
Eric89GXL/mne-python
|
mne/parallel.py
|
14
|
6545
|
"""Parallel util function."""
# Author: Alexandre Gramfort <[email protected]>
#
# License: Simplified BSD
import logging
import os
from . import get_config
from .utils import logger, verbose, warn, ProgressBar
from .utils.check import int_like
from .fixes import _get_args
if 'MNE_FORCE_SERIAL' in os.environ:
_force_serial = True
else:
_force_serial = None
@verbose
def parallel_func(func, n_jobs, max_nbytes='auto', pre_dispatch='n_jobs',
total=None, prefer=None, verbose=None):
"""Return parallel instance with delayed function.
Util function to use joblib only if available
Parameters
----------
func : callable
A function.
n_jobs : int
Number of jobs to run in parallel.
max_nbytes : int, str, or None
Threshold on the minimum size of arrays passed to the workers that
triggers automated memory mapping. Can be an int in Bytes,
or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmapping of large arrays. Use 'auto' to
use the value set using mne.set_memmap_min_size.
pre_dispatch : int, or str, optional
See :class:`joblib.Parallel`.
total : int | None
If int, use a progress bar to display the progress of dispatched
jobs. This should only be used when directly iterating, not when
using ``split_list`` or :func:`np.array_split`.
If None (default), do not add a progress bar.
prefer : str | None
If str, can be "processes" or "threads". See :class:`joblib.Parallel`.
Ignored if the joblib version is too old to support this.
.. versionadded:: 0.18
%(verbose)s INFO or DEBUG
will print parallel status, others will not.
Returns
-------
parallel: instance of joblib.Parallel or list
The parallel object.
my_func: callable
``func`` if not parallel or delayed(func).
n_jobs: int
Number of jobs >= 0.
"""
should_print = (logger.level <= logging.INFO)
# for a single job, we don't need joblib
if n_jobs != 1:
try:
from joblib import Parallel, delayed
except ImportError:
try:
from sklearn.externals.joblib import Parallel, delayed
except ImportError:
warn('joblib not installed. Cannot run in parallel.')
n_jobs = 1
if n_jobs == 1:
n_jobs = 1
my_func = func
parallel = list
else:
# check if joblib is recent enough to support memmapping
p_args = _get_args(Parallel.__init__)
joblib_mmap = ('temp_folder' in p_args and 'max_nbytes' in p_args)
cache_dir = get_config('MNE_CACHE_DIR', None)
if isinstance(max_nbytes, str) and max_nbytes == 'auto':
max_nbytes = get_config('MNE_MEMMAP_MIN_SIZE', None)
if max_nbytes is not None:
if not joblib_mmap and cache_dir is not None:
warn('"MNE_CACHE_DIR" is set but a newer version of joblib is '
'needed to use the memmapping pool.')
if joblib_mmap and cache_dir is None:
logger.info(
'joblib supports memmapping pool but "MNE_CACHE_DIR" '
'is not set in MNE-Python config. To enable it, use, '
'e.g., mne.set_cache_dir(\'/tmp/shm\'). This will '
'store temporary files under /dev/shm and can result '
'in large memory savings.')
# create keyword arguments for Parallel
kwargs = {'verbose': 5 if should_print and total is None else 0}
kwargs['pre_dispatch'] = pre_dispatch
if 'prefer' in p_args:
kwargs['prefer'] = prefer
if joblib_mmap:
if cache_dir is None:
max_nbytes = None # disable memmapping
kwargs['temp_folder'] = cache_dir
kwargs['max_nbytes'] = max_nbytes
n_jobs = check_n_jobs(n_jobs)
parallel = _check_wrapper(Parallel(n_jobs, **kwargs))
my_func = delayed(func)
if total is not None:
def parallel_progress(op_iter):
return parallel(ProgressBar(iterable=op_iter, max_value=total))
parallel_out = parallel_progress
else:
parallel_out = parallel
return parallel_out, my_func, n_jobs
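# Illustrative usage (added note, not from the MNE source), assuming joblib is
# installed and ``my_heavy_func`` is any picklable function:
#
#     parallel, p_fun, n_jobs = parallel_func(my_heavy_func, n_jobs=2)
#     results = parallel(p_fun(x) for x in range(10))
#
# With n_jobs=1 the same two lines run serially, since ``parallel`` is then just
# ``list`` and ``p_fun`` is the plain function.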
def _check_wrapper(fun):
def run(*args, **kwargs):
try:
return fun(*args, **kwargs)
except RuntimeError as err:
msg = str(err.args[0]) if err.args else ''
if msg.startswith('The task could not be sent to the workers'):
raise RuntimeError(
msg + ' Consider using joblib memmap caching to get '
'around this problem. See mne.set_memmap_min_size, '
'mne.set_cache_dir, and buffer_size parallel function '
'arguments (if applicable).')
raise
return run
def check_n_jobs(n_jobs, allow_cuda=False):
"""Check n_jobs in particular for negative values.
Parameters
----------
n_jobs : int
The number of jobs.
allow_cuda : bool
Allow n_jobs to be 'cuda'. Default: False.
Returns
-------
n_jobs : int
The checked number of jobs. Always positive (or 'cuda' if
applicable).
"""
if not isinstance(n_jobs, int_like):
if not allow_cuda:
raise ValueError('n_jobs must be an integer')
elif not isinstance(n_jobs, str) or n_jobs != 'cuda':
raise ValueError('n_jobs must be an integer, or "cuda"')
# else, we have n_jobs='cuda' and this is okay, so do nothing
elif _force_serial:
n_jobs = 1
logger.info('... MNE_FORCE_SERIAL set. Processing in forced '
'serial mode.')
elif n_jobs <= 0:
try:
import multiprocessing
n_cores = multiprocessing.cpu_count()
n_jobs = min(n_cores + n_jobs + 1, n_cores)
if n_jobs <= 0:
raise ValueError('If n_jobs has a negative value it must not '
'be less than the number of CPUs present. '
'You\'ve got %s CPUs' % n_cores)
except ImportError:
# only warn if they tried to use something other than 1 job
if n_jobs != 1:
warn('multiprocessing not installed. Cannot run in parallel.')
n_jobs = 1
return n_jobs
|
bsd-3-clause
|
tapomayukh/projects_in_python
|
classification/Classification_with_HMM/Single_Contact_Classification/simulation_results/Combined/hmm_crossvalidation_force_motion_10_states.py
|
1
|
17337
|
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import scipy.io
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import random
import sys
sys.path.insert(0, '/home/tapo/git/hrl_haptic_manipulation_in_clutter/sandbox_tapo_darpa_m3/src/skin_related/Classification/Classification_with_HMM/Single_Contact_Classification/multivariate_gaussian_emissions')
from test_crossvalidation_force_motion_10_states import cov_rf
from test_crossvalidation_force_motion_10_states import cov_rm
from test_crossvalidation_force_motion_10_states import cov_sf
from test_crossvalidation_force_motion_10_states import cov_sm
#print cov_rf
def scaling(Fvec_a,Fvec_c):
# With Scaling
max_a = np.max(abs(Fvec_a))
min_a = np.min(abs(Fvec_a))
mean_a = np.mean(Fvec_a)
std_a = np.std(Fvec_a)
#Fvec_a = (Fvec_a)/max_a
#Fvec_a = (Fvec_a-mean_a)
#Fvec_a = (Fvec_a-mean_a)/max_a
Fvec_a = (Fvec_a-mean_a)/std_a
max_c = np.max(abs(Fvec_c))
min_c = np.min(abs(Fvec_c))
mean_c = np.mean(Fvec_c)
std_c = np.std(Fvec_c)
#Fvec_c = (Fvec_c)/max_c
#Fvec_c = (Fvec_c-mean_c)
#Fvec_c = (Fvec_c-mean_c)/max_c
Fvec_c = (Fvec_c-mean_c)/std_c
#Fvec_c = Fvec_c*np.max((max_a,max_c))/max_c
data = np.concatenate((Fvec_a,Fvec_c),axis=0)
#print np.shape(data)
return data
# Returns mu,sigma for 10 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models
def feature_to_mu_cov(fvec1,fvec2):
index = 0
m,n = np.shape(fvec1)
#print m,n
mu_1 = np.zeros((10,1))
mu_2 = np.zeros((10,1))
cov = np.zeros((10,2,2))
DIVS = m/10
while (index < 10):
m_init = index*DIVS
temp_fvec1 = fvec1[(m_init):(m_init+DIVS),0:]
#print temp_fvec1
temp_fvec2 = fvec2[(m_init):(m_init+DIVS),0:]
temp_fvec1 = np.reshape(temp_fvec1,DIVS*n)
#print temp_fvec1
temp_fvec2 = np.reshape(temp_fvec2,DIVS*n)
mu_1[index] = np.mean(temp_fvec1)
mu_2[index] = np.mean(temp_fvec2)
cov[index,:,:] = np.cov(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
if index == 0:
print 'mean = ', mu_2[index]
#print 'mean = ', scp.mean(fvec2[(m_init):(m_init+DIVS),0:])
#print np.shape(np.concatenate((temp_fvec1,temp_fvec2),axis=0))
#print cov[index,:,:]
#print scp.std(fvec2[(m_init):(m_init+DIVS),0:])
#print scp.std(temp_fvec2)
index = index+1
return mu_1,mu_2,cov
if __name__ == '__main__':
### Simulation Data
tSamples = 121
data_rf = scipy.io.loadmat('rigid_fixed.mat')
data_sf = scipy.io.loadmat('soft_fixed.mat')
data_rm = scipy.io.loadmat('rigid_movable.mat')
data_sm = scipy.io.loadmat('soft_movable.mat')
simulforce = np.zeros((tSamples,8000))
datatime = np.arange(0,1.21,0.01)
dataforce_rf = np.transpose(data_rf['sensed_force_rf'])
dataforce_sf = np.transpose(data_sf['sensed_force_sf'])
dataforce_rm = np.transpose(data_rm['sensed_force_rm'])
dataforce_sm = np.transpose(data_sm['sensed_force_sm'])
datamotion_rf = np.transpose(data_rf['robot_pos_rf'])
datamotion_sf = np.transpose(data_sf['robot_pos_sf'])
datamotion_rm = np.transpose(data_rm['robot_pos_rm'])
datamotion_sm = np.transpose(data_sm['robot_pos_sm'])
data_RF = scaling(dataforce_rf,datamotion_rf)
data_SF = scaling(dataforce_sf,datamotion_sf)
data_RM = scaling(dataforce_rm,datamotion_rm)
data_SM = scaling(dataforce_sm,datamotion_sm)
#print np.shape(data_RF)
simuldata = np.concatenate((data_RF, data_RM, data_SF, data_SM), axis = 1)
Fmat = np.matrix(simuldata)
#print np.shape(Fmat[0])
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
#print " "
#print 'Total_Matrix_Shape:',m_tot,n_tot
mu_rf_force,mu_rf_motion,cov_rf_sim = feature_to_mu_cov(Fmat[0:121,0:2000],Fmat[121:242,0:2000])
mu_rm_force,mu_rm_motion,cov_rm_sim = feature_to_mu_cov(Fmat[0:121,2000:4000],Fmat[121:242,2000:4000])
mu_sf_force,mu_sf_motion,cov_sf_sim = feature_to_mu_cov(Fmat[0:121,4000:6000],Fmat[121:242,4000:6000])
mu_sm_force,mu_sm_motion,cov_sm_sim = feature_to_mu_cov(Fmat[0:121,6000:8000],Fmat[121:242,6000:8000])
#print [mu_rf, sigma_rf]
# HMM - Implementation:
# 10 Hidden States
# Max. Force (for now), Contact Area (not now), and Contact Motion (not now) as continuous Gaussian observations from each hidden state
# Four HMM models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
# Transition probabilities obtained as an upper-diagonal matrix (to be trained using Baum-Welch)
# A new object is classified according to the model it represents most closely.
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.1, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.20, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf = [0.0]*10
B_rm = [0.0]*10
B_sf = [0.0]*10
B_sm = [0.0]*10
#for num_states in range(10):
#B_rf[num_states] = [[mu_rf_force[num_states][0],mu_rf_motion[num_states][0]],[cov_rf_sim[num_states][0][0],cov_rf_sim[num_states][0][1],cov_rf_sim[num_states][1][0],cov_rf_sim[num_states][1][1]]]
#B_rm[num_states] = [[mu_rm_force[num_states][0],mu_rm_motion[num_states][0]],[cov_rm_sim[num_states][0][0],cov_rm_sim[num_states][0][1],cov_rm_sim[num_states][1][0],cov_rm_sim[num_states][1][1]]]
#B_sf[num_states] = [[mu_sf_force[num_states][0],mu_sf_motion[num_states][0]],[cov_sf_sim[num_states][0][0],cov_sf_sim[num_states][0][1],cov_sf_sim[num_states][1][0],cov_sf_sim[num_states][1][1]]]
#B_sm[num_states] = [[mu_sm_force[num_states][0],mu_sm_motion[num_states][0]],[cov_sm_sim[num_states][0][0],cov_sm_sim[num_states][0][1],cov_sm_sim[num_states][1][0],cov_sm_sim[num_states][1][1]]]
#print cov_rf_sim[num_states][0][0],cov_rf_sim[num_states][0][1],cov_rf_sim[num_states][1][0],cov_rf_sim[num_states][1][1]
#print "----"
for num_states in range(10):
B_rf[num_states] = [[mu_rf_force[num_states][0],mu_rf_motion[num_states][0]],[cov_rf[num_states][0][0],cov_rf[num_states][0][1],cov_rf[num_states][1][0],cov_rf[num_states][1][1]]]
B_rm[num_states] = [[mu_rm_force[num_states][0],mu_rm_motion[num_states][0]],[cov_rm[num_states][0][0],cov_rm[num_states][0][1],cov_rm[num_states][1][0],cov_rm[num_states][1][1]]]
B_sf[num_states] = [[mu_sf_force[num_states][0],mu_sf_motion[num_states][0]],[cov_sf[num_states][0][0],cov_sf[num_states][0][1],cov_sf[num_states][1][0],cov_sf[num_states][1][1]]]
B_sm[num_states] = [[mu_sm_force[num_states][0],mu_sm_motion[num_states][0]],[cov_sm[num_states][0][0],cov_sm[num_states][0][1],cov_sm[num_states][1][0],cov_sm[num_states][1][1]]]
#print cov_rf[num_states][0][0],cov_rf[num_states][0][1],cov_rf[num_states][1][0],cov_rf[num_states][1][1]
#print "----"
#for num_states in range(10):
#B_rf[num_states] = [[mu_rf_force[num_states][0],mu_rf_motion[num_states][0]],[1.3,0.3,0.9,1.7]]
#B_rm[num_states] = [[mu_rm_force[num_states][0],mu_rm_motion[num_states][0]],[1.4,0.4,0.6,1.8]]
#B_sf[num_states] = [[mu_sf_force[num_states][0],mu_sf_motion[num_states][0]],[1.3,0.5,0.9,1.7]]
#B_sm[num_states] = [[mu_sm_force[num_states][0],mu_sm_motion[num_states][0]],[1.5,0.3,0.7,0.5]]
#print cov_rf[num_states][0][0],cov_rf[num_states][0][1],cov_rf[num_states][1][0],cov_rf[num_states][1][1]
#print "----"
#print B_sm
#print mu_sm_motion
# pi - initial probabilities per state
pi = [0.1] * 10
# generate RF, RM, SF, SM models from parameters
model_rf = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf, pi) # Will be Trained
model_rm = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm, pi) # Will be Trained
model_sf = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf, pi) # Will be Trained
model_sm = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm, pi) # Will be Trained
# For Training
Nfolds = 1
rf_final = np.matrix(np.zeros((1600,1)))
rm_final = np.matrix(np.zeros((1600,1)))
sf_final = np.matrix(np.zeros((1600,1)))
sm_final = np.matrix(np.zeros((1600,1)))
total_seq = np.zeros((242,8000))
temp_seq1 = Fmat[0:121,:]
temp_seq2 = Fmat[121:242,:]
i = 0
j = 0
while i < (np.size(Fmat,0)):
total_seq[i] = temp_seq1[j]
total_seq[i+1] = temp_seq2[j]
j=j+1
i=i+2
m_total, n_total = np.shape(total_seq)
#print 'Total_Sequence_Shape:', m_total, n_total
while (Nfolds < 6):
# For Training
if (Nfolds == 1):
total_seq_rf = total_seq[:,400:2000]
total_seq_rm = total_seq[:,2400:4000]
total_seq_sf = total_seq[:,4400:6000]
total_seq_sm = total_seq[:,6400:8000]
if (Nfolds == 2):
total_seq_rf = np.column_stack((total_seq[:,0:400],total_seq[:,800:2000]))
total_seq_rm = np.column_stack((total_seq[:,2000:2400],total_seq[:,2800:4000]))
total_seq_sf = np.column_stack((total_seq[:,4000:4400],total_seq[:,4800:6000]))
total_seq_sm = np.column_stack((total_seq[:,6000:6400],total_seq[:,6800:8000]))
if (Nfolds == 3):
total_seq_rf = np.column_stack((total_seq[:,0:800],total_seq[:,1200:2000]))
total_seq_rm = np.column_stack((total_seq[:,2000:2800],total_seq[:,3200:4000]))
total_seq_sf = np.column_stack((total_seq[:,4000:4800],total_seq[:,5200:6000]))
total_seq_sm = np.column_stack((total_seq[:,6000:6800],total_seq[:,7200:8000]))
if (Nfolds == 4):
total_seq_rf = np.column_stack((total_seq[:,0:1200],total_seq[:,1600:2000]))
total_seq_rm = np.column_stack((total_seq[:,2000:3200],total_seq[:,3600:4000]))
total_seq_sf = np.column_stack((total_seq[:,4000:5200],total_seq[:,5600:6000]))
total_seq_sm = np.column_stack((total_seq[:,6000:7200],total_seq[:,7600:8000]))
if (Nfolds == 5):
total_seq_rf = total_seq[:,0:1600]
total_seq_rm = total_seq[:,2000:3600]
total_seq_sf = total_seq[:,4000:5600]
total_seq_sm = total_seq[:,6000:7600]
train_seq_rf = (np.array(total_seq_rf).T).tolist()
train_seq_rm = (np.array(total_seq_rm).T).tolist()
train_seq_sf = (np.array(total_seq_sf).T).tolist()
train_seq_sm = (np.array(total_seq_sm).T).tolist()
#print train_seq_rf
final_ts_rf = ghmm.SequenceSet(F,train_seq_rf)
final_ts_rm = ghmm.SequenceSet(F,train_seq_rm)
final_ts_sf = ghmm.SequenceSet(F,train_seq_sf)
final_ts_sm = ghmm.SequenceSet(F,train_seq_sm)
model_rf.baumWelch(final_ts_rf)
model_rm.baumWelch(final_ts_rm)
model_sf.baumWelch(final_ts_sf)
model_sm.baumWelch(final_ts_sm)
# For Testing
if (Nfolds == 1):
total_seq_rf = total_seq[:,0:400]
total_seq_rm = total_seq[:,2000:2400]
total_seq_sf = total_seq[:,4000:4400]
total_seq_sm = total_seq[:,6000:6400]
if (Nfolds == 2):
total_seq_rf = total_seq[:,400:800]
total_seq_rm = total_seq[:,2400:2800]
total_seq_sf = total_seq[:,4400:4800]
total_seq_sm = total_seq[:,6400:6800]
if (Nfolds == 3):
total_seq_rf = total_seq[:,800:1200]
total_seq_rm = total_seq[:,2800:3200]
total_seq_sf = total_seq[:,4800:5200]
total_seq_sm = total_seq[:,6800:7200]
if (Nfolds == 4):
total_seq_rf = total_seq[:,1200:1600]
total_seq_rm = total_seq[:,3200:3600]
total_seq_sf = total_seq[:,5200:5600]
total_seq_sm = total_seq[:,7200:7600]
if (Nfolds == 5):
total_seq_rf = total_seq[:,1600:2000]
total_seq_rm = total_seq[:,3600:4000]
total_seq_sf = total_seq[:,5600:6000]
total_seq_sm = total_seq[:,7600:8000]
total_seq_obj = np.matrix(np.column_stack((total_seq_rf,total_seq_rm,total_seq_sf,total_seq_sm)))
rf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
rm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
sf = np.matrix(np.zeros(np.size(total_seq_obj,1)))
sm = np.matrix(np.zeros(np.size(total_seq_obj,1)))
#print np.shape(rf)
#print np.size(total_seq_obj,1)
k = 0
while (k < np.size(total_seq_obj,1)):
test_seq_obj = (np.array(total_seq_obj[:,k]).T).tolist()
new_test_seq_obj = np.array(sum(test_seq_obj,[]))
ts_obj = new_test_seq_obj
final_ts_obj = ghmm.EmissionSequence(F,ts_obj.tolist())
# Find Viterbi Path
path_rf_obj = model_rf.viterbi(final_ts_obj)
print "Rigid_Fixed_Model_Path"
print path_rf_obj
#print np.shape(path_rf_obj[0])
path_rm_obj = model_rm.viterbi(final_ts_obj)
print "Rigid_Movable_Model_Path"
print path_rm_obj
#print np.shape(path_rm_obj[0])
path_sf_obj = model_sf.viterbi(final_ts_obj)
print "Soft_Fixed_Model_Path"
print path_sf_obj
#print np.shape(path_sf_obj[0])
path_sm_obj = model_sm.viterbi(final_ts_obj)
print "Soft_Movable_Model_Path"
print path_sm_obj
#print np.shape(path_sm_obj[0])
obj = max(path_rf_obj[1],path_rm_obj[1],path_sf_obj[1],path_sm_obj[1])
#print obj
if obj == path_rf_obj[1]:
rf[0,k] = 1
elif obj == path_rm_obj[1]:
rm[0,k] = 1
elif obj == path_sf_obj[1]:
sf[0,k] = 1
else:
sm[0,k] = 1
k = k+1
#print rf.T
rf_final = rf_final + rf.T
rm_final = rm_final + rm.T
sf_final = sf_final + sf.T
sm_final = sm_final + sm.T
Nfolds = Nfolds + 1
#print rf_final
#print rm_final
#print sf_final
#print sm_final
# Confusion Matrix
cmat = np.zeros((4,4))
arrsum_rf = np.zeros((4,1))
arrsum_rm = np.zeros((4,1))
arrsum_sf = np.zeros((4,1))
arrsum_sm = np.zeros((4,1))
k = 400
i = 0
while (k < 1601):
arrsum_rf[i] = np.sum(rf_final[k-400:k,0])
arrsum_rm[i] = np.sum(rm_final[k-400:k,0])
arrsum_sf[i] = np.sum(sf_final[k-400:k,0])
arrsum_sm[i] = np.sum(sm_final[k-400:k,0])
i = i+1
k = k+400
i=0
while (i < 4):
j=0
while (j < 4):
if (i == 0):
cmat[i][j] = arrsum_rf[j]
elif (i == 1):
cmat[i][j] = arrsum_rm[j]
elif (i == 2):
cmat[i][j] = arrsum_sf[j]
else:
cmat[i][j] = arrsum_sm[j]
j = j+1
i = i+1
#print cmat
# Plot Confusion Matrix
Nlabels = 4
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels], cmap='gray_r')
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
ax.set_xticks([0.5,1.5,2.5,3.5])
ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
ax.set_yticks([3.5,2.5,1.5,0.5])
ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
figbar = fig.colorbar(figplot)
i = 0
while (i < 4):
j = 0
while (j < 4):
pp.text(j+0.5,3.5-i,cmat[i][j], color='k')
if cmat[i][j] > 1800:
pp.text(j+0.5,3.5-i,cmat[i][j], color='w')
j = j+1
i = i+1
pp.show()
|
mit
|
rcarmo/crab
|
getdata.py
|
1
|
1238
|
from utils import *
url_legco_hk_vote_matrix = 'https://course.ie.cuhk.edu.hk/~engg4030/tutorial/tutorial7/votes-matrix.csv'
import pandas as pd
df = pd.io.parsers.read_csv(url_legco_hk_vote_matrix, index_col='member')
print 'number of users:', len(df.index)
print 'number of items:', len(df.columns)
df[:5]
# Convert votes to numeric values
def vote_to_numeric(v):
if v == 'Yes':
return 1
elif v == 'No':
return -1
return 0
df = df.applymap(vote_to_numeric)
df[:5]
all_data_points = []
for item in df.columns:
for user in df[item].keys():
rating = df[item][user]
all_data_points.append((user, item, rating))
print 'len of all_data_points:', len(all_data_points)
nonzero_data_points = filter(lambda x: x[2] != 0, all_data_points)
print 'len of nonzero_data_points:', len(nonzero_data_points)
print all_data_points[:5]
train_data_points, test_data_points = split_data_points(nonzero_data_points, int(0.9 * len(nonzero_data_points)))
print 'len of train_data_points:', len(train_data_points)
print 'len of test_data_points:', len(test_data_points)
import shelve
s = shelve.open('data.shelve')
s['train_data_points'] = train_data_points
s['test_data_points'] = test_data_points
s.close()
|
bsd-3-clause
|
tobegit3hub/ml_implementation
|
data_analysis/pandas_project/analysis_csv.py
|
1
|
2681
|
#!/usr/bin/env python
import numpy as np
import scipy as sp
import pandas as pd
import pprint
def main():
# Load CSV file
csv_file_path = "../data/train.csv"
#csv_file_path = "../data/train_fe1.csv"
dataset = pd.read_csv(csv_file_path)
view_sample_dataset(dataset)
print_dataset_info(dataset)
print_features_info(dataset)
def view_sample_dataset(dataset):
print("\n[Debug] Print the sample of the dataset: ")
dataset_sample = dataset.head(1)
print(dataset_sample)
def print_dataset_info(dataset):
print("\n[Debug] Print the total number of the examples: ")
example_number = len(dataset)
print(example_number)
print("\n[Debug] Print the info of the dataset: ")
dataset_info = dataset.info()
print(dataset_info)
def print_features_info(dataset):
features_and_types = dataset.dtypes
print("\n[Debug] Print the feature number: ")
numeric_feature_number = 0
non_numeric_feature_number = 0
for feature_type in features_and_types:
if feature_type == np.int16 or feature_type == np.int32 or feature_type == np.int64 or feature_type == np.float16 or feature_type == np.float32 or feature_type == np.float64 or feature_type == np.float128 or feature_type == np.double:
numeric_feature_number += 1
else:
non_numeric_feature_number += 1
print("Total feature number: {}".format(len(features_and_types)))
print("Numeric feature number: {}".format(numeric_feature_number))
print("Non-numeric feature number: {}".format(non_numeric_feature_number))
print("\n[Debug] Print the feature list of the dataset: ")
print(features_and_types)
print("\n[Debug] Print the feature presence: ")
example_number = len(dataset)
features_array = list(dataset.columns.values)
for feature_name in features_array:
feature_presence_number = len(dataset[feature_name][dataset[feature_name].notnull()])
feature_presence_percentage = 100.0 * feature_presence_number / example_number
# Example: "Age: 80.1346801347% (714 / 891)"
print("{}: {}% ({} / {})".format(feature_name, feature_presence_percentage, feature_presence_number, example_number))
print("\n[Debug] For numberic features, print the feature statistics: ")
feature_statistics = dataset.describe()
print(feature_statistics)
top_k_number = 5
print("\n[Debug] For all features, print the top {} values: ".format(top_k_number))
for i in range(len(features_array)):
feature_name = features_array[i]
top_k_feature_info = dataset[feature_name].value_counts()[:top_k_number]
print("\nFeature {} and the top {} values:".format(feature_name, top_k_number))
print(top_k_feature_info)
if __name__ == "__main__":
main()
|
mit
|
stonebig/bokeh
|
bokeh/core/property/datetime.py
|
2
|
5422
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide date and time related properties
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import datetime
import dateutil.parser
# External imports
from six import string_types
# Bokeh imports
from ...util.dependencies import import_optional
from .bases import Property
from .primitive import bokeh_integer_types
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Date',
'Datetime',
'TimeDelta',
)
pd = import_optional('pandas')
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Date(Property):
''' Accept Date (but not DateTime) values.
'''
def __init__(self, default=None, help=None):
super(Date, self).__init__(default=default, help=help)
def transform(self, value):
value = super(Date, self).transform(value)
if isinstance(value, (float,) + bokeh_integer_types):
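# (added note) Numeric input is interpreted as a POSIX timestamp in seconds; if
# that is out of range (raising ValueError/OSError), it is retried as milliseconds.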
try:
value = datetime.date.fromtimestamp(value)
except (ValueError, OSError):
value = datetime.date.fromtimestamp(value/1000)
elif isinstance(value, string_types):
value = dateutil.parser.parse(value).date()
return value
def validate(self, value, detail=True):
super(Date, self).validate(value, detail)
if not (value is None or isinstance(value, (datetime.date,) + string_types + (float,) + bokeh_integer_types)):
msg = "" if not detail else "expected a date, string or timestamp, got %r" % value
raise ValueError(msg)
class Datetime(Property):
''' Accept Datetime values.
'''
def __init__(self, default=datetime.date.today(), help=None):
super(Datetime, self).__init__(default=default, help=help)
def transform(self, value):
value = super(Datetime, self).transform(value)
return value
# Handled by serialization in protocol.py for now
def validate(self, value, detail=True):
super(Datetime, self).validate(value, detail)
datetime_types = (datetime.datetime, datetime.date)
try:
import numpy as np
datetime_types += (np.datetime64,)
except (ImportError, AttributeError) as e:
if e.args == ("'module' object has no attribute 'datetime64'",):
import sys
if 'PyPy' in sys.version:
pass
else:
raise e
else:
pass
if (isinstance(value, datetime_types)):
return
if pd and isinstance(value, (pd.Timestamp)):
return
msg = "" if not detail else "Expected a datetime instance, got %r" % value
raise ValueError(msg)
class TimeDelta(Property):
''' Accept TimeDelta values.
'''
def __init__(self, default=datetime.timedelta(), help=None):
super(TimeDelta, self).__init__(default=default, help=help)
def transform(self, value):
value = super(TimeDelta, self).transform(value)
return value
# Handled by serialization in protocol.py for now
def validate(self, value, detail=True):
super(TimeDelta, self).validate(value, detail)
timedelta_types = (datetime.timedelta,)
try:
import numpy as np
timedelta_types += (np.timedelta64,)
except (ImportError, AttributeError) as e:
if e.args == ("'module' object has no attribute 'timedelta64'",):
import sys
if 'PyPy' in sys.version:
pass
else:
raise e
else:
pass
if (isinstance(value, timedelta_types)):
return
if pd and isinstance(value, (pd.Timedelta)):
return
msg = "" if not detail else "Expected a timedelta instance, got %r" % value
raise ValueError(msg)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
bsd-3-clause
|
ajaybhat/scikit-image
|
doc/examples/transform/plot_ransac.py
|
13
|
1595
|
"""
=========================================
Robust line model estimation using RANSAC
=========================================
In this example we see how to robustly fit a line model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from skimage.measure import LineModelND, ransac
np.random.seed(seed=1)
# generate coordinates of line
x = np.arange(-200, 200)
y = 0.2 * x + 20
data = np.column_stack([x, y])
# add faulty data
faulty = np.array(30 * [(180., -100)])
faulty += 5 * np.random.normal(size=faulty.shape)
data[:faulty.shape[0]] = faulty
# add gaussian noise to coordinates
noise = np.random.normal(size=data.shape)
data += 0.5 * noise
data[::2] += 5 * noise[::2]
data[::4] += 20 * noise[::4]
# fit line using all data
model = LineModelND()
model.estimate(data)
# robustly fit line only using inlier data with RANSAC algorithm
model_robust, inliers = ransac(data, LineModelND, min_samples=2,
residual_threshold=1, max_trials=1000)
outliers = ~inliers
# generate coordinates of estimated models
line_x = np.arange(-250, 250)
line_y = model.predict_y(line_x)
line_y_robust = model_robust.predict_y(line_x)
fig, ax = plt.subplots()
ax.plot(data[inliers, 0], data[inliers, 1], '.b', alpha=0.6,
label='Inlier data')
ax.plot(data[outliers, 0], data[outliers, 1], '.r', alpha=0.6,
label='Outlier data')
ax.plot(line_x, line_y, '-k', label='Line model from all data')
ax.plot(line_x, line_y_robust, '-b', label='Robust line model')
ax.legend(loc='lower left')
plt.show()
|
bsd-3-clause
|
glouppe/scikit-learn
|
examples/linear_model/plot_sgd_penalties.py
|
124
|
1877
|
"""
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501 # exactly 0.5 would cause division by zero in el()
cross(1.2)
l1_color = "navy"
l2_color = "c"
elastic_net_color = "darkorange"
lw = 2
plt.plot(xs, l1(xs), color=l1_color, label="L1", lw=lw)
plt.plot(xs, -1.0 * l1(xs), color=l1_color, lw=lw)
plt.plot(-1 * xs, l1(xs), color=l1_color, lw=lw)
plt.plot(-1 * xs, -1.0 * l1(xs), color=l1_color, lw=lw)
plt.plot(xs, l2(xs), color=l2_color, label="L2", lw=lw)
plt.plot(xs, -1.0 * l2(xs), color=l2_color, lw=lw)
plt.plot(-1 * xs, l2(xs), color=l2_color, lw=lw)
plt.plot(-1 * xs, -1.0 * l2(xs), color=l2_color, lw=lw)
plt.plot(xs, el(xs, alpha), color=elastic_net_color, label="Elastic Net", lw=lw)
plt.plot(xs, -1.0 * el(xs, alpha), color=elastic_net_color, lw=lw)
plt.plot(-1 * xs, el(xs, alpha), color=elastic_net_color, lw=lw)
plt.plot(-1 * xs, -1.0 * el(xs, alpha), color=elastic_net_color, lw=lw)
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
|
bsd-3-clause
|
cloud-fan/spark
|
python/pyspark/pandas/data_type_ops/num_ops.py
|
1
|
16890
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from typing import TYPE_CHECKING, Union
import numpy as np
from pandas.api.types import CategoricalDtype
from pyspark.pandas.base import column_op, IndexOpsMixin, numpy_column_op
from pyspark.pandas.data_type_ops.base import (
is_valid_operand_for_numeric_arithmetic,
DataTypeOps,
transform_boolean_operand_to_numeric,
_as_bool_type,
_as_categorical_type,
_as_other_type,
_as_string_type,
)
from pyspark.pandas.internal import InternalField
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import Dtype, extension_dtypes, pandas_on_spark_type
from pyspark.sql import functions as F
from pyspark.sql.column import Column
from pyspark.sql.types import (
BooleanType,
DoubleType,
FloatType,
StringType,
TimestampType,
)
if TYPE_CHECKING:
from pyspark.pandas.indexes import Index # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
class NumericOps(DataTypeOps):
"""The class for binary operations of numeric pandas-on-Spark objects."""
@property
def pretty_name(self) -> str:
return "numerics"
def add(self, left, right) -> Union["Series", "Index"]:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("string addition can only be applied to string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("addition can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return column_op(Column.__add__)(left, right)
def sub(self, left, right) -> Union["Series", "Index"]:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("subtraction can not be applied to string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("subtraction can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return column_op(Column.__sub__)(left, right)
def mod(self, left, right) -> Union["Series", "Index"]:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("modulo can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("modulo can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
def mod(left, right):
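# (added note) The double modulo below makes the result follow Python/pandas sign
# semantics (sign of the divisor), whereas Spark's % follows the sign of the dividend.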
return ((left % right) + right) % right
return column_op(mod)(left, right)
def pow(self, left, right) -> Union["Series", "Index"]:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("exponentiation can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("exponentiation can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
def pow_func(left, right):
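# (added note) 1 is special-cased so that 1 ** NULL/NaN evaluates to 1,
# presumably to mirror pandas/numpy, where 1 ** np.nan == 1.0.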
return F.when(left == 1, left).otherwise(Column.__pow__(left, right))
return column_op(pow_func)(left, right)
def radd(self, left, right) -> Union["Series", "Index"]:
if isinstance(right, str):
raise TypeError("string addition can only be applied to string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("addition can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right)
return column_op(Column.__radd__)(left, right)
def rsub(self, left, right) -> Union["Series", "Index"]:
if isinstance(right, str):
raise TypeError("subtraction can not be applied to string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("subtraction can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right)
return column_op(Column.__rsub__)(left, right)
def rmul(self, left, right) -> Union["Series", "Index"]:
if isinstance(right, str):
raise TypeError("multiplication can not be applied to a string literal.")
if not isinstance(right, numbers.Number):
raise TypeError("multiplication can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right)
return column_op(Column.__rmul__)(left, right)
def rpow(self, left, right) -> Union["Series", "Index"]:
if isinstance(right, str):
raise TypeError("exponentiation can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("exponentiation can not be applied to given types.")
def rpow_func(left, right):
return F.when(F.lit(right == 1), right).otherwise(Column.__rpow__(left, right))
right = transform_boolean_operand_to_numeric(right)
return column_op(rpow_func)(left, right)
def rmod(self, left, right) -> Union["Series", "Index"]:
if isinstance(right, str):
raise TypeError("modulo can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("modulo can not be applied to given types.")
def rmod(left, right):
return ((right % left) + left) % left
right = transform_boolean_operand_to_numeric(right)
return column_op(rmod)(left, right)
class IntegralOps(NumericOps):
"""
The class for binary operations of pandas-on-Spark objects with spark types:
LongType, IntegerType, ByteType and ShortType.
"""
@property
def pretty_name(self) -> str:
return "integrals"
def mul(self, left, right) -> Union["Series", "Index"]:
if isinstance(right, str):
raise TypeError("multiplication can not be applied to a string literal.")
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, TimestampType):
raise TypeError("multiplication can not be applied to date times.")
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType):
return column_op(SF.repeat)(right, left)
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("multiplication can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return column_op(Column.__mul__)(left, right)
def truediv(self, left, right) -> Union["Series", "Index"]:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("division can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
def truediv(left, right):
return F.when(F.lit(right != 0) | F.lit(right).isNull(), left.__div__(right)).otherwise(
F.lit(np.inf).__div__(left)
)
return numpy_column_op(truediv)(left, right)
def floordiv(self, left, right) -> Union["Series", "Index"]:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("division can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
def floordiv(left, right):
return F.when(F.lit(right is np.nan), np.nan).otherwise(
F.when(
F.lit(right != 0) | F.lit(right).isNull(), F.floor(left.__div__(right))
).otherwise(F.lit(np.inf).__div__(left))
)
return numpy_column_op(floordiv)(left, right)
def rtruediv(self, left, right) -> Union["Series", "Index"]:
if isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("division can not be applied to given types.")
def rtruediv(left, right):
return F.when(left == 0, F.lit(np.inf).__div__(right)).otherwise(
F.lit(right).__truediv__(left)
)
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return numpy_column_op(rtruediv)(left, right)
def rfloordiv(self, left, right) -> Union["Series", "Index"]:
if isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("division can not be applied to given types.")
def rfloordiv(left, right):
return F.when(F.lit(left == 0), F.lit(np.inf).__div__(right)).otherwise(
F.floor(F.lit(right).__div__(left))
)
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return numpy_column_op(rfloordiv)(left, right)
def astype(
self, index_ops: Union["Index", "Series"], dtype: Union[str, type, Dtype]
) -> Union["Index", "Series"]:
dtype, spark_type = pandas_on_spark_type(dtype)
if isinstance(dtype, CategoricalDtype):
return _as_categorical_type(index_ops, dtype, spark_type)
elif isinstance(spark_type, BooleanType):
return _as_bool_type(index_ops, dtype)
elif isinstance(spark_type, StringType):
return _as_string_type(index_ops, dtype, null_str=str(np.nan))
else:
return _as_other_type(index_ops, dtype, spark_type)
class FractionalOps(NumericOps):
"""
The class for binary operations of pandas-on-Spark objects with spark types:
FloatType, DoubleType and DecimalType.
"""
@property
def pretty_name(self) -> str:
return "fractions"
def mul(self, left, right) -> Union["Series", "Index"]:
if isinstance(right, str):
raise TypeError("multiplication can not be applied to a string literal.")
if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, TimestampType):
raise TypeError("multiplication can not be applied to date times.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("multiplication can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return column_op(Column.__mul__)(left, right)
def truediv(self, left, right) -> Union["Series", "Index"]:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("division can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
def truediv(left, right):
return F.when(F.lit(right != 0) | F.lit(right).isNull(), left.__div__(right)).otherwise(
F.when(F.lit(left == np.inf) | F.lit(left == -np.inf), left).otherwise(
F.lit(np.inf).__div__(left)
)
)
return numpy_column_op(truediv)(left, right)
def floordiv(self, left, right) -> Union["Series", "Index"]:
if (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, StringType)
) or isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not is_valid_operand_for_numeric_arithmetic(right):
raise TypeError("division can not be applied to given types.")
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
def floordiv(left, right):
return F.when(F.lit(right is np.nan), np.nan).otherwise(
F.when(
F.lit(right != 0) | F.lit(right).isNull(), F.floor(left.__div__(right))
).otherwise(
F.when(F.lit(left == np.inf) | F.lit(left == -np.inf), left).otherwise(
F.lit(np.inf).__div__(left)
)
)
)
return numpy_column_op(floordiv)(left, right)
def rtruediv(self, left, right) -> Union["Series", "Index"]:
if isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("division can not be applied to given types.")
def rtruediv(left, right):
return F.when(left == 0, F.lit(np.inf).__div__(right)).otherwise(
F.lit(right).__truediv__(left)
)
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return numpy_column_op(rtruediv)(left, right)
def rfloordiv(self, left, right) -> Union["Series", "Index"]:
if isinstance(right, str):
raise TypeError("division can not be applied on string series or literals.")
if not isinstance(right, numbers.Number):
raise TypeError("division can not be applied to given types.")
def rfloordiv(left, right):
return F.when(F.lit(left == 0), F.lit(np.inf).__div__(right)).otherwise(
F.when(F.lit(left) == np.nan, np.nan).otherwise(F.floor(F.lit(right).__div__(left)))
)
right = transform_boolean_operand_to_numeric(right, left.spark.data_type)
return numpy_column_op(rfloordiv)(left, right)
def astype(
self, index_ops: Union["Index", "Series"], dtype: Union[str, type, Dtype]
) -> Union["Index", "Series"]:
dtype, spark_type = pandas_on_spark_type(dtype)
if isinstance(dtype, CategoricalDtype):
return _as_categorical_type(index_ops, dtype, spark_type)
elif isinstance(spark_type, BooleanType):
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(spark_type)
else:
if isinstance(index_ops.spark.data_type, (FloatType, DoubleType)):
scol = F.when(
index_ops.spark.column.isNull() | F.isnan(index_ops.spark.column),
F.lit(True),
).otherwise(index_ops.spark.column.cast(spark_type))
else: # DecimalType
scol = F.when(index_ops.spark.column.isNull(), F.lit(False)).otherwise(
index_ops.spark.column.cast(spark_type)
)
return index_ops._with_new_scol(
scol.alias(index_ops._internal.data_spark_column_names[0]),
field=InternalField(dtype=dtype),
)
elif isinstance(spark_type, StringType):
return _as_string_type(index_ops, dtype, null_str=str(np.nan))
else:
return _as_other_type(index_ops, dtype, spark_type)
|
apache-2.0
|
tapomayukh/projects_in_python
|
classification/Classification_with_kNN/Single_Contact_Classification/Feature_Comparison/single_feature/best_kNN_PCA/test11_cross_validate_categories_1200ms_scaled_method_v_motion.py
|
1
|
5050
|
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 41:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
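# Hedged usage sketch (added for illustration; not part of the original script):
# exercises the COV-method pca() above on a random matrix of the same 41 x 140
# shape the script expects, and checks the eigenvector/eigenvalue shapes.
# The random demo data is an assumption for demonstration only.
def _pca_shape_check():
    X_demo = np.matrix(np.random.rand(41, 140))   # 41 features, 140 observations
    vec_d, val_d, mean_d, M_d, C_d = pca(X_demo)
    assert np.shape(vec_d) == (41, 41)            # one eigenvector per feature
    assert np.shape(val_d) == (41,)               # one eigenvalue per feature
    return vec_d, val_d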
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_1 = ['Rigid-Fixed']*35 + ['Rigid-Movable']*35 + ['Soft-Fixed']*35 + ['Soft-Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original[82:123,:]
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((140,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
|
mit
|
pylayers/pylayers
|
pylayers/gui/__init__.py
|
3
|
1786
|
"""
pylayers
=========
This file is adapted from scikit-learn package
"""
import sys
__version__ = '0.12-git'
try:
# This variable is injected in the __builtins__ by the build
    # process. It is used to enable importing subpackages of pylayers when
    # the binaries are not built.
__PYLAYERS_SETUP__
except NameError:
__PYLAYERS_SETUP__ = False
if __PYLAYERS_SETUP__:
sys.stderr.write('Partial import of pylayers during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
try:
from numpy.testing import nosetester
class _NoseTester(nosetester.NoseTester):
""" Subclass numpy's NoseTester to add doctests by default
"""
def test(self, label='fast', verbose=1, extra_argv=['--exe'],
doctests=True, coverage=False):
"""Run the full test suite
Examples
--------
This will run the test suite and stop at the first failing
example
>>> from pylayers import test
>>> test(extra_argv=['--exe', '-sx']) #doctest: +SKIP
"""
return super(_NoseTester, self).test(label=label, verbose=verbose,
extra_argv=extra_argv,
doctests=doctests, coverage=coverage)
try:
test = _NoseTester(raise_warnings="release").test
except TypeError:
# Older versions of numpy do not have a raise_warnings argument
test = _NoseTester().test
del nosetester
except:
pass
__all__ = ['gis', 'signal', 'antprop', 'simul','util']
|
mit
|
henridwyer/scikit-learn
|
sklearn/datasets/tests/test_mldata.py
|
384
|
5221
|
"""Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
|
bsd-3-clause
|
ostrokach/elaspic
|
tests/test_database_pipeline.py
|
1
|
6683
|
import os.path as op
import logging
import random
import pytest
import pandas as pd
from elaspic import conf
logger = logging.getLogger(__name__)
# Constants
QUICK = False
CONFIG_FILE = op.join(op.dirname(__file__), 'config_file_database.ini')
if hasattr(pytest, "config"):
QUICK = pytest.config.getoption('--quick')
CONFIG_FILE = pytest.config.getoption('--config-file') or CONFIG_FILE
conf.read_configuration_file(CONFIG_FILE, unique_temp_dir=None)
assert conf.CONFIGS['db_type']
logger.debug('Running quick: {}'.format(QUICK))
logger.debug('Config file: {}'.format(CONFIG_FILE))
# Imports that require a parsed config file
import helper_fns # noqa
from elaspic import elaspic_database # noqa
db = elaspic_database.MyDatabase()
conf.CONFIGS['engine'] = db.get_engine()
conf.CONFIGS['engine'].execute("SET sql_mode = ''")
test_cases = []
def append_test_cases(df, num=3, num_mutations=3):
""".
Parameters
----------
df : DataFrame
Contains the following columns:
- `uniprot_id`
- `uniprot_sequence`
        - `interacting_aa_1` OR `model_domain_def` OR `domain_def`
"""
if QUICK:
num = 1
for i in range(num):
row_idx = random.randint(0, len(df) - 1)
if df.empty:
raise Exception('empty dataframe supplied: {}'.format(df))
row = df.iloc[row_idx]
uniprot_id = row['uniprot_id']
uniprot_sequence = row['uniprot_sequence']
logger.debug('Protein ID: {}'.format(uniprot_id))
for i in range(num_mutations):
if 'interacting_aa_1' in row and pd.notnull(row['interacting_aa_1']):
mutation_pos = random.choice([int(x) for x in row['interacting_aa_1'].split(',')])
logger.debug('Selected interface AA: {}'.format(mutation_pos))
elif 'model_domain_def' in row and pd.notnull(row['model_domain_def']):
domain_start, domain_end = (int(x) for x in row['model_domain_def'].split(':'))
mutation_pos = random.randint(domain_start, domain_end)
logger.debug(
'Selected AA: {} falling inside model domain: {}'
.format(mutation_pos, row['model_domain_def'])
)
elif 'domain_def' in row and pd.notnull(row['domain_def']):
domain_start, domain_end = (int(x) for x in row['domain_def'].split(':'))
mutation_pos = random.randint(domain_start, domain_end)
logger.debug(
'Selected AA: {} falling inside domain: {}'
.format(mutation_pos, row['domain_def'])
)
else:
mutation_pos = random.randint(1, len(uniprot_sequence))
mutation_from = uniprot_sequence[mutation_pos - 1]
mutation_to = random.choice('GVALICMFWPDESTYQNKRH')
mutation = '{}{}{}'.format(mutation_from, mutation_pos, mutation_to)
test_cases.append((uniprot_id, mutation,))
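# Hedged illustration (not part of the original test module): shows the minimal
# DataFrame layout append_test_cases() expects. The uniprot id, sequence and
# domain boundaries below are made up for demonstration, not real ELASPIC data;
# calling this helper appends one extra case to the module-level `test_cases`.
def _example_append_test_cases():
    demo_df = pd.DataFrame([{
        'uniprot_id': 'P00000',  # hypothetical identifier
        'uniprot_sequence': 'MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ',
        'domain_def': '5:25',
    }])
    append_test_cases(demo_df, num=1, num_mutations=1)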
# %% Everything is missing
sql_query = """
select ud.uniprot_id, us.uniprot_sequence, udt.domain_def
from {db_schema}.uniprot_domain ud
join {db_schema}.uniprot_domain_template udt using (uniprot_domain_id)
join {db_schema}.uniprot_domain_pair udp on (udp.uniprot_domain_id_1 = ud.uniprot_domain_id)
join {db_schema}.uniprot_domain_pair_template udpt using (uniprot_domain_pair_id)
join {db_schema_uniprot}.uniprot_sequence us using (uniprot_id)
where uniprot_id not in
(select uniprot_id from {db_schema}.provean)
and uniprot_domain_id not in
(select uniprot_domain_id from {db_schema}.uniprot_domain_model)
and uniprot_domain_pair_id not in
(select uniprot_domain_pair_id from {db_schema}.uniprot_domain_pair_model)
and CHAR_LENGTH(us.uniprot_sequence) < 1000
and db = 'sp'
limit 1000;
""".format(
db_schema=conf.CONFIGS['db_schema'],
db_schema_uniprot=conf.CONFIGS['db_schema_uniprot'])
df = pd.read_sql_query(sql_query, conf.CONFIGS['engine'])
logger.debug("Everything is missing: {}".format(len(df)))
if df.empty:
logger.error("Skipping...")
else:
append_test_cases(df)
# %% Have provean and domain model but not interface model
sql_query = """
select ud.uniprot_id, us.uniprot_sequence, udm.model_domain_def
from {db_schema}.uniprot_domain ud
join {db_schema}.provean using (uniprot_id)
join {db_schema}.uniprot_domain_template using (uniprot_domain_id)
join {db_schema}.uniprot_domain_model udm using (uniprot_domain_id)
join {db_schema}.uniprot_domain_pair udp on (udp.uniprot_domain_id_1 = ud.uniprot_domain_id)
join {db_schema}.uniprot_domain_pair_template using (uniprot_domain_pair_id)
join {db_schema_uniprot}.uniprot_sequence us using (uniprot_id)
where uniprot_domain_pair_id not in
(select uniprot_domain_pair_id from {db_schema}.uniprot_domain_pair_model)
and CHAR_LENGTH(us.uniprot_sequence) < 1000
and db = 'sp'
limit 1000;
""".format(
db_schema=conf.CONFIGS['db_schema'],
db_schema_uniprot=conf.CONFIGS['db_schema_uniprot'])
df = pd.read_sql_query(sql_query, conf.CONFIGS['engine'])
logger.debug("Have provean and domain model but not interface model: {}".format(len(df)))
if df.empty:
logger.error("Skipping...")
elif not QUICK:
append_test_cases(df)
# %% Have provean and all models
sql_query = """
select ud.uniprot_id, us.uniprot_sequence, udpm.interacting_aa_1
from {db_schema}.uniprot_domain ud
join {db_schema}.provean using (uniprot_id)
join {db_schema}.uniprot_domain_template using (uniprot_domain_id)
join {db_schema}.uniprot_domain_model using (uniprot_domain_id)
join {db_schema}.uniprot_domain_pair udp on (udp.uniprot_domain_id_1 = ud.uniprot_domain_id)
join {db_schema}.uniprot_domain_pair_template using (uniprot_domain_pair_id)
join {db_schema}.uniprot_domain_pair_model udpm using (uniprot_domain_pair_id)
join {db_schema_uniprot}.uniprot_sequence us using (uniprot_id)
where CHAR_LENGTH(us.uniprot_sequence) < 1000
and db = 'sp'
and udpm.model_filename is not null
and udpm.model_errors is null
limit 1000;
""".format(
db_schema=conf.CONFIGS['db_schema'],
db_schema_uniprot=conf.CONFIGS['db_schema_uniprot'])
df = pd.read_sql_query(sql_query, conf.CONFIGS['engine'])
logger.debug("Have provean and all models: {}".format(len(df)))
if df.empty:
logger.error("Skipping...")
else:
append_test_cases(df, 10)
# %% Fixtures
@pytest.fixture(scope='session', params=test_cases)
def uniprot_id_mutation(request):
return request.param
# %% Tests
def test_database_pipeline(uniprot_id_mutation):
return helper_fns.run_database_pipeline(uniprot_id_mutation)
# %%
if __name__ == '__main__':
import pytest
pytest.main([__file__, '-svx', '--quick'])
|
mit
|
nomadcube/scikit-learn
|
sklearn/ensemble/tests/test_base.py
|
284
|
1328
|
"""
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
|
bsd-3-clause
|
tgsmith61591/pyramid
|
examples/arima/example_add_new_samples.py
|
1
|
2463
|
"""
=====================================
Adding new observations to your model
=====================================
This example demonstrates how to add new ground truth
observations to your model so that forecasting continues
with respect to true, observed values. This also slightly
updates the model parameters, taking several new steps from
the existing model parameters.
.. raw:: html
<br/>
"""
print(__doc__)
# Author: Taylor Smith <[email protected]>
import pmdarima as pm
from pmdarima import model_selection
import matplotlib.pyplot as plt
import numpy as np
# #############################################################################
# Load the data and split it into separate pieces
data = pm.datasets.load_lynx()
train, test = model_selection.train_test_split(data, train_size=100)
# #############################################################################
# Fit with some validation (cv) samples
arima = pm.auto_arima(train, start_p=1, start_q=1, d=0, max_p=5, max_q=5,
out_of_sample_size=10, suppress_warnings=True,
stepwise=True, error_action='ignore')
# Now plot the results and the forecast for the test set
preds, conf_int = arima.predict(n_periods=test.shape[0],
return_conf_int=True)
fig, axes = plt.subplots(2, 1, figsize=(12, 8))
x_axis = np.arange(train.shape[0] + preds.shape[0])
axes[0].plot(x_axis[:train.shape[0]], train, alpha=0.75)
axes[0].scatter(x_axis[train.shape[0]:], preds, alpha=0.4, marker='o')
axes[0].scatter(x_axis[train.shape[0]:], test, alpha=0.4, marker='x')
axes[0].fill_between(x_axis[-preds.shape[0]:], conf_int[:, 0], conf_int[:, 1],
alpha=0.1, color='b')
# fill the section where we "held out" samples in our model fit
axes[0].set_title("Train samples & forecasted test samples")
# Now add the actual samples to the model and create NEW forecasts
arima.update(test)
new_preds, new_conf_int = arima.predict(n_periods=10, return_conf_int=True)
new_x_axis = np.arange(data.shape[0] + 10)
axes[1].plot(new_x_axis[:data.shape[0]], data, alpha=0.75)
axes[1].scatter(new_x_axis[data.shape[0]:], new_preds, alpha=0.4, marker='o')
axes[1].fill_between(new_x_axis[-new_preds.shape[0]:],
new_conf_int[:, 0],
new_conf_int[:, 1],
alpha=0.1, color='g')
axes[1].set_title("Added new observed values with new forecasts")
plt.show()
|
mit
|
clairetang6/bokeh
|
bokeh/charts/tests/test_comp_glyphs.py
|
3
|
4100
|
import numpy as np
import pandas as pd
from bokeh.charts.models import CompositeGlyph
from bokeh.charts.glyphs import (AreaGlyph, LineGlyph, PointGlyph, StepGlyph,
                                 BarGlyph, BoxGlyph)
from bokeh.charts.operations import stack
from bokeh.charts.stats import stats
from bokeh.models import ColumnDataSource
def test_area_base_values(test_data):
"""Test creating chart data source from array-like list data."""
x = pd.Series(test_data.array_data[0])
y = pd.Series(test_data.array_data[1])
ag = AreaGlyph(x=x, y=y)
assert ag.source.data['y_values'][0][0] == 0
assert ag.source.data['y_values'][0][-1] == 0
def test_xyglyph_xy_range():
def check_bounds(xyg, xmin=0, xmax=4, ymin=1, ymax=5):
assert xyg.x_min == xmin
assert xyg.x_max == xmax
assert xyg.y_min == ymin
assert xyg.y_max == ymax
for Glyph in [LineGlyph, PointGlyph]:
x = pd.Series([0, 1, 2, 3, 4])
y = pd.Series([5, 4, 3, 2, 1])
xyg = Glyph(x=x, y=y)
check_bounds(xyg)
x[1] = x[2] = np.nan
xyg = Glyph(x=x, y=y)
check_bounds(xyg)
x[0] = np.nan
xyg = Glyph(x=x, y=y)
check_bounds(xyg, xmin=3)
x[4] = np.nan
xyg = Glyph(x=x, y=y)
check_bounds(xyg, xmin=3, xmax=3)
y[1] = y[2] = np.nan
xyg = Glyph(x=x, y=y)
check_bounds(xyg, xmin=3, xmax=3)
y[0] = np.nan
xyg = Glyph(x=x, y=y)
check_bounds(xyg, xmin=3, xmax=3, ymax=2)
y[4] = np.nan
xyg = Glyph(x=x, y=y)
check_bounds(xyg, xmin=3, xmax=3, ymax=2, ymin=2)
def test_comp_glyph_no_inputs():
cg = CompositeGlyph()
assert isinstance(cg.data, dict)
assert isinstance(cg.df, pd.DataFrame)
assert isinstance(cg.source, ColumnDataSource)
def test_comp_glyph_array_input(test_data):
cg = CompositeGlyph(values=test_data.array_data[0])
assert cg.data['values'] is not None
def test_step_glyph():
xx = [-2, 0, 1, 3, 4, 5, 6, 9]
dates = np.array(['2016-05-%02i' % i for i in range(1, 9)], dtype=np.datetime64)
values = [1, 2, 3, 2, 1, 5, 4, 5]
# Test with integer x
g = StepGlyph(x=xx, y=values)
data = g.renderers[0].data_source.data
for i in range(0, len(xx)-1):
assert data['x_values'][i*2+0] == xx[i+0]
assert data['x_values'][i*2+1] == xx[i+1]
# Test with dates (#3616)
g = StepGlyph(x=dates, y=values)
data = g.renderers[0].data_source.data
for i in range(0, len(xx)-1):
assert data['x_values'][i*2+0] == dates[i+0]
assert data['x_values'][i*2+1] == dates[i+1]
# operations
def test_bar_stacking():
bar1, bar2, bar3 = BarGlyph('a', 5), BarGlyph('a', 2), BarGlyph('b', 2)
stack(bar1, bar2, bar3)
# are stacked
assert bar1.y_max == bar2.y_min
# not stacked
assert bar3.y_min == 0.0
def test_area_stacking():
area1 = AreaGlyph(x=[1, 2, 3, 4, 5], y=[2, 9, 2, 5, 10])
area2 = AreaGlyph(x=[1, 2, 3, 4, 5], y=[1, 1, 1, 1, 1])
stack(area1, area2)
area2_stacked_values = [0, 3, 10, 3, 6, 11, 0, 0, 10, 5, 2, 9, 2, 0]
comparison = pd.Series(area2_stacked_values) - pd.Series(area2.df[
'y_values'].values[0])
assert comparison.sum() == 0
def test_boxplot():
# test source: https://en.wikipedia.org/wiki/Interquartile_range
data=[102, 104, 105, 107, 108, 109, 110, 112, 115, 116, 118]
box = BoxGlyph(label={'cat': 'a'}, values=data, color='red')
assert box.q1 == 106
assert box.q2 == 109
assert box.q3 == 113.5
assert box.iqr == 7.5
    # test that the whiskers do not exceed the data limits
assert box.w0 == 102
assert box.w1 == 118
def test_bar_single_value():
data=[500]
    for stat in stats.keys():
        bar = BarGlyph(label={'cat': 'a'}, values=data, color='red', agg=stat)
        assert bar.get_start() == 0
        if stat != 'count':
            assert bar.get_end() == 500
        else:
            assert bar.get_end() == 1
|
bsd-3-clause
|
Sh1n/AML-ALL-classifier
|
step5.py
|
1
|
4101
|
import Orange
import logging
import random
from sklearn.externals import joblib
from utils import *
from sklearn import svm
from sklearn import cross_validation
from sklearn.metrics import f1_score, precision_recall_fscore_support
from sklearn.feature_extraction import DictVectorizer
import numpy as np
from sklearn import preprocessing
import pickle  # needed for pickle.dump below (unless `from utils import *` already provides it)
# Vars
testsetPercentage = .2
validationsetPercentage = .3
progress = False
baseline = .9496
C = 0.5
kernel = 'poly'
previousStep = 4
# Utilities
logging.basicConfig(filename='main.log',level=logging.DEBUG,format='%(levelname)s\t%(message)s')
def logmessage(message, color):
print color(message)
logging.info(message)
def copyDataset(dataset):
return Orange.data.Table(dataset)
# Compute S Threshold
# ============================================================================ #
boxmessage("Starting Phase 5: Model Learning", warning)
trainingSet = Orange.data.Table("step%s_trainingset.tab" % previousStep)
validationSet = Orange.data.Table("step%s_validationset.tab" % previousStep)
trainingSet.randomGenerator = Orange.orange.RandomGenerator(random.randint(0, 10))
logmessage("Feature Selected Working Dataset Loaded", success)
logmessage("Training dataset is %s" % len(trainingSet), info)
logmessage("Training dataset features are %s" % len(trainingSet.domain), info)
logmessage("Validation dataset is %s" % len(validationSet), info)
logmessage("Validation dataset features are %s" % len(validationSet.domain), info)
# ============================================================================ #
le = preprocessing.LabelEncoder()
#le.fit() # All the possible labels for all classes
# =========================== #
# Encode all labels
labels = [d[f].value for f in trainingSet.domain for d in trainingSet] + [d[f].value for f in validationSet.domain for d in validationSet] + ['?']
le.fit(labels)
with open("labelencoder", "wb") as out_file:
pickle.dump(le, out_file)
# =========================== #
# Convert Train Dataset
# Apply transformation from nominal feature values to numeric codes
converted_train_data = ([le.transform([ d[f].value for f in trainingSet.domain if f != trainingSet.domain.class_var]) for d in trainingSet])
# Weights
ALL = trainingSet.select(gene='ALL')
AML = trainingSet.select(gene='AML')
#converted_train_data = [dict(enumerate(d)) for d in converted_train_data]
#converted_train_data = vector.fit_transform(converted_train_data)
logmessage("Validation dataset is %s" % len(validationSet), info)
logmessage("Validation dataset features are %s" % len(validationSet.domain), info)
print len(converted_train_data)
print len(converted_train_data[0])
converted_train_targets = le.transform([d[trainingSet.domain.class_var].value for d in trainingSet ])
print converted_train_targets
clf = svm.SVC(kernel=kernel,C=C)
clf.fit(converted_train_data, converted_train_targets)
logmessage("Model learnt", success)
# Performances
# Convert Test Dataset
converted_test_data = ([le.transform([ d[f].value for f in validationSet.domain if f != validationSet.domain.class_var]) for d in validationSet])
#converted_test_data = [dict(enumerate(d)) for d in converted_test_data]
#converted_test_data = vector.fit_transform(converted_test_data)
print len(converted_test_data)
print len(converted_test_data[0])
converted_test_targets = le.transform([d[validationSet.domain.class_var].value for d in validationSet ])
logmessage("Starting Prediction Task", info)
prediction = clf.predict(converted_test_data)
print "Predicted \t", prediction
print "Truth \t",converted_test_targets
p, r, f1, support = precision_recall_fscore_support(converted_test_targets, prediction)
# Save scores
#scores = open('scores', 'a')
#scores.write("%s\n" % (np.average(f1)))
#scores.close()
f1_avg = np.average(f1)
logmessage("Average F1(Over 2 classes): %s" % f1_avg, info)
if f1_avg > baseline:
logmessage("Performance Increased", success)
else:
logmessage("Performance Decreased", error)
logmessage("Saving Model", info)
joblib.dump(clf, 'classifier.model')
# =============================================================================
|
gpl-2.0
|
dh4gan/oberon
|
plot/plot_positions.py
|
1
|
1034
|
'''
Created on 7/3/14
@author: dh4gan
Show the positions of the bodies in the system
'''
from sys import argv
from matplotlib import pyplot as plt
import io_oberon.io_nbody
# Data file can be given as a command-line argument; otherwise prompt for it
if len(argv)==1:
input_file = raw_input("Enter the datafile: ")
else:
input_file = argv[1]
tmax = 0.0
time, bodyarray, number_bodies = io_oberon.io_nbody.read_nbody_datafile(input_file, tmax)
for i in xrange(number_bodies):
fig = plt.figure(i)
plt.suptitle(str(bodyarray[i].bodytype))
plt.subplot(211)
plt.xlabel("X Position [ AU ]")
plt.ylabel("Y Position [ AU ]")
plt.plot(bodyarray[i].x, bodyarray[i].y, '.', color='red')
plt.subplot(212)
plt.plot(time,bodyarray[i].vx, '.',color='blue', label='$v_x$')
plt.plot(time, bodyarray[i].vy, '.',color='green', label='$v_y$')
plt.xlabel("Time [ years ]")
plt.ylabel("Velocity [ AU / year ]")
plt.legend(loc='lower right')
plt.show()
|
gpl-3.0
|
materialsproject/MPContribs
|
mpcontribs-api/mpcontribs/api/redox_thermo_csp/views.py
|
1
|
88690
|
import re
import json
import os
import flask_mongorest
from flask import request, Blueprint
import pandas as pd
from pandas.io.json._normalize import nested_to_record
from itertools import groupby
from scipy.optimize import brentq
from scipy.constants import pi, R
from scipy.integrate import quad
import pymatgen.core.periodic_table as ptable
from pymatgen.core.composition import Composition
from mpcontribs.api.core import SwaggerView
from mpcontribs.api.contributions.document import Contributions
from mpcontribs.api.contributions.views import ContributionsResource
from mpcontribs.api.tables.document import Tables
templates = os.path.join(os.path.dirname(flask_mongorest.__file__), "templates")
redox_thermo_csp = Blueprint("redox_thermo_csp", __name__, template_folder=templates)
def split_comp(compstr):
"""
Splits a string containing the composition of a perovskite solid solution into its components
Chemical composition: (am_1, am_2)(tm_1, tm_2)Ox
:param compstr: composition as a string
:return: am_1, am_2, tm_1, tm_2;
each of these output variables contains the species and the stoichiometries
i.e. ("Fe", 0.6)
"""
am_1, am_2, tm_1, tm_2 = None, None, None, None
compstr_spl = ["".join(g) for _, g in groupby(str(compstr), str.isalpha)]
for l in range(len(compstr_spl)):
try:
if (
ptable.Element(compstr_spl[l]).is_alkaline
or ptable.Element(compstr_spl[l]).is_alkali
or ptable.Element(compstr_spl[l]).is_rare_earth_metal
):
if am_1 is None:
am_1 = [compstr_spl[l], float(compstr_spl[l + 1])]
elif am_2 is None:
am_2 = [compstr_spl[l], float(compstr_spl[l + 1])]
if ptable.Element(compstr_spl[l]).is_transition_metal and not (
ptable.Element(compstr_spl[l]).is_rare_earth_metal
):
if tm_1 is None:
tm_1 = [compstr_spl[l], float(compstr_spl[l + 1])]
elif tm_2 is None:
tm_2 = [compstr_spl[l], float(compstr_spl[l + 1])]
# stoichiometries raise ValueErrors in pymatgen .is_alkaline etc., ignore these errors and skip that entry
except ValueError:
pass
return am_1, am_2, tm_1, tm_2
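# Hedged usage sketch (added for illustration, not part of the original module):
# shows the tuple layout returned by split_comp(). The solid-solution formula is
# an arbitrary example composition, not a value taken from the database.
def _example_split_comp():
    am_1, am_2, tm_1, tm_2 = split_comp("Ca0.2Sr0.8Fe0.6Mn0.4Ox")
    # expected layout: am_1 == ["Ca", 0.2], am_2 == ["Sr", 0.8],
    #                  tm_1 == ["Fe", 0.6], tm_2 == ["Mn", 0.4]
    return am_1, am_2, tm_1, tm_2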
def remove_comp_one(compstr):
compspl = split_comp(compstr=compstr)
compstr_rem = ""
for i in range(len(compspl)):
if compspl[i]:
if float(compspl[i][1]) != 1:
compstr_rem = compstr_rem + str(compspl[i][0]) + str(compspl[i][1])
else:
compstr_rem = compstr_rem + str(compspl[i][0])
compstr_rem = compstr_rem + "Ox"
return compstr_rem
def add_comp_one(compstr):
"""
Adds stoichiometries of 1 to compstr that don't have them
:param compstr: composition as a string
    :return: composition with stoichiometries of 1 added
"""
sample = re.sub(r"([A-Z])", r" \1", compstr).split()
sample = ["".join(g) for _, g in groupby(str(sample), str.isalpha)]
samp_new = ""
for k in range(len(sample)):
spl_samp = re.sub(r"([A-Z])", r" \1", sample[k]).split()
for l in range(len(spl_samp)):
if spl_samp[l][-1].isalpha() and spl_samp[l][-1] != "x":
spl_samp[l] = spl_samp[l] + "1"
samp_new += spl_samp[l]
return samp_new
def s_th_o(temp):
# constants: Chase, NIST-JANAF Thermochemistry tables, Fourth Edition, 1998
if temp < 700:
shomdat = [
31.32234,
-20.23531,
57.86644,
-36.50624,
-0.007374,
-8.903471,
246.7945,
]
elif temp < 2000:
shomdat = [
30.03235,
8.772972,
-3.988133,
0.788313,
-0.741599,
-11.32468,
236.1663,
]
else:
shomdat = [
20.91111,
10.72071,
-2.020498,
0.146449,
9.245722,
5.337651,
237.6185,
]
temp_frac = temp / 1000.0
szero = shomdat[0] * pd.np.log(temp_frac)
szero += shomdat[1] * temp_frac
szero += 0.5 * shomdat[2] * temp_frac ** 2
szero += shomdat[3] / 3.0 * temp_frac ** 3
szero -= shomdat[4] / (2 * temp_frac ** 2)
szero += shomdat[6]
return 0.5 * szero
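# Hedged sanity check (illustrative only): twice s_th_o(T) should reproduce the
# Shomate standard entropy of O2, roughly 243 J/(mol K) near 1000 K according to
# the NIST-JANAF tables cited above, so s_th_o(1000.) is expected to be near 122.
def _example_s_th_o():
    half_s_o2 = s_th_o(1000.0)
    assert 115.0 < half_s_o2 < 130.0  # loose bounds around the tabulated value
    return half_s_o2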
def rootfind(a, b, args, funciso_here):
solutioniso = 0
try:
solutioniso = brentq(
funciso_here, 0.01, 0.49, args=args
) # works for most cases
except ValueError: # starting values a,b for cases where 0.01/0.49 are not sign changing
try:
solutioniso = brentq(funciso_here, a, b, args=args)
except ValueError:
solutioniso = None # if no solution can be found
return solutioniso
def enth_arctan(x, dh_max, dh_min, t, s):
"""
arctan function to fit enthalpy values of solid solutions
:param x: Delta_delta, change in non-stoichiometric redox extent vs. a reference
:param t: transition point; x value at which the reaction enthalpy of the solid solution
is exactly the average of dh_max and dh_min
:param s: slope, measure for the preference of B species reduction over A species reduction
"""
return (((dh_max - dh_min) / pi) * (pd.np.arctan((x - t) * s) + (pi / 2))) + dh_min
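# Hedged property check (illustrative only): at x == t the arctan term vanishes,
# so the fitted enthalpy equals the average of dh_max and dh_min, as stated in
# the docstring above. The fit parameters are arbitrary demonstration values.
def _example_enth_arctan_midpoint():
    dh_max, dh_min, t, s = 300.0, 200.0, 0.15, 20.0
    midpoint = enth_arctan(t, dh_max, dh_min, t, s)
    assert abs(midpoint - 0.5 * (dh_max + dh_min)) < 1e-9
    return midpoint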
def entr_fe(x, fit_param_fe):
"""
Calculates the entropy values for SrFeOx based on the fit parameters in fit_param_fe
:param x: absolute delta
:return: dS of SrFeOx at delta = x with delta_0 accounted for
"""
return (
fit_param_fe[0] / 2
+ fit_param_fe[1]
+ (2 * fit_param_fe[2] * R * (pd.np.log(0.5 - x) - pd.np.log(x)))
)
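# Hedged property check (illustrative only): at delta = 0.25 the dilute-species
# log term vanishes, so entr_fe() reduces to fit_param_fe[0] / 2 + fit_param_fe[1].
# The fit parameters below are placeholders, not the actual SrFeOx fit.
def _example_entr_fe():
    fit_param_fe = [250.0, 10.0, 0.9, 0.0]
    assert abs(entr_fe(0.25, fit_param_fe) - (250.0 / 2 + 10.0)) < 1e-9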
def entr_mixed(x, s, shift, delta_0, act_s1, fit_param_fe):
"""
Returns a fit function for entropies based on the arctan function and the dilute species model fit of SrFeOx
(see docstring in Atanfit.entropy)
:param x: absolute delta
:param s: slope, measure for the preference of B species reduction over A species reduction
:param shift: constant entropy shift
:param delta_0: shift from absolute delta
:return: dS of solid solution at delta = x with delta_0
"""
efe = entr_fe(x + delta_0, fit_param_fe)
return (
((act_s1 * efe) / pi) * (pd.np.arctan((x - delta_0) * s) + pi / 2)
+ (1 - act_s1) * efe
+ shift
)
def entr_dilute_spec(x, s_v, a, delta_0, s_th_o):
"""
:param x: Delta_delta, change in non-stoichiometric redox extent vs. a reference
:param s_v: change in the lattice vibrational entropy caused by introducing vacancies
:param a: indicates the degrees of freedom of the defects, a < 1: additional defect ordering
:param delta_0: initial non-stoichiometry at Delta_m = 0 (reference point of the mass change data,
    typically T = 400 deg C, p_O2 = 0.18 bar)
Delta = delta_0 + Delta_delta
    :return: fit function based on the model in Bulfin et al., doi: 10.1039/C7TA00822H
"""
return (
s_th_o
+ s_v
+ (2 * a * R * (pd.np.log(0.5 - (x + delta_0)) - pd.np.log(x + delta_0)))
)
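# Hedged property check (illustrative only): when x + delta_0 = 0.25 the
# configurational term 2*a*R*(ln(0.5 - d) - ln(d)) vanishes, so the fit reduces
# to s_th_o + s_v. All parameter values below are arbitrary demonstration numbers.
def _example_entr_dilute_spec():
    s_v, a, delta_0, s_th = 120.0, 0.8, 0.125, 110.0
    ds = entr_dilute_spec(0.125, s_v, a, delta_0, s_th)
    assert abs(ds - (s_th + s_v)) < 1e-9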
def dh_ds(delta, s_th, p):
d_delta = delta - p["delta_0"]
dh_pars = [p[f"fit_param_enth.{c}"] for c in "abcd"]
dh = enth_arctan(d_delta, *(dh_pars)) * 1000.0
ds_pars = [p[f"fit_par_ent.{c}"] for c in "abc"]
    # distinguish two different entropy fits
fit_type = p["fit_type_entr"]
if fit_type == "Solid_Solution":
ds_pars.append(p["act_mat"])
ds_pars.append([p[f"fit_param_fe.{c}"] for c in "abcd"])
ds = entr_mixed(delta - p[f"fit_par_ent.c"], *ds_pars)
else:
ds_pars.append(s_th)
ds = entr_dilute_spec(delta - p["fit_par_ent.c"], *ds_pars)
return dh, ds
def funciso(delta, iso, x, p, s_th):
dh, ds = dh_ds(delta, s_th, p)
return dh - x * ds + R * iso * x / 2
def funciso_theo(delta, iso, x, p, t_d_perov, t_d_brownm, dh_min, dh_max, act):
dh = d_h_num_dev_calc(delta=delta, dh_1=dh_min, dh_2=dh_max, temp=x, act=act)
ds = d_s_fundamental(
delta=delta,
dh_1=dh_min,
dh_2=dh_max,
temp=x,
act=act,
t_d_perov=t_d_perov,
t_d_brownm=t_d_brownm,
)
return dh - x * ds + R * iso * x / 2
def d_h_num_dev_calc(delta, dh_1, dh_2, temp, act):
"""
    Calculates dH using the numerical finite difference (f(x0) - f(x0 + h)) / h;
    the calculation is split into f(x0) and f(x0 + h) for simplification and readability
:param delta: non-stoichiometry delta
:param dh_1: reaction enthalpy of perovskite 1
:param dh_2: reaction enthalpy of perovskite 2
:param temp: temperature in K
:return: enthalpy change dH
"""
return -(
(0.5 * d_h_num_dev_0(delta, dh_1, dh_2, temp, act))
- (0.5 * d_h_num_dev_1(delta, dh_1, dh_2, temp, act))
) / ((1 / (R * temp)) - (1 / (R * (temp + 0.01))))
def d_s_fundamental(delta, dh_1, dh_2, temp, act, t_d_perov, t_d_brownm):
"""
dG = dH - T*dS, at dG = 0 => dh/T = dS
entropy of solid solution:
dS = s_con + s_th with s_th = 0.5*s_zero(O2) + s_vib
"""
# partial molar entropy of oxygen release as a function of the temperature
p_mol_ent_o = s_th_o(temp)
# configurational entropy
p_o_2_l = pd.np.log(
p_o2_calc(delta=delta, dh_1=dh_1, dh_2=dh_2, temp=temp, act=act)
)
entr_con = entr_con_mixed(temp=temp, p_o2_l=p_o_2_l, dh_1=dh_1, dh_2=dh_2, act=act)
# vibrational entropy
entr_vib = vib_ent(temp=temp, t_d_perov=t_d_perov, t_d_brownm=t_d_brownm)
# sum
d_s = p_mol_ent_o + entr_con + entr_vib
return d_s
def d_h_num_dev_0(delta, dh_1, dh_2, temp, act):
"""
Part of the numerical derivative calculation used to find dH as a function of delta and temperature
    This function is f(x0) in the finite difference (f(x0) - f(x0 + h)) / h
:param delta: non-stoichiometry delta
:param dh_1: reaction enthalpy of perovskite 1
:param dh_2: reaction enthalpy of perovskite 2
:param temp: temperature in K
:return: natural logarithm of result at x = x0
"""
result_0 = p_o2_calc(delta, dh_1, dh_2, temp, act)
return pd.np.log(result_0)
def d_h_num_dev_1(delta, dh_1, dh_2, temp, act):
"""
Part of the numerical derivative calculation used to find dH as a function of delta and temperature
    This function is f(x0 + h) in the finite difference (f(x0) - f(x0 + h)) / h
:param delta: non-stoichiometry delta
:param dh_1: reaction enthalpy of perovskite 1
:param dh_2: reaction enthalpy of perovskite 2
:param temp: temperature in K
:return: natural logarithm of result at x = x0 + h
"""
result_1 = p_o2_calc(delta, dh_1, dh_2, temp + 0.01, act)
return pd.np.log(result_1)
def entr_con_mixed(temp, p_o2_l, dh_1, dh_2, act):
"""
    Reference: Brendan Bulfin et al., DOI: 10.1039/C6CP03158G
Configurational entropy of a solid solution
:param temp: temperature in K
:param p_o2_l: natural logarithm of the oxygen partial pressure
:param dh_1: redox enthalpy of the first endmember of the solid solution
:param dh_2: redox enthalpy of the second endmember of the solid solution
:return: configurational entropy
"""
a = 2
stho = s_th_o(temp)
# fix reversed orders
if dh_1 > dh_2:
dh_2_old = dh_2
dh_2 = dh_1
dh_1 = dh_2_old
# avoiding errors due to division by zero
if act == 0:
delta_max_1 = 1e-10
else:
delta_max_1 = act * 0.5
if act == 1:
delta_max_2 = 0.5 - 1e-10
else:
delta_max_2 = 0.5 - (act * 0.5)
delta_1 = delta_fun(stho, temp, p_o2_l, dh_1, (act / 2))
delta_2 = delta_fun(stho, temp, p_o2_l, dh_2, ((1 - act) / 2))
if delta_1 > 0.0:
entr_con_1 = (
(1 / delta_max_1)
* (a / 2)
* R
* (pd.np.log(delta_max_1 - delta_1) - pd.np.log(delta_1))
* (delta_1 / (delta_1 + delta_2))
)
else:
entr_con_1 = 0.0
if delta_2 > 0.0:
entr_con_2 = (
(1 / delta_max_2)
* (a / 2)
* R
* (pd.np.log(delta_max_2 - delta_2) - pd.np.log(delta_2))
* (delta_2 / (delta_1 + delta_2))
)
else:
entr_con_2 = 0.0
return entr_con_1 + entr_con_2
def vib_ent(temp, t_d_perov, t_d_brownm):
"""
Vibrational entropy based on the Debye model
:param temp: temperature
    :param t_d_perov: Debye temperature of the perovskite phase
    :param t_d_brownm: Debye temperature of the brownmillerite phase
:return: vibrational entropy
"""
# integral for vibrational entropy using the Debye model
def s_int(temp, t_d):
def d_y(temp, t_d):
y = t_d / temp
def integrand(x):
return x ** 3 / (pd.np.exp(x) - 1)
if temp != 0:
integral_y = quad(integrand, 0, y)[0]
d = integral_y * (3 / (y ** 3))
else:
d = 0
return d
y = t_d / temp
s = R * (-3 * pd.np.log(1 - pd.np.exp(-y)) + 4 * d_y(temp, t_d))
return s
s_perov = s_int(temp, t_d_perov)
s_brownm = s_int(temp, t_d_brownm)
return 2 * s_perov - (2 * s_brownm)
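# Hedged sketch (illustrative only): with made-up Debye temperatures, the
# perovskite (softer lattice, lower t_d) has the larger vibrational entropy,
# so the difference returned by vib_ent() should be positive at 1000 K.
def _example_vib_ent():
    ds_vib = vib_ent(temp=1000.0, t_d_perov=500.0, t_d_brownm=600.0)
    assert ds_vib > 0.0
    return ds_vib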
def p_o2_calc(delta, dh_1, dh_2, temp, act):
"""
Calculates the oxygen partial pressure p_O2 of a perovskite solid solution with two redox-active species
:param delta: non-stoichiometry delta
:param dh_1: reaction enthalpy of perovskite 1
:param dh_2: reaction enthalpy of perovskite 2
:param temp: temperature in K
:return: p_O2 as absolute value
"""
def fun_p_o2(p_o2):
return delta_mix(temp, p_o2, dh_1, dh_2, act) - delta
try:
sol_p_o2_l = brentq(fun_p_o2, a=-100, b=100)
except ValueError:
sol_p_o2_l = brentq(fun_p_o2, a=-300, b=300)
return pd.np.exp(sol_p_o2_l)
def delta_mix(temp, p_o2_l, dh_1, dh_2, act):
"""
Calculates the total non-stoichiometry delta of a perovskite solid solution with two redox-active species
:param temp: temperature in K
:param p_o2_l: oxygen partial pressure as natural logarithm
:param dh_1: reaction enthalpy of perovskite 1
:param dh_2: reaction enthalpy of perovskite 2
:return: total non-stoichiometry delta
"""
stho = s_th_o(temp)
    return delta_fun(stho, temp, p_o2_l, dh_1, (act / 2)) + delta_fun(
        stho, temp, p_o2_l, dh_2, ((1 - act) / 2)
    )
def delta_fun(stho, temp, p_o2_l, dh, d_max):
common = pd.np.exp(stho * d_max / R)
common *= pd.np.exp(p_o2_l) ** (-d_max / 2.0)
common *= pd.np.exp(-dh * d_max / (R * temp))
return d_max * common / (1.0 + common)
def funciso_redox(po2, delta, x, p, s_th):
dh, ds = dh_ds(delta, s_th, p)
return dh - x * ds + R * po2 * x / 2
def funciso_redox_theo(po2, delta, x, p, t_d_perov, t_d_brownm, dh_min, dh_max, act):
dh = d_h_num_dev_calc(delta=delta, dh_1=dh_min, dh_2=dh_max, temp=x, act=act)
ds = d_s_fundamental(
delta=delta,
dh_1=dh_min,
dh_2=dh_max,
temp=x,
act=act,
t_d_perov=t_d_perov,
t_d_brownm=t_d_brownm,
)
return dh - x * ds + R * po2 * x / 2
def isobar_line_elling(iso, x):
return -R * iso * x / 2
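# Hedged sanity check (illustrative only): for pO2 = 1 (iso = ln(1) = 0) the
# Ellingham isobar line -R*T*ln(pO2)/2 is identically zero, and it is positive
# for pO2 < 1. Temperature and pressure values below are arbitrary.
def _example_isobar_line_elling():
    assert isobar_line_elling(pd.np.log(1.0), 1200.0) == 0.0
    assert isobar_line_elling(pd.np.log(1e-5), 1200.0) > 0.0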
def init_isographs(cid, plot_type, payload):
mask = ["identifier", "data"]
contrib = Contributions.objects.only(*mask).get(id=cid)
data = {}
for k, v in nested_to_record(contrib.data, sep=".").items():
if not k.endswith(".display") and not k.endswith(".unit"):
if k.endswith(".value"):
kk = k.rsplit(".", 1)[0]
data[kk] = float(v.to_decimal())
else:
data[k] = v
data["compstr_disp"] = remove_comp_one(data["formula"]) # for user display
if data["compstr_disp"] == data["formula"]:
data["formula"] = add_comp_one(
data["formula"]
) # compstr must contain '1' such as in "Sr1Fe1Ox"
data["compstr_disp"] = [
"".join(g) for _, g in groupby(str(data["compstr_disp"]), str.isalpha)
]
data["experimental_data_available"] = data.get("fit_type_entr")
if data["experimental_data_available"]:
data["compstr_exp"] = data["oxidized_phase.composition"]
data["compstr_exp"] = [
"".join(g) for _, g in groupby(str(data["compstr_exp"]), str.isalpha)
]
else:
data["compstr_exp"] = "n.a."
data["td_perov"] = data["debye_temp.perovskite"]
data["td_brownm"] = data["debye_temp.brownmillerite"]
data["tens_avail"] = data["tensors_available"]
a, b = 1e-10, 0.5 - 1e-10 # limiting values for non-stoichiometry delta in brentq
if plot_type == "isotherm": # pressure on the x-axis
x_val = pd.np.log(pd.np.logspace(payload["rng"][0], payload["rng"][1], num=100))
elif not payload.get("rng"): # dH or dS # delta on the x-axis
x_val = pd.np.linspace(0.01, 0.49, num=100)
else: # temperature on the x-axis
x_val = pd.np.linspace(payload["rng"][0], payload["rng"][1], num=100)
return data, a, b, x_val
class IsographView(SwaggerView):
resource = ContributionsResource
def get(self, cid, plot_type):
"""Retrieve RedoxThermoCSP Isograph data for a single contribution.
---
operationId: get_redox_thermo_csp_iso
parameters:
- name: cid
in: path
type: string
pattern: '^[a-f0-9]{24}$'
required: true
description: contribution ID (ObjectId)
- name: plot_type
in: path
type: string
required: true
enum: [isotherm, isobar, isoredox, enthalpy_dH, entropy_dS, ellingham]
description: type of isograph
- name: iso
in: query
type: number
required: true
description: iso value
- name: rng
in: query
type: array
items:
type: number
minItems: 2
maxItems: 2
description: comma-separated graph range
- name: del
in: query
type: number
description: delta value
responses:
200:
description: Isograph data as defined by contributor
schema:
type: array
items:
type: object
"""
rng = request.args.get("rng")
if rng:
rng = list(map(float, rng.split(",")))
iso = float(request.args["iso"])
payload = {"iso": iso, "rng": rng}
pars, a, b, x_val = init_isographs(cid, plot_type, payload)
if plot_type == "ellingham":
payload["iso"] = pd.np.log(10 ** payload["iso"])
delt = request.args.get("del")
if delt:
payload["del"] = float(delt)
resiso, resiso_theo, ellingiso = [], [], []
if pars[
"experimental_data_available"
]: # only execute this if experimental data is available
for xv in x_val: # calculate experimental data
try:
if plot_type in ["isobar", "isoredox", "ellingham"]:
s_th = s_th_o(xv)
args = (payload["iso"], xv, pars, s_th)
elif plot_type == "isotherm":
s_th = s_th_o(payload["iso"])
args = (xv, payload["iso"], pars, s_th)
elif plot_type == "enthalpy_dH" or plot_type == "entropy_dS":
s_th = s_th_o(payload["iso"])
args = (payload["iso"], xv, pars, s_th)
if plot_type == "isoredox":
solutioniso = brentq(funciso_redox, -300, 300, args=args)
resiso.append(pd.np.exp(solutioniso))
elif plot_type == "isotherm" or plot_type == "isobar":
solutioniso = rootfind(a, b, args, funciso)
resiso.append(solutioniso)
elif plot_type == "enthalpy_dH":
solutioniso = dh_ds(xv, args[-1], args[-2])[0] / 1000
resiso.append(solutioniso)
elif plot_type == "entropy_dS":
solutioniso = dh_ds(xv, args[-1], args[-2])[1]
resiso.append(solutioniso)
elif plot_type == "ellingham":
dh_ds_vals = dh_ds(payload["del"], args[-1], args[-2])
solutioniso = (dh_ds_vals[0] - dh_ds_vals[1] * xv) / 1000.0
resiso.append(solutioniso)
ellingiso.append(isobar_line_elling(args[0], xv) / 1000.0)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso.append(None)
res_interp, res_fit = [], []
for delta_val, res_i in zip(x_val, resiso): # show interpolation
if (
pars["delta_min"] < delta_val < pars["delta_max"]
): # result within experimentally covered δ range
res_fit.append(res_i)
res_interp.append(None)
else: # result outside this range
res_fit.append(None)
res_interp.append(res_i)
else:
res_fit, res_interp = (
None,
None,
) # don't plot any experimental data if it is not available
try: # calculate theoretical data
for xv in x_val[
::4
]: # use less data points for theoretical graphs to improve speed
if plot_type in [
"isobar",
"isoredox",
"enthalpy_dH",
"entropy_dS",
"ellingham",
]:
args_theo = (payload["iso"], xv)
elif plot_type == "isotherm":
args_theo = (xv, payload["iso"])
args_theo = args_theo + (
pars,
pars["td_perov"],
pars["td_brownm"],
pars["dh_min"],
pars["dh_max"],
pars["act_mat"],
)
if plot_type == "isoredox":
try:
solutioniso_theo = brentq(
funciso_redox_theo, -300, 300, args=args_theo
)
except ValueError:
solutioniso_theo = brentq(
funciso_redox_theo, -100, 100, args=args_theo
)
resiso_theo.append(pd.np.exp(solutioniso_theo))
elif plot_type == "isotherm" or plot_type == "isobar":
solutioniso_theo = rootfind(a, b, args_theo, funciso_theo)
resiso_theo.append(solutioniso_theo)
elif plot_type == "enthalpy_dH":
solutioniso_theo = (
d_h_num_dev_calc(
delta=xv,
dh_1=pars["dh_min"],
dh_2=pars["dh_max"],
temp=payload["iso"],
act=pars["act_mat"],
)
/ 1000.0
)
resiso_theo.append(solutioniso_theo)
elif plot_type == "entropy_dS":
solutioniso_theo = d_s_fundamental(
delta=xv,
dh_1=pars["dh_min"],
dh_2=pars["dh_max"],
temp=payload["iso"],
act=pars["act_mat"],
t_d_perov=pars["td_perov"],
t_d_brownm=pars["td_brownm"],
)
resiso_theo.append(solutioniso_theo)
elif plot_type == "ellingham":
dh = d_h_num_dev_calc(
delta=payload["del"],
dh_1=pars["dh_min"],
dh_2=pars["dh_max"],
temp=xv,
act=pars["act_mat"],
)
ds = d_s_fundamental(
delta=payload["del"],
dh_1=pars["dh_min"],
dh_2=pars["dh_max"],
temp=xv,
act=pars["act_mat"],
t_d_perov=pars["td_perov"],
t_d_brownm=pars["td_brownm"],
)
solutioniso_theo = (dh - ds * xv) / 1000.0
resiso_theo.append(solutioniso_theo)
except ValueError: # if brentq function finds no zero point due to plot out of range
resiso_theo.append(None)
x = list(pd.np.exp(x_val)) if plot_type == "isotherm" else list(x_val)
x_theo = x[::4]
x_exp = None
if pars["experimental_data_available"]:
x_exp = x
elif plot_type == "ellingham":
x_exp = None
for xv in x_theo:
ellingiso.append(isobar_line_elling(payload["iso"], xv) / 1000.0)
y_min, y_max = 0, 0
if plot_type == "enthalpy_dH":
if max(pd.np.append(resiso, resiso_theo)) > (
pars["dh_max"] * 0.0015
): # limiting values for the plot
y_max = pars["dh_max"] * 0.0015
else:
y_max = max(pd.np.append(resiso, resiso_theo)) * 1.2
if min(pd.np.append(resiso, resiso_theo)) < -10:
y_min = -10
else:
y_min = min(pd.np.append(resiso, resiso_theo)) * 0.8
elif plot_type == "entropy_dS":
y_min = -10 # limiting values for the plot
if max(pd.np.append(resiso, resiso_theo)) > 250:
y_max = 250
else:
y_max = max(pd.np.append(resiso, resiso_theo)) * 1.2
response = [
{
"x": x_exp,
"y": res_fit,
"name": "exp_fit",
"line": {"color": "rgb(5,103,166)", "width": 2.5},
},
{
"x": x_exp,
"y": res_interp,
"name": "exp_interp",
"line": {"color": "rgb(5,103,166)", "width": 2.5, "dash": "dot"},
},
{
"x": x_theo,
"y": resiso_theo,
"name": "theo",
"line": {"color": "rgb(217,64,41)", "width": 2.5},
},
[y_min, y_max],
[
pars["compstr_disp"],
pars["compstr_exp"],
pars["tens_avail"],
pars["last_updated"],
],
]
if plot_type == "ellingham":
response[-2] = {
"x": x_theo if x_exp is None else x_exp,
"y": ellingiso,
"name": "isobar line",
"line": {"color": "rgb(100,100,100)", "width": 2.5},
}
return {"data": response}
class WaterSplitting:
@staticmethod
def dg_zero_water_splitting(temp):
"""
Uses linear fits of data in Barin, Thermochemical Data of Pure Substances
Only valid for steam!
:return: dg_zero
"""
dg_zero = ((-0.052489 * temp) + 245.039) * 1000
return dg_zero
@staticmethod
def k_water_splitting(temp):
"""
        Get the equilibrium constant of water splitting
:param temp: temperature in K
:return: equilibrium constant
"""
dg_zero = WaterSplitting().dg_zero_water_splitting(temp)
k_eq = pd.np.exp(dg_zero / (-R * temp))
return k_eq
@staticmethod
def get_h2_h2o_ratio(temp, po2):
"""
Converts an oxygen partial pressure into a ratio of H2 to H2O for water splitting
:param temp: temperature in K
:param po2: oxygen partial pressure
:return: ratio of H2 to H2O
"""
h2_h2o = WaterSplitting().k_water_splitting(temp) / pd.np.sqrt(po2)
return h2_h2o
@staticmethod
def get_po2(temp, h2_h2o):
"""
Converts a ratio of H2 to H2O for water splitting into an oxygen partial pressure
:param temp: temperature in K
:param h2_h2o: ratio of H2 to H2O
:return: oxygen partial pressure
"""
po2 = (WaterSplitting().k_water_splitting(temp) / h2_h2o) ** 2
return po2
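# Hedged round-trip check (illustrative only): converting an oxygen partial
# pressure to an H2/H2O ratio and back should recover the original pO2. The
# temperature and pO2 below are arbitrary values treated as dimensionless,
# matching the conventions of the static methods above.
def _example_water_splitting_roundtrip():
    temp, po2 = 1500.0, 1e-10
    ratio = WaterSplitting().get_h2_h2o_ratio(temp, po2)
    po2_back = WaterSplitting().get_po2(temp, ratio)
    assert abs(po2_back - po2) / po2 < 1e-9
    return ratio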
class CO2Splitting:
@staticmethod
def dg_zero_co2_splitting(temp):
"""
Uses linear fits of data in Barin, Thermochemical Data of Pure Substances
:return: dg_zero
"""
dg_zero_co2 = (temp ** 2) * 9.44e-7 - (0.0032113 * temp) - 393.523
dg_zero_co = -0.0876385 * temp - 111.908
dg_zero = (-dg_zero_co2 + dg_zero_co) * 1000
return dg_zero
@staticmethod
def k_co2_splitting(temp):
"""
        Get the equilibrium constant of CO2 splitting
:param temp: temperature in K
:return: equilibrium constant
"""
dg_zero = CO2Splitting().dg_zero_co2_splitting(temp)
k_eq = pd.np.exp(dg_zero / (-R * temp))
return k_eq
@staticmethod
def get_co_co2_ratio(temp, po2):
"""
        Converts an oxygen partial pressure into a ratio of CO to CO2 for CO2 splitting
        :param temp: temperature in K
        :param po2: oxygen partial pressure
        :return: ratio of CO to CO2
        """
        co_co2 = CO2Splitting().k_co2_splitting(temp) / pd.np.sqrt(po2)
        return co_co2
@staticmethod
def get_po2(temp, co_co2):
"""
        Converts a ratio of CO to CO2 for CO2 splitting into an oxygen partial pressure
        :param temp: temperature in K
        :param co_co2: ratio of CO to CO2
:return: oxygen partial pressure
"""
po2 = (CO2Splitting().k_co2_splitting(temp) / co_co2) ** 2
return po2
class EnergyAnalysis:
"""
Analyze the energy input for different redox cycles
"""
def __init__(self, process="Air Separation"):
self.process = process
@staticmethod
def c_p_water_liquid(temp):
"""
Calculates the heat capacity of liquid water.
:return: cp_water
"""
# constants: Chase, NIST-JANAF Thermochemistry tables, Fourth Edition, 1998
shomdat = [-203.6060, 1523.290, -3196.413, 2474.455, 3.855326]
temp_frac = temp / 1000
c_p_water = (
shomdat[0]
+ (shomdat[1] * temp_frac)
+ (shomdat[2] * (temp_frac ** 2))
+ (shomdat[3] * (temp_frac ** 3))
+ (shomdat[4] / (temp_frac ** 2))
)
return c_p_water
@staticmethod
def c_p_steam(temp):
"""
Calculates the heat capacity of steam
:return: cp_steam
"""
if temp < 1700:
shomdat = [30.09200, 6.832514, 6.793435, -2.534480, 0.082139]
else:
shomdat = [41.96126, 8.622053, -1.499780, 0.098119, -11.15764]
temp_frac = temp / 1000
c_p_steam = (
shomdat[0] + (shomdat[1] * temp_frac) + (shomdat[2] * (temp_frac ** 2))
)
c_p_steam += (shomdat[3] * (temp_frac ** 3)) + (shomdat[4] / (temp_frac ** 2))
return c_p_steam
@staticmethod
def get_heat_capacity(temp, td):
# credits to Dr. Joseph Montoya, LBNL
t_ratio = temp / td
def integrand(x):
return (x ** 4 * pd.np.exp(x)) / (pd.np.exp(x) - 1) ** 2
if isinstance(t_ratio, int) or isinstance(t_ratio, float):
cv_p = 9 * R * (t_ratio ** 3) * quad(integrand, 0, t_ratio ** -1)[0]
else:
cv_p = []
for i in range(len(t_ratio)):
cv_i = (
9 * R * (t_ratio[i] ** 3) * quad(integrand, 0, t_ratio[i] ** -1)[0]
)
cv_p = pd.np.append(cv_p, cv_i)
return cv_p * 5
@staticmethod
def get_heat_capacity_mixed(temp, delta, td_p=None, td_b=None):
enal = EnergyAnalysis()
cv_p = enal.get_heat_capacity(temp, td_p) * 5
cv_b = enal.get_heat_capacity(temp, td_b) * 4.5
ratio_p = (0.5 - delta) / 0.5
ratio_b = delta / 0.5
cv_m = pd.np.multiply(ratio_p, cv_p) + pd.np.multiply(ratio_b, cv_b)
return temp, cv_m
@staticmethod
def heat_input_linear(
temp_1, temp_2, delta_1, delta_2, t_d_perov, t_d_brownm, num=40
):
"""
Uses an approximation to calculate the integral c(T, delta)*T dT by splitting the interval into a number of
slices with constant c
Uses a linear approximation for delta between delta_1 and delta_2
This method is a lot faster than the actual integration and the errors of the approximation are negligible
(at default settings: < 1E-5, typically approx. 1E-6)
:param temp_1: initial temperature(s)
:param temp_2: final temperature(s)
:param delta_1: initial non-stoichiometry value(s)
:param delta_2: final non-stoichiometry values(s)
:param num: number of steps for the approximation of the integral
:return: heat input to heat perovskite from temp_1 to temp_2 considering the change in delta (in J)
positive for heating, negative for cooling
"""
try:
# treatment of arrays for output of multiple data points
dqs = []
if not (isinstance(temp_1, float) or (isinstance(temp_1, int))):
for i in range(len(temp_1)):
tempval = pd.np.linspace(temp_1[i], temp_2[i], num=num)
deltaval = pd.np.linspace(delta_1[i], delta_2[i], num=num)
# calculate average values within intervals
delta_x0_x1 = pd.np.empty(len(deltaval) - 1)
for i in range(len(deltaval) - 1):
delta_x0_x1[i] = (deltaval[i] + deltaval[i + 1]) / 2
temp_x0_x1 = pd.np.empty(len(tempval) - 1)
for i in range(len(tempval) - 1):
temp_x0_x1[i] = (tempval[i] + tempval[i + 1]) / 2
# length of a temperature step
                    del_temp = tempval[1] - tempval[0]  # equals (temp_2[i] - temp_1[i]) / len(temp_x0_x1)
# calculate the area under the step for each step
dq = 0
for i in range(len(delta_x0_x1)):
cv_step = EnergyAnalysis().get_heat_capacity_mixed(
temp_x0_x1[i],
delta_x0_x1[i],
td_p=t_d_perov,
td_b=t_d_brownm,
)[1]
q_step = cv_step * del_temp
dq += q_step
dqs = pd.np.append(dqs, dq)
dq = dqs
else:
tempval = pd.np.linspace(temp_1, temp_2, num=num)
deltaval = pd.np.linspace(delta_1, delta_2, num=num)
# calculate average values within intervals
delta_x0_x1 = pd.np.empty(len(deltaval) - 1)
for i in range(len(deltaval) - 1):
delta_x0_x1[i] = (deltaval[i] + deltaval[i + 1]) / 2
temp_x0_x1 = pd.np.empty(len(tempval) - 1)
for i in range(len(tempval) - 1):
temp_x0_x1[i] = (tempval[i] + tempval[i + 1]) / 2
# length of a temperature step
del_temp = (temp_2 - temp_1) / len(temp_x0_x1)
# calculate the area under the step for each step
dq = 0
for i in range(len(delta_x0_x1)):
cv_step = EnergyAnalysis().get_heat_capacity_mixed(
temp_x0_x1[i], delta_x0_x1[i], td_p=t_d_perov, td_b=t_d_brownm
)[1]
q_step = cv_step * del_temp
dq += q_step
except TypeError:
dq = None
raise ValueError(
"Elastic tensors or crystal structures not available for this set of materials."
)
return dq
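    # Usage sketch (illustrative; all numbers below are assumptions, not reference data):
    #
    #     >>> q = EnergyAnalysis.heat_input_linear(
    #     ...     temp_1=873.15, temp_2=1273.15, delta_1=0.05, delta_2=0.20,
    #     ...     t_d_perov=500.0, t_d_brownm=450.0)
    #     >>> q   # heat input in J per mol of redox material, positive for heating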
@staticmethod
def energy_steam_generation(temp_1, temp_2, h_2_h2o, celsius=True, h_rec=0.0):
"""
        Calculates the energy required to heat water, evaporate it, and generate steam at temperature temp_2
Assuming water at ambient pressure, boiling point 100 °C
:param temp_1: initial temperature of water/steam
:param temp_2: steam temperature
:param h_2_h2o: partial pressure ratio h2/h2o
:param celsius: if True, temperature values are assumed to be in degrees celsius
:param h_rec: heat recovery efficiency, can be between 0 and 1
:return: energy required to generate steam per mol of H2 in the product stream in kJ/mol
"""
if celsius:
temp_1 = temp_1 + 273.15
temp_2 = temp_2 + 273.15
enal = EnergyAnalysis()
# liquid water (at ambient pressure)
# this code only considers water at ambient pressure!
if temp_1 < 373.15:
if temp_2 > 373.15:
energy_1 = quad(enal.c_p_water_liquid, temp_1, 373.15)[0]
else:
energy_1 = quad(enal.c_p_water_liquid, temp_1, temp_2)[0]
else:
energy_1 = 0
if temp_2 > 373.15:
if temp_1 < 373.15:
energy_2 = quad(enal.c_p_steam, 373.15, temp_2)[0]
else:
energy_2 = quad(enal.c_p_steam, temp_1, temp_2)[0]
else:
energy_2 = 0
# from the literature
heat_vaporization = 40790
if temp_1 < 373.15 < temp_2:
total_energy = energy_1 + energy_2 + heat_vaporization
else:
total_energy = energy_1 + energy_2
# per mol of H2
total_energy = total_energy / h_2_h2o
# considering heat recovery
total_energy = total_energy * (1 - h_rec)
return total_energy / 1000
@staticmethod
def energy_integral_theo(
enth_steps,
celsius,
temp_1,
temp_2,
compstr,
dh_min,
dh_max,
t_d_perov,
t_d_brownm,
p_o_2_1,
p_o_2_2,
):
"""
Determines the chemical energy change using theoretical data. All variables explained in
EnergyAnalysis.calc
"""
# To get a good approximation of the integral over the enthalpy values, the area under the curve is calculated
# stepwise. The actual integral calculation would take too long, as each enthalpy value is calculated
# numerically
# We are only considering the case of linear change of both pressure and temperature between reduction and
# oxidation here
if celsius:
tempval = pd.np.linspace(temp_1 + 273.15, temp_2 + 273.15, num=enth_steps)
else:
tempval = pd.np.linspace(temp_1, temp_2, num=enth_steps)
p_val = pd.np.logspace(
pd.np.log10(p_o_2_1), pd.np.log10(p_o_2_2), num=enth_steps
)
sample_spl = split_comp(compstr)
act = find_active(mat_comp=sample_spl)[1]
delta_vals = []
for i in range(len(tempval)):
args_theo = (
pd.np.log(p_val[i]),
tempval[i],
None,
t_d_perov,
t_d_brownm,
dh_min,
dh_max,
act,
)
delta_val_i = rootfind(1e-10, 0.5 - 1e-10, args_theo, funciso_theo)
delta_vals = pd.np.append(delta_vals, delta_val_i)
dh_vals = []
for i in range(len(tempval)):
dh_i = d_h_num_dev_calc(
delta=delta_vals[i], dh_1=dh_min, dh_2=dh_max, temp=tempval[i], act=act
)
dh_vals = pd.np.append(dh_vals, dh_i)
# calculate energy stepwise
energy_red = []
for i in range(len(delta_vals) - 1):
# deltastep * average dh
h_x0_x1_i = (dh_vals[i] + dh_vals[i + 1]) / 2
energy_i = (delta_vals[i + 1] - delta_vals[i]) * h_x0_x1_i
energy_red = pd.np.append(energy_red, energy_i)
energy_integral_dh = sum(energy_red) / 1000
return energy_integral_dh
@staticmethod
def mechanical_envelope(p_red):
"""
Uses the "mechanical envelope" function from Stefan Brendelberger et al.
dx.doi.org/10.1016/j.solener.2016.11.023
Estimates the energy required to pump one mol of oxygen at this pressure using mechanical pumps.
:param p_red: oxygen partial pressure at reduction conditions
:return: pump_ener_envelope: mechanical energy required to pump one mol of O
"""
if (p_red < 1e-6) or (p_red > 0.7):
q_pump = float("inf") # mechanical envelope not applicable in this range
else:
eff_sol = 0.4
temp = 473 # this is the operating temperature of the pump
a0 = 0.30557
a1 = -0.17808
a2 = -0.15514
a3 = -0.03173
a4 = -0.00203
p0 = 1e5
p = p_red * p0
eff = (
a0
+ a1 * pd.np.log10(p / p0)
+ a2 * (pd.np.log10(p / p0)) ** 2
+ a3 * (pd.np.log10(p / p0)) ** 3
+ a4 * (pd.np.log10(p / p0)) ** 4
)
q_iso = R * temp * pd.np.log(p0 / p)
q_pump = (q_iso / eff) / eff_sol
q_pump = q_pump / 2000
return q_pump
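    # Usage sketch (illustrative): pumping energy per mol of O at an assumed reduction pressure
    # of 1e-3 bar. Outside the validity range of the correlation (p_red below 1e-6 bar or above
    # 0.7 bar) the method deliberately returns infinity.
    #
    #     >>> EnergyAnalysis.mechanical_envelope(p_red=1e-3)   # kJ per mol of O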
@staticmethod
def dhf_h2o(t_ox):
"""
        Gets the heat of formation of water at a certain temperature
Based on the Shomate equation and the NIST-JANAF thermochemical tables
        H° − H°298.15 = A*t + B*t²/2 + C*t³/3 + D*t⁴/4 − E/t + F − H
        H° = A*t + B*t²/2 + C*t³/3 + D*t⁴/4 − E/t + F
https://webbook.nist.gov/cgi/cbook.cgi?ID=C7732185&Units=SI&Mask=1#Thermo-Gas
"""
if t_ox <= 1700:
a = 30.09200
b = 6.832514
c = 6.793435
d = -2.534480
e = 0.082139
f = -250.8810
else:
a = 41.96426
b = 8.622053
c = -1.499780
d = 0.098119
e = -11.15764
f = -272.1797
t_1000 = t_ox / 1000
hform = a * t_1000
hform += 0.5 * b * (t_1000 ** 2)
hform += (1 / 3.0) * c * (t_1000 ** 3)
        hform += (1 / 4.0) * d * (t_1000 ** 4)
hform += -e / t_1000
hform += f
return hform
@staticmethod
def dh_co_co2(t_ox):
"""
Gets the heat of formation of CO2 and of CO and returns the difference to get the heat of reaction
Based on the Shomate equation and the NIST-JANAF thermochemical tables
        H° − H°298.15 = A*t + B*t²/2 + C*t³/3 + D*t⁴/4 − E/t + F − H
        H° = A*t + B*t²/2 + C*t³/3 + D*t⁴/4 − E/t + F
CO2: https://webbook.nist.gov/cgi/cbook.cgi?ID=C124389&Units=SI&Mask=1#Thermo-Gas
CO: https://webbook.nist.gov/cgi/cbook.cgi?ID=C630080&Units=SI&Mask=1#Thermo-Gas
"""
t_1000 = t_ox / 1000
# CO2
if t_ox <= 1200:
a = 24.99735
b = 55.18696
c = -33.69137
d = 7.948387
e = -0.136638
f = -403.6075
else:
a = 58.16639
b = 2.720074
c = -0.492289
d = 0.038844
e = -6.447293
f = -425.9186
hco2 = a * t_1000
hco2 += 0.5 * b * (t_1000 ** 2)
hco2 += (1 / 3) * c * (t_1000 ** 3)
        hco2 += (1 / 4) * d * (t_1000 ** 4)
hco2 += -e / t_1000
hco2 += f
# CO
if t_ox <= 1300:
a = 25.56759
b = 6.096130
c = 4.054656
d = -2.671301
e = 0.131021
f = -118.0089
else:
a = 35.15070
b = 1.300095
c = -0.205921
d = 0.013550
e = -3.282780
f = -127.8375
hco = a * t_1000
hco += 0.5 * b * (t_1000 ** 2)
hco += (1 / 3) * c * (t_1000 ** 3)
        hco += (1 / 4) * d * (t_1000 ** 4)
hco += -e / t_1000
hco += f
return hco2 - hco
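    # Usage sketch (illustrative): both Shomate-based helpers take a temperature in K and return
    # enthalpies in kJ/mol, e.g. at an assumed oxidation temperature of 1100 K:
    #
    #     >>> EnergyAnalysis.dhf_h2o(1100.0)     # heat of formation of steam
    #     >>> EnergyAnalysis.dh_co_co2(1100.0)   # difference of the CO2 and CO heats of formation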
def calc(
self,
p_ox,
p_red,
t_ox,
t_red,
data_origin="Exp",
data_use="combined",
enth_steps=30,
sample_ident=-1,
celsius=True,
from_file=True,
heat_cap=True,
heat_cap_approx=True,
):
"""
        Performs an energy analysis using experimental or theoretical data.
:param p_ox: Oxidation partial pressure of oxygen (in bar) or ratio p(H2)/p(H2O) / p(CO)/p(CO2)
:param p_red: Oxygen partial pressure for reduction (in bar)
:param t_ox: Oxidation temperature
:param t_red: Reduction temperature
        :param data_origin: "Exp": experimental data
                            "Theo": theoretical data
        :param data_use: ***only relevant if 'data_origin' = "Theo"***
                         "endmembers": uses the redox enthalpies of the solid solution endmembers to estimate
                         the redox enthalpies of the solid solutions
                         "combined": corrects the above-mentioned data by the actual redox enthalpies for the
                         solid solutions calculated via DFT
:param enth_steps: number of enthalpy values which are calculated for each material in order to
reach a good approximation of the integral over dH vs. delta
:param sample_ident: Sample number(s) (experimental data) or composition (theoretical data),
default value '-1'-> analyze all samples
:param pump_ener: allows to consider the pumping energy required to pump from p_o_2_1 to p_o_2_2
input in kJ per kg of redox material in the oxidized state + the losses
This depends on many factors, such as the type of pumps used, the volume of the
reaction chamber, the reactor type etc., so the user needs to calculate this
value beforehand depending on the individual process conditions
In case some of the pumping energy can be recovered, this share needs to be
subtracted beforehand, as it is not considered herein.
:param celsius: if True, assumes all input temperatures are in °C instead of K
:param from_file: if True, takes the enthalpies, Debye temperatures, and materials lists from
the file "theo_redenth_debye.json". Much faster than using the MPRester
Only works if sample_ident = -1
:param heat_cap: if True, sensible energy to heat the samples is considered
:param heat_cap_approx: if True, uses values for SrFeOx in case of missing heat capacity data
:return: dict_result: dictonary with results for different materials
"""
si_first = sample_ident
# correct temperature values for Kelvin/Celsius
if celsius:
temp_1_corr = t_ox + 273.15
temp_2_corr = t_red + 273.15
else:
temp_1_corr = t_ox
temp_2_corr = t_red
if data_origin == "Exp": # currently not in use for updates of existing data
# load experimental sample data from file
path = os.path.abspath("")
filepath = os.path.join(path, "exp_data.json")
with open(filepath) as handle:
expdata = json.loads(handle.read())
# use equivalent partial pressures for Water Splitting and CO2 splitting
if self.process == "Water Splitting":
p_ox = WaterSplitting().get_po2(temp=temp_1_corr, h2_h2o=p_ox)
elif self.process == "CO2 Splitting":
p_ox = CO2Splitting().get_po2(temp=temp_1_corr, co_co2=p_ox)
# iterate over samples
if isinstance(sample_ident, collections.Sized) and not isinstance(
sample_ident, str
):
no_range = range(len(sample_ident))
sample = None
else:
no_range = range(1)
if data_origin == "Exp":
sample = int(sample_ident)
else:
sample = str(sample_ident)
# iterate over all available samples
if sample_ident == -1:
sample = None
if data_origin == "Exp":
no_range = range(0, 150)
sample_ident = no_range
else:
if not from_file:
filename = os.path.join(
os.path.abspath(".."),
"datafiles",
"perovskite_theo_list.csv",
)
if not os.path.exists(filename):
raise ImportError(
"File 'perovskite_theo_list.csv' not found."
)
fo = open(filename, "rb")
sample_ident = pd.np.genfromtxt(
fo, dtype="str", delimiter=",", skip_header=1
)
fo.close()
else:
sampledata = views.get_theo_data()
sample_ident = sampledata["compstr"]
no_range = range(len(sample_ident))
sample_l, chemical_energy_l, sensible_energy_l, mol_mass_ox_l, prodstr_alt_l = (
[],
[],
[],
[],
[],
)
mol_prod_mol_red_l, t_ox_l, t_red_l, p_ox_l, p_red_l, compstr_l = (
[],
[],
[],
[],
[],
[],
)
(
delta_1_l,
delta_2_l,
mass_redox_l,
prodstr_l,
l_prod_kg_red_l,
g_prod_kg_red_l,
) = ([], [], [], [], [], [])
for i in no_range:
if not sample:
sample = sample_ident[i]
# this only works if the sample number/data exists
try:
if data_origin == "Exp":
exp_index = -1
for k in range(len(expdata)):
if int(expdata["Sample number"][k]) == sample:
exp_index = k
if exp_index == -1:
raise ValueError("Experimental data for this sample not found.")
compstr = expdata["theo_compstr"][exp_index]
compstr_x = compstr.split("Ox")[0]
# this formats the parameters the same way we have them in views.py
fit_param_enth = {
"a": float(expdata["dH_max"][exp_index]),
"b": float(expdata["dH_min"][exp_index]),
"c": float(expdata["t"][exp_index]),
"d": float(expdata["s"][exp_index]),
}
fit_type_entr = str(expdata["fit type entropy"][exp_index])
if fit_type_entr == "Dilute_Species":
fit_par_ent = {
"a": float(expdata["entr_dil_s_v"][exp_index]),
"b": float(expdata["entr_dil_a"][exp_index]),
"c": float(expdata["delta_0"][exp_index]),
}
else:
fit_par_ent = {
"a": float(expdata["entr_solid_sol_s"][exp_index]),
"b": float(expdata["entr_solid_sol_shift"][exp_index]),
"c": float(expdata["delta_0"][exp_index]),
}
theo_compstr = compstr
splitcomp = split_comp(compstr)
delta_0 = float(expdata["delta_0"][exp_index])
actf = find_active(mat_comp=splitcomp)[1]
act_mat = {"Material": float(actf)}
fit_param_fe = {
"a": 231.062,
"b": -24.3338,
"c": 0.839785,
"d": 0.219157,
}
pars = {
"fit_par_ent": fit_par_ent,
"fit_param_enth": fit_param_enth,
"fit_type_entr": fit_type_entr,
"delta_0": delta_0,
"fit_param_fe": fit_param_fe,
"act_mat": act_mat,
}
args_1 = (pd.np.log(p_ox), temp_1_corr, pars, s_th_o(temp_1_corr))
args_2 = (pd.np.log(p_red), temp_2_corr, pars, s_th_o(temp_2_corr))
delta_1 = rootfind(1e-10, 0.5 - 1e-10, args_1, funciso)
delta_2 = rootfind(1e-10, 0.5 - 1e-10, args_2, funciso)
# use theoretical elastic tensors
sampledata = views.get_theo_data()
for z in range(len(sampledata["compstr"])):
if (sampledata["compstr"][z]).split("O3")[0] == compstr.split(
"Ox"
)[0]:
index_debye = z
t_d_perov = float(sampledata["Debye temp perovskite"][index_debye])
t_d_brownm = float(
sampledata["Debye temp brownmillerite"][index_debye]
)
else:
# if composition does not contain ones as stoichiometries, add them
sample = add_comp_one(compstr=sample)
if not from_file or si_first != -1:
try:
red_active = redenth_act(sample)
except TypeError:
raise ValueError(
"Enthalpy data not available for this material."
)
h_min = red_active[1]
h_max = red_active[2]
act = red_active[3]
else:
h_min = float(sampledata["dH_min"][i])
h_max = float(sampledata["dH_max"][i])
act = float(sampledata["act"][i])
compstr = sample
compstr_x = compstr.split("O")[0]
if not from_file or si_first != -1:
try: # get Debye temperatures for vibrational entropy
mp_ids = get_mpids_comps_perov_brownm(compstr=compstr)
t_d_perov = get_debye_temp(mp_ids[0])
t_d_brownm = get_debye_temp(mp_ids[1])
except Exception as e: # if no elastic tensors or no data for this material is available
mp_ids = (
"mp-510624",
"mp-561589",
) # using data for SrFeOx if no data is available (close approximation)
t_d_perov = get_debye_temp(mp_ids[0])
t_d_brownm = get_debye_temp(mp_ids[1])
else:
t_d_perov = float(sampledata["Debye temp perovskite"][i])
t_d_brownm = float(sampledata["Debye temp brownmillerite"][i])
args_theo_1 = (
pd.np.log(p_ox),
temp_1_corr,
None,
t_d_perov,
t_d_brownm,
h_min,
h_max,
act,
)
delta_1 = rootfind(1e-10, 0.5 - 1e-10, args_theo_1, funciso_theo)
args_theo_2 = (
pd.np.log(p_red),
temp_2_corr,
None,
t_d_perov,
t_d_brownm,
h_min,
h_max,
act,
)
delta_2 = rootfind(1e-10, 0.5 - 1e-10, args_theo_2, funciso_theo)
# calculate the mass change in %
comp_ox = compstr_x + "O" + str(float(3 - delta_1))
comp_red = compstr_x + "O" + str(float(3 - delta_2))
mol_mass_ox = float(Composition(comp_ox).weight)
mol_mass_red = float(Composition(comp_red).weight)
mass_redox_i = ((mol_mass_ox - mol_mass_red) / mol_mass_ox) * 100
# define reaction products
if self.process == "Air Separation":
prodstr = "O2"
prodstr_alt = "O"
elif self.process == "Water Splitting":
prodstr = "H2"
prodstr_alt = prodstr
elif self.process == "CO2 Splitting":
prodstr = "CO"
prodstr_alt = prodstr
else:
raise ValueError(
"Process must be either Air Separation, Water Splitting, or CO2 Splitting!"
)
# only continue if the user-designated reduction step actually leads to reduction
# if not, set result to infinite
if delta_2 < delta_1:
ener_i = pd.np.ones(5) * float("inf")
per_kg_redox = pd.np.ones(5) * float("inf")
per_kg_wh_redox = pd.np.ones(5) * float("inf")
kj_mol_prod = pd.np.ones(5) * float("inf")
energy_l = pd.np.ones(5) * float("inf")
energy_l_wh = pd.np.ones(5) * float("inf")
efficiency = float("inf")
mol_prod_mol_red = float("inf")
l_prod_kg_red = float("inf")
g_prod_kg_red = float("inf")
else:
# mol product per mol of redox material
mol_prod_mol_red = delta_2 - delta_1
# L product per kg of redox material (SATP)
l_prod_kg_red = mol_prod_mol_red * (24.465 / (0.001 * mol_mass_ox))
# convert mol O to mol O2
if self.process == "Air Separation":
l_prod_kg_red = l_prod_kg_red * 0.5
# g product per kg redox material
g_prod_kg_red = float(Composition(prodstr).weight) * (
l_prod_kg_red / 24.465
)
if data_origin == "Exp":
d_delta = delta_0
else:
d_delta = 0.0
# correct for d_delta
d_delta_1 = delta_1 - d_delta
d_delta_2 = delta_2 - d_delta
# chemical energy
if data_origin == "Exp":
                        s_th_mean = (s_th_o(temp_1_corr) + s_th_o(temp_2_corr)) / 2
def dh_func_exp(d_delta_func):
return dh_ds(d_delta_func, s_th_mean, pars)[0]
energy_integral_dh = quad(dh_func_exp, d_delta_1, d_delta_2)[0]
if energy_integral_dh < 0:
raise ValueError(
"negative chemical energy due to insuffiencent experimental data...skipping this sample"
)
else:
energy_integral_dh = EnergyAnalysis(
process=self.process
).energy_integral_theo(
celsius=celsius,
compstr=compstr,
dh_max=h_max,
dh_min=h_min,
enth_steps=enth_steps,
p_o_2_1=p_ox,
p_o_2_2=p_red,
temp_1=t_ox,
temp_2=t_red,
t_d_perov=t_d_perov,
t_d_brownm=t_d_brownm,
)
# sensible energy
energy_sensible = 0
if heat_cap:
energy_sensible = (
EnergyAnalysis().heat_input_linear(
temp_1=temp_1_corr,
temp_2=temp_2_corr,
delta_1=delta_1,
delta_2=delta_2,
t_d_perov=t_d_perov,
t_d_brownm=t_d_brownm,
num=40,
)
/ 1000
)
chemical_energy_l.append(energy_integral_dh)
sensible_energy_l.append(energy_sensible)
mol_mass_ox_l.append(mol_mass_ox)
mol_prod_mol_red_l.append(mol_prod_mol_red)
t_ox_l.append(temp_1_corr)
t_red_l.append(temp_2_corr)
p_ox_l.append(p_ox)
p_red_l.append(p_red)
compstr_l.append(compstr)
delta_1_l.append(delta_1)
delta_2_l.append(delta_2)
mass_redox_l.append(mass_redox_i)
prodstr_l.append(prodstr)
prodstr_alt_l.append(prodstr_alt)
l_prod_kg_red_l.append(l_prod_kg_red)
g_prod_kg_red_l.append(g_prod_kg_red)
# skip this sample if the sample number does not exist
except Exception as e:
pass
# print("No data for sample " + str(sample) + " found!" + str(e))
sample = None
resdict = {
"Chemical Energy": chemical_energy_l,
"Sensible Energy": sensible_energy_l,
"mol_mass_ox": mol_mass_ox_l,
"mol_prod_mol_red": mol_prod_mol_red_l,
"T_ox": t_ox_l,
"T_red": t_red_l,
"p_ox": p_ox_l,
"p_red": p_red_l,
"compstr": compstr_l,
"delta_1": delta_1_l,
"delta_2": delta_2_l,
"mass_redox": mass_redox_l,
"prodstr": prodstr_l,
"prodstr_alt": prodstr_alt_l,
"l_prod_kg_red": l_prod_kg_red_l,
"g_prod_kg_red": g_prod_kg_red_l,
}
return resdict
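    # Usage sketch (illustrative; the conditions are arbitrary assumptions): an air-separation
    # cycle oxidized in air at 350 °C and reduced at 600 °C and 1e-5 bar, using theoretical data:
    #
    #     >>> ea = EnergyAnalysis(process="Air Separation")
    #     >>> res = ea.calc(p_ox=0.21, p_red=1e-5, t_ox=350, t_red=600,
    #     ...               data_origin="Theo", celsius=True)
    #     >>> res["Chemical Energy"]   # one entry per material that could be evaluated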
def on_the_fly(
self,
resdict,
pump_ener,
w_feed,
h_rec,
h_rec_steam,
celsius=True,
h_val="high",
p_ox_wscs=0,
rem_unstable=True,
):
"""
        Allows the energy input for different conditions to be calculated rather quickly, without having to
        re-calculate the time-intensive chemical and sensible energy every time
        :param resdict: dictionary with results (mainly the chemical and sensible energy), as calculated by
                        EnergyAnalysis().calc()
:param pump_ener: allows to consider the pumping energy required to pump from p_o_2_1 to p_o_2_2
input in kJ per kg of redox material in the oxidized state + the losses
This depends on many factors, such as the type of pumps used, the volume of the
reaction chamber, the reactor type etc., so the user needs to calculate this
value beforehand depending on the individual process conditions
In case some of the pumping energy can be recovered, this share needs to be
subtracted beforehand, as it is not considered herein.
:param h_rec: heat recovery efficiency factor (0...1) for chemical and sensible energy
***these values are only relevant for water splitting***
:param h_rec_steam: heat recovery efficiency factor (0...1) for recovery of heat stored in the steam
:param w_feed: water inlet temperature (in °C or K as defined by 'celsius')
:param h_val: heating value of hydrogen: 'low' -> lower heating value,
'high' -> higher heating value
:param p_ox_wscs: ratio H2/H2O / ratio CO/CO2
:param rem_unstable: if True, phases which are potentially unstable for chemical reasons are removed
this is based on the phases in "unstable_phases.json"
currently, phases are excluded for the following reasons:
- tolerance factor below 0.9 (e.g. EuCuO3, which cannot be synthesized as opposed to EuFeO3)
- phases with expected high covalency (V5+ cations, for instance, NaVO3 is stable but not a perovskite)
- phases with expected low melting point (Mo5+ cations, see this article for NaMoO3
http://www.journal.csj.jp/doi/pdf/10.1246/bcsj.64.161)
By default, this is always True and there is no way in the user front-end to change this.
        However, this could be changed manually by the developers, if necessary.
"""
if self.process == "Air Separation":
p_ox_wscs = 1
# initialize result variables
result_val_ener_i = pd.np.empty(6)
result_val_per_kg_redox = pd.np.empty(6)
result_val_per_kg_wh_redox = pd.np.empty(6)
result_val_per_kj_mol_prod = pd.np.empty(6)
result_val_per_energy_l = pd.np.empty(6)
result_val_per_energy_l_wh = pd.np.empty(6)
result_val_efficiency = pd.np.empty(2)
result_val_mol_prod_mol_red = pd.np.empty(2)
result_val_l_prod_kg_red = pd.np.empty(2)
result_val_g_prod_kg_red = pd.np.empty(2)
result_val_delta_redox = pd.np.empty(2)
result_val_mass_change = pd.np.empty(2)
for rd in resdict:
chemical_energy = rd["Chemical Energy"]
energy_sensible = rd["Sensible Energy"]
t_ox = rd["T_ox"]
t_red = rd["T_red"]
t_mean = (t_ox + t_red) / 2
delta_1 = rd["delta_1"]
delta_2 = rd["delta_2"]
g_prod_kg_red = rd["g_prod_kg_red"]
l_prod_kg_red = rd["l_prod_kg_red"]
mass_redox_i = rd["mass_redox"]
mol_mass_ox = rd["mol_mass_ox"]
mol_prod_mol_red = rd["mol_prod_mol_red"]
p_ox = rd["p_ox"]
p_red = rd["p_red"]
compstr = rd["compstr"]
prodstr = rd["prodstr"]
prodstr_alt = rd["prodstr_alt"]
unstable = rd["unstable"]
# chemical energy stored in products
if self.process == "Water Splitting":
dh_wscs = EnergyAnalysis().dhf_h2o(t_mean) * mol_prod_mol_red
elif self.process == "CO2 Splitting":
dh_wscs = EnergyAnalysis().dh_co_co2(t_mean) * mol_prod_mol_red
else:
dh_wscs = 0
energy_integral_dh = chemical_energy - ((chemical_energy + dh_wscs) * h_rec)
if len(resdict) < 50: # for experimental data: convert J/mol to kJ/mol
energy_integral_dh = energy_integral_dh / 1000
# wscs does not matter, as no water splitting / co2 splitting is considered for exp data
# pumping energy
if pump_ener != -1:
energy_pumping = (float(pump_ener) * mol_mass_ox) / 1000
else: # using mechanical envelope
# per mol O
energy_pumping = EnergyAnalysis().mechanical_envelope(p_red=p_red)
# per mol material
energy_pumping = energy_pumping * mol_prod_mol_red
# steam generation
if self.process == "Water Splitting" and h_rec_steam != 1:
energy_steam = mol_prod_mol_red * EnergyAnalysis().energy_steam_generation(
temp_1=w_feed,
temp_2=((t_ox + t_red) * 0.5) - 273.15,
h_2_h2o=p_ox_wscs,
celsius=celsius,
h_rec=h_rec_steam,
)
else:
energy_steam = 0
# total energy
energy_total = (
energy_integral_dh
+ energy_sensible * (1 - h_rec)
+ energy_pumping
+ energy_steam
)
ener_i = pd.np.array(
[
energy_total,
energy_integral_dh,
energy_sensible * (1 - h_rec),
energy_pumping,
energy_steam,
]
)
# kJ/kg of redox material
per_kg_redox = (ener_i / mol_mass_ox) * 1000
# Wh/kg of redox material
per_kg_wh_redox = per_kg_redox / 3.6
# kJ/mol of product (O, H2, or CO)
kj_mol_prod = ener_i / (delta_2 - delta_1)
# kJ/L of product (ideal gas at SATP)
energy_l = kj_mol_prod / 24.465
# convert from O to O2
if self.process == "Air Separation":
energy_l = 2 * energy_l
# Wh/L of product (ideal gas at SATP)
energy_l_wh = energy_l / 3.6
# calculate efficiency for water splitting
if self.process == "Water Splitting":
# source for heating values
# https://h2tools.org/node/3131
if h_val == "low":
h_v = 119.96
elif h_val == "high":
h_v = 141.88
else:
raise ValueError("heating_value must be either 'high' or 'low'")
# convert kJ/mol H2 to MJ/kg H2 -> divide by 2.016
efficiency = (h_v / (kj_mol_prod[0] / 2.016)) * 100
else:
efficiency = float("-inf")
delta_redox_i = float(delta_2 - delta_1)
mass_change_i = float(mass_redox_i)
compdisp = remove_comp_one(compstr=compstr)
invalid_val = False # remove data of unstable compounds
if rem_unstable and unstable:
invalid_val = True
# append new values to result and add compositions
if (
ener_i[0] < 0
) or invalid_val: # sort out negative values, heat input is always positive
ener_i[0] = float("inf")
res_i = pd.np.append(ener_i, compdisp)
result_val_ener_i = pd.np.vstack((result_val_ener_i, res_i))
if per_kg_redox[0] < 0 or invalid_val:
per_kg_redox[0] = float("inf")
res_i = pd.np.append(per_kg_redox, compdisp)
result_val_per_kg_redox = pd.np.vstack((result_val_per_kg_redox, res_i))
if per_kg_wh_redox[0] < 0 or invalid_val:
per_kg_wh_redox[0] = float("inf")
res_i = pd.np.append(per_kg_wh_redox, compdisp)
result_val_per_kg_wh_redox = pd.np.vstack(
(result_val_per_kg_wh_redox, res_i)
)
if kj_mol_prod[0] < 0 or invalid_val:
kj_mol_prod[0] = float("inf")
res_i = pd.np.append(kj_mol_prod, compdisp)
result_val_per_kj_mol_prod = pd.np.vstack(
(result_val_per_kj_mol_prod, res_i)
)
if energy_l[0] < 0 or invalid_val:
energy_l[0] = float("inf")
res_i = pd.np.append(energy_l, compdisp)
result_val_per_energy_l = pd.np.vstack((result_val_per_energy_l, res_i))
if energy_l_wh[0] < 0 or invalid_val:
energy_l_wh[0] = float("inf")
res_i = pd.np.append(energy_l_wh, compdisp)
result_val_per_energy_l_wh = pd.np.vstack(
(result_val_per_energy_l_wh, res_i)
)
if efficiency < 0 or invalid_val:
efficiency = float("-inf")
res_i = pd.np.append(efficiency, compdisp)
result_val_efficiency = pd.np.vstack((result_val_efficiency, res_i))
if mol_prod_mol_red < 0 or invalid_val:
mol_prod_mol_red = float("-inf")
res_i = pd.np.append(mol_prod_mol_red, compdisp)
result_val_mol_prod_mol_red = pd.np.vstack(
(result_val_mol_prod_mol_red, res_i)
)
if l_prod_kg_red < 0 or invalid_val:
l_prod_kg_red = float("-inf")
res_i = pd.np.append(l_prod_kg_red, compdisp)
result_val_l_prod_kg_red = pd.np.vstack((result_val_l_prod_kg_red, res_i))
if g_prod_kg_red < 0 or invalid_val:
g_prod_kg_red = float("-inf")
res_i = pd.np.append(g_prod_kg_red, compdisp)
result_val_g_prod_kg_red = pd.np.vstack((result_val_g_prod_kg_red, res_i))
if delta_redox_i < 0 or invalid_val:
delta_redox_i = float("-inf")
res_i = pd.np.append(delta_redox_i, compdisp)
result_val_delta_redox = pd.np.vstack((result_val_delta_redox, res_i))
if mass_change_i < 0 or invalid_val:
mass_change_i = float("-inf")
res_i = pd.np.append(mass_change_i, compdisp)
result_val_mass_change = pd.np.vstack((result_val_mass_change, res_i))
# sort results
result_val_ener_i = sorted(result_val_ener_i[1:], key=lambda x: float(x[0]))
result_val_per_kg_redox = sorted(
result_val_per_kg_redox[1:], key=lambda x: float(x[0])
)
result_val_per_kg_wh_redox = sorted(
result_val_per_kg_wh_redox[1:], key=lambda x: float(x[0])
)
result_val_per_kj_mol_prod = sorted(
result_val_per_kj_mol_prod[1:], key=lambda x: float(x[0])
)
result_val_per_energy_l = sorted(
result_val_per_energy_l[1:], key=lambda x: float(x[0])
)
result_val_per_energy_l_wh = sorted(
result_val_per_energy_l_wh[1:], key=lambda x: float(x[0])
)
if self.process == "Water Splitting":
result_val_efficiency = sorted(
result_val_efficiency[1:], key=lambda x: float(x[0]), reverse=True
)
else:
result_val_efficiency = result_val_efficiency[1:]
result_val_mol_prod_mol_red = sorted(
result_val_mol_prod_mol_red[1:], key=lambda x: float(x[0]), reverse=True
)
result_val_l_prod_kg_red = sorted(
result_val_l_prod_kg_red[1:], key=lambda x: float(x[0]), reverse=True
)
result_val_g_prod_kg_red = sorted(
result_val_g_prod_kg_red[1:], key=lambda x: float(x[0]), reverse=True
)
result_val_delta_redox = sorted(
result_val_delta_redox[1:], key=lambda x: float(x[0]), reverse=True
)
result_val_mass_change = sorted(
result_val_mass_change[1:], key=lambda x: float(x[0]), reverse=True
)
# create dictionary with results
dict_result = {
"kJ/mol redox material": result_val_ener_i,
"kJ/kg redox material": result_val_per_kg_redox,
"Wh/kg redox material": result_val_per_kg_wh_redox,
str("kJ/mol of " + prodstr_alt): result_val_per_kj_mol_prod,
str("kJ/L of " + prodstr): result_val_per_energy_l,
str("Wh/L of " + prodstr): result_val_per_energy_l_wh,
"Heat to fuel efficiency in % (only valid for Water Splitting)": result_val_efficiency,
str(
"mol " + prodstr_alt + " per mol redox material"
): result_val_mol_prod_mol_red,
str("L " + prodstr + " per mol redox material"): result_val_l_prod_kg_red,
str("g " + prodstr + " per mol redox material"): result_val_g_prod_kg_red,
"Change in non-stoichiometry between T_ox and T_red": result_val_delta_redox,
"Mass change between T_ox and T_red": result_val_mass_change,
}
return dict_result
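    # Workflow sketch (illustrative): calc() is the expensive step; on_the_fly() then re-evaluates
    # heat-recovery and pumping assumptions cheaply. It expects a list of per-material dicts with
    # the keys produced by calc() plus an "unstable" flag (assembled in EnergyAnalysisView below):
    #
    #     >>> ea = EnergyAnalysis(process="Water Splitting")
    #     >>> ea.on_the_fly(resdict=[...], pump_ener=-1, w_feed=200, h_rec=0.6,
    #     ...               h_rec_steam=0.8, p_ox_wscs=0.5)   # pump_ener=-1 -> mechanical envelope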
class EnergyAnalysisView(SwaggerView):
resource = ContributionsResource
def get(self):
"""Retrieve RedoxThermoCSP Energy Analysis data.
---
operationId: get_redox_thermo_csp_energy
parameters:
- name: data_source
in: query
type: string
default: Theoretical
description: data source
- name: process_type
in: query
type: string
default: Air Separation
description: process type
- name: t_ox
in: query
type: number
default: 350
description: oxidation temperature (°C)
- name: t_red
in: query
type: number
default: 600
description: reduction temperature (°C)
- name: p_ox
in: query
type: string
default: 1e-20
description: oxidation pressure (bar)
- name: p_red
in: query
type: string
default: 1e-08
description: reduction pressure (bar)
- name: h_rec
in: query
type: number
default: 0.6
description: heat recovery efficiency
- name: mech_env
in: query
type: boolean
default: True
description: use mechanical envelope
- name: cutoff
in: query
type: number
default: 25
description: max number of materials
- name: pump_ener
in: query
type: number
default: 0
description: pumping energy (kJ/kg)
- name: w_feed
in: query
type: number
default: 200
description: water feed temperature (°C)
- name: steam_h_rec
in: query
type: number
default: 0.8
description: steam heat recovery
- name: param_disp
in: query
type: string
default: kJ/L of product
description: parameter display
responses:
200:
description: Energy Analysis data as defined by contributor
schema:
type: array
items:
type: object
"""
# generate database ID
data_source = "Theo" if request.args["data_source"] == "Theoretical" else "Exp"
process = request.args["process_type"]
if process == "Air Separation":
db_id = "AS_"
elif process == "Water Splitting":
db_id = "WS_"
else:
db_id = "CS_"
suffix = db_id + request.args["t_ox"]
db_id += (
"{:.1f}_{:.1f}_{}_{}".format(
*[
float(request.args.get(k))
for k in ["t_ox", "t_red", "p_ox", "p_red"]
]
)
+ f"_{data_source}_20.0"
)
resdict = []
proj = {"data.$": 1, "columns": 1, "contribution": 1}
for a in ["stable", "unstable"]:
for b in ["O2-O", "H2-H2", "CO-CO"]:
name = f"energy-analysis_{a}_{b}_{suffix}"
objects = Tables.objects.filter(
name=name, data__match={"0": db_id}
).fields(**proj)
for obj in objects:
keys = obj["columns"][1:]
values = map(float, obj["data"][0][1:])
dct = dict(zip(keys, values))
dct["prodstr"], dct["prodstr_alt"] = b.split("-")
dct["unstable"] = bool(a == "unstable")
dct["tid"] = obj["id"]
dct["cid"] = str(obj.contribution.id)
dct["compstr"] = obj.contribution.data["formula"]
resdict.append(dct)
response = [
{"x": None, "y": None, "name": None, "type": "bar"} for i in range(4)
]
try: # calculate specific results on the fly
# pump_ener = float(payload['pump_ener'].split("/")[0])
            pump_ener = float(request.args["pump_ener"])
mech_env = request.args["mech_env"]
if (
mech_env == "true"
or mech_env == "True"
or (isinstance(mech_env, bool) and mech_env)
):
pump_ener = -1
results = EnergyAnalysis(process=process).on_the_fly(
resdict=resdict,
pump_ener=pump_ener,
w_feed=float(request.args["w_feed"]),
h_rec=float(request.args["h_rec"]),
h_rec_steam=float(request.args["steam_h_rec"]),
p_ox_wscs=float(request.args["p_ox"]),
)
prodstr = resdict[0]["prodstr"]
prodstr_alt = resdict[0]["prodstr_alt"]
param_disp = request.args["param_disp"]
if param_disp == "kJ/mol of product":
param_disp = str("kJ/mol of " + prodstr_alt)
elif param_disp == "kJ/L of product":
param_disp = str("kJ/L of " + prodstr)
elif param_disp == "Wh/L of product":
param_disp = str("Wh/L of " + prodstr)
elif param_disp == "mol product per mol redox material":
param_disp = str("mol " + prodstr_alt + " per mol redox material")
elif param_disp == "L product per mol redox material":
param_disp = str("L " + prodstr + " per mol redox material")
elif param_disp == "g product per mol redox material":
param_disp = str("g " + prodstr + " per mol redox material")
result = results[param_disp]
commonname = (
param_disp
+ ", \nT(ox)= "
+ request.args["t_ox"]
+ " °C, T(red) = "
+ request.args["t_red"]
)
if request.args["process_type"] == "Air Separation":
titlestr = (
commonname
+ " °C, p(ox)= "
+ request.args["p_ox"]
+ " bar, p(red) = "
+ request.args["p_red"]
+ " bar"
)
elif request.args["process_type"] == "CO2 Splitting":
titlestr = (
commonname
+ " °C, pCO/pCO2(ox)= "
+ request.args["p_ox"]
+ ", p(red) = "
+ request.args["p_red"]
+ " bar"
)
else: # Water Splitting
titlestr = (
commonname
+ " °C, pH2/pH2O(ox)= "
+ request.args["p_ox"]
+ ", p(red) = "
+ request.args["p_red"]
+ " bar"
)
# remove duplicates (if any)
rem_pos = -1
for elem in range(len(result)):
if elem > 0 and (result[elem][-1] == result[elem - 1][-1]):
to_remove = result[elem]
rem_pos = elem
if rem_pos > -1:
result = [i for i in result if str(i) != str(to_remove)]
result.insert(rem_pos - 1, to_remove)
result = [
i for i in result if "inf" not in str(i[0])
] # this removes all inf values
cutoff = int(
request.args["cutoff"]
) # this sets the number of materials to display in the graph
result_part = result[:cutoff] if cutoff < len(result) else result
if (
len(result_part[0]) == 2
): # output if only one y-value per material is displayed
response[0]["x"] = [i[-1] for i in result_part]
response[0]["y"] = (
pd.np.array([i[0] for i in result_part]).astype(float).tolist()
)
response[0]["name"] = param_disp
if "non-stoichiometry" in param_disp:
response[0]["name"] = (
                    param_disp.split("between")[0] + " (Δδ)"
) # otherwise would be too long for y-axis label
if "Mass change" in param_disp:
response[0]["name"] = "mass change (%)"
if "Heat to fuel efficiency" in param_disp:
response[0]["name"] = "Heat to fuel efficiency (%)"
else: # display multiple values (such as chemical energy, sensible energy, ...)
response[0]["x"] = [i[-1] for i in result_part]
response[0]["y"] = (
pd.np.array([i[1] for i in result_part]).astype(float).tolist()
)
response[0]["name"] = "Chemical Energy"
response[1]["x"] = [i[-1] for i in result_part]
response[1]["y"] = (
pd.np.array([i[2] for i in result_part]).astype(float).tolist()
)
response[1]["name"] = "Sensible Energy"
response[2]["x"] = [i[-1] for i in result_part]
response[2]["y"] = (
pd.np.array([i[3] for i in result_part]).astype(float).tolist()
)
response[2]["name"] = "Pumping Energy"
if request.args["process_type"] == "Water Splitting":
response[3]["x"] = [i[-1] for i in result_part]
response[3]["y"] = (
pd.np.array([i[4] for i in result_part]).astype(float).tolist()
)
response[3]["name"] = "Steam Generation"
response[0].update({"title": titlestr, "yaxis_title": param_disp})
except IndexError: # if the complete dict only shows inf, create empty graph
pass
return {"data": response}
# TODO these views do not check group permissions!
isograph_view = IsographView.as_view(IsographView.__name__)
energy_analysis_view = EnergyAnalysisView.as_view(EnergyAnalysisView.__name__)
redox_thermo_csp.add_url_rule(
"/energy/", view_func=energy_analysis_view, methods=["GET"]
)
redox_thermo_csp.add_url_rule(
"/<string:cid>/<string:plot_type>", view_func=isograph_view, methods=["GET"]
)
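# Request sketch (illustrative; the mount prefix of this blueprint depends on the application):
# an energy-analysis query is a GET against the "/energy/" rule registered above, e.g.
#
#   /energy/?data_source=Theoretical&process_type=Air+Separation&t_ox=350&t_red=600
#       &p_ox=1e-20&p_red=1e-08&h_rec=0.6&mech_env=true&cutoff=25&pump_ener=0
#       &w_feed=200&steam_h_rec=0.8&param_disp=kJ%2FL+of+product
#
# which returns {"data": [...]} with up to four bar-chart traces as assembled in get() above.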
|
mit
|
jennifersalas/3D_CA
|
animation.py
|
1
|
2814
|
"""
Copyright (C) 2018 Jennifer Salas
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
import standardFuncs
import defaultValues
def showPaths(planes):
maxlength = len(max(planes, key=len))
minlength = len(min(planes, key=len))
sgrid = defaultValues.GRID_SIZE
loc = defaultValues.OUR_LOCATION
    agrid = standardFuncs.generateGrid(sgrid[0] + 300, sgrid[1] + 300, loc)
lLat = agrid[0][0]
uLat = agrid[0][1]
lLon = agrid[1][0]
uLon = agrid[1][1]
fig = plt.figure()
ax = p3.Axes3D(fig)
planePlots = [plt.plot([], [], "bo")[0] for _ in range(len(planes))]
aLat = int((uLat - lLat) * standardFuncs.LATITUDE_TO_METERS) // 2
aLon = int((uLon - lLon) * standardFuncs.LONGITUDE_TO_METERS) // 2
# plt.xticks([lLat, uLat], ["West\n%im" % -aLat, "East\n%im" % aLat])
ax.set_xlim3d([lLat, uLat])
ax.set_xlabel('Latitude')
# plt.yticks([lLon, uLon], ["South\n%im\n" % -aLon, "North\n%im\n" % aLon],
# rotation=90)
ax.set_ylim3d([lLon, uLon])
ax.set_ylabel('Longitude')
ax.set_zlim3d([0, 20])
ax.set_zlabel('Altitude')
ax.set_title("Three Dimensional Collision Avoidance")
# fig.subplots_adjust(bottom=.15)
    def init():
        for planeP in planePlots:
            planeP.set_data([], [])
        return planePlots
for plane in planes:
while len(plane) < maxlength:
plane.append(plane[-1])
def animate(i):
for j,planePlot in enumerate(planePlots):
planePlot.set_data(i[j]["Latitude"],i[j]["Longitude"])
planePlot.set_marker((3,0,i[j]["bearing"]))
planePlot.set_markerfacecolor('r' if i[j]["dead"] else 'y' if i[j]["wpflag"] else 'b')
planePlot.set_3d_properties(((i[j]["Altitude"]-375) / 50) * 20)
return planePlots
planeMat = numpy.array(planes).T
print(planeMat.shape)
ani = animation.FuncAnimation(fig, animate, planeMat, interval = 25, blit = True)
plt.show()
if '__main__' == __name__:
showPaths(defaultValues.samplepath)
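# Data-shape sketch (illustrative, inferred from animate() above rather than a documented API):
# `planes` is a list with one entry per plane; each entry is that plane's telemetry over time as
# dicts with at least "Latitude", "Longitude", "Altitude", "bearing", "dead" and "wpflag", e.g.
#
#     planes = [[{"Latitude": 32.60, "Longitude": -85.49, "Altitude": 400.0,
#                 "bearing": 90.0, "dead": False, "wpflag": False}, ...], ...]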
|
agpl-3.0
|
sketchytechky/zipline
|
zipline/data/ffc/loaders/us_equity_pricing.py
|
5
|
22123
|
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import (
ABCMeta,
abstractmethod,
)
from contextlib import contextmanager
from errno import ENOENT
from os import remove
from os.path import exists
from bcolz import (
carray,
ctable,
)
from click import progressbar
from numpy import (
array,
float64,
floating,
full,
iinfo,
integer,
issubdtype,
uint32,
)
from pandas import (
DatetimeIndex,
read_csv,
Timestamp,
)
from six import (
iteritems,
string_types,
with_metaclass,
)
import sqlite3
from zipline.data.ffc.base import FFCLoader
from zipline.data.ffc.loaders._equities import (
_compute_row_slices,
_read_bcolz_data,
)
from zipline.data.ffc.loaders._adjustments import load_adjustments_from_sqlite
from zipline.lib.adjusted_array import (
adjusted_array,
)
from zipline.errors import NoFurtherDataError
OHLC = frozenset(['open', 'high', 'low', 'close'])
US_EQUITY_PRICING_BCOLZ_COLUMNS = [
'open', 'high', 'low', 'close', 'volume', 'day', 'id'
]
DAILY_US_EQUITY_PRICING_DEFAULT_FILENAME = 'daily_us_equity_pricing.bcolz'
SQLITE_ADJUSTMENT_COLUMNS = frozenset(['effective_date', 'ratio', 'sid'])
SQLITE_ADJUSTMENT_COLUMN_DTYPES = {
'effective_date': integer,
'ratio': floating,
'sid': integer,
}
SQLITE_ADJUSTMENT_TABLENAMES = frozenset(['splits', 'dividends', 'mergers'])
UINT32_MAX = iinfo(uint32).max
@contextmanager
def passthrough(obj):
yield obj
class BcolzDailyBarWriter(with_metaclass(ABCMeta)):
"""
Class capable of writing daily OHLCV data to disk in a format that can be
read efficiently by BcolzDailyOHLCVReader.
See Also
--------
BcolzDailyBarReader : Consumer of the data written by this class.
"""
@abstractmethod
def gen_tables(self, assets):
"""
Return an iterator of pairs of (asset_id, bcolz.ctable).
"""
raise NotImplementedError()
@abstractmethod
def to_uint32(self, array, colname):
"""
Convert raw column values produced by gen_tables into uint32 values.
Parameters
----------
array : np.array
An array of raw values.
colname : str, {'open', 'high', 'low', 'close', 'volume', 'day'}
The name of the column being loaded.
For output being read by the default BcolzOHLCVReader, data should be
stored in the following manner:
- Pricing columns (Open, High, Low, Close) should be stored as 1000 *
as-traded dollar value.
- Volume should be the as-traded volume.
- Dates should be stored as seconds since midnight UTC, Jan 1, 1970.
"""
raise NotImplementedError()
def write(self, filename, calendar, assets, show_progress=False):
"""
Parameters
----------
filename : str
The location at which we should write our output.
calendar : pandas.DatetimeIndex
Calendar to use to compute asset calendar offsets.
assets : pandas.Int64Index
The assets for which to write data.
show_progress : bool
Whether or not to show a progress bar while writing.
Returns
-------
table : bcolz.ctable
The newly-written table.
"""
_iterator = self.gen_tables(assets)
if show_progress:
pbar = progressbar(
_iterator,
length=len(assets),
item_show_func=lambda i: i if i is None else str(i[0]),
label="Merging asset files:",
)
with pbar as pbar_iterator:
return self._write_internal(filename, calendar, pbar_iterator)
return self._write_internal(filename, calendar, _iterator)
def _write_internal(self, filename, calendar, iterator):
"""
Internal implementation of write.
`iterator` should be an iterator yielding pairs of (asset, ctable).
"""
total_rows = 0
first_row = {}
last_row = {}
calendar_offset = {}
# Maps column name -> output carray.
columns = {
k: carray(array([], dtype=uint32))
for k in US_EQUITY_PRICING_BCOLZ_COLUMNS
}
for asset_id, table in iterator:
nrows = len(table)
for column_name in columns:
if column_name == 'id':
# We know what the content of this column is, so don't
# bother reading it.
columns['id'].append(full((nrows,), asset_id))
continue
columns[column_name].append(
self.to_uint32(table[column_name][:], column_name)
)
# Bcolz doesn't support ints as keys in `attrs`, so convert
# assets to strings for use as attr keys.
asset_key = str(asset_id)
# Calculate the index into the array of the first and last row
# for this asset. This allows us to efficiently load single
# assets when querying the data back out of the table.
first_row[asset_key] = total_rows
last_row[asset_key] = total_rows + nrows - 1
total_rows += nrows
# Calculate the number of trading days between the first date
# in the stored data and the first date of **this** asset. This
# offset used for output alignment by the reader.
# HACK: Index with a list so that we get back an array we can pass
# to self.to_uint32. We could try to extract this in the loop
# above, but that makes the logic a lot messier.
asset_first_day = self.to_uint32(table['day'][[0]], 'day')[0]
calendar_offset[asset_key] = calendar.get_loc(
Timestamp(asset_first_day, unit='s', tz='UTC'),
)
# This writes the table to disk.
full_table = ctable(
columns=[
columns[colname]
for colname in US_EQUITY_PRICING_BCOLZ_COLUMNS
],
names=US_EQUITY_PRICING_BCOLZ_COLUMNS,
rootdir=filename,
mode='w',
)
full_table.attrs['first_row'] = first_row
full_table.attrs['last_row'] = last_row
full_table.attrs['calendar_offset'] = calendar_offset
full_table.attrs['calendar'] = calendar.asi8.tolist()
return full_table
class DailyBarWriterFromCSVs(BcolzDailyBarWriter):
"""
BcolzDailyBarWriter constructed from a map from csvs to assets.
Parameters
----------
asset_map : dict
A map from asset_id -> path to csv with data for that asset.
CSVs should have the following columns:
day : datetime64
open : float64
high : float64
low : float64
close : float64
volume : int64
"""
_csv_dtypes = {
'open': float64,
'high': float64,
'low': float64,
'close': float64,
'volume': float64,
}
def __init__(self, asset_map):
self._asset_map = asset_map
def gen_tables(self, assets):
"""
Read CSVs as DataFrames from our asset map.
"""
dtypes = self._csv_dtypes
for asset in assets:
path = self._asset_map.get(asset)
if path is None:
raise KeyError("No path supplied for asset %s" % asset)
data = read_csv(path, parse_dates=['day'], dtype=dtypes)
yield asset, ctable.fromdataframe(data)
def to_uint32(self, array, colname):
arrmax = array.max()
if colname in OHLC:
self.check_uint_safe(arrmax * 1000, colname)
return (array * 1000).astype(uint32)
elif colname == 'volume':
self.check_uint_safe(arrmax, colname)
return array.astype(uint32)
elif colname == 'day':
nanos_per_second = (1000 * 1000 * 1000)
self.check_uint_safe(arrmax.view(int) / nanos_per_second, colname)
return (array.view(int) / nanos_per_second).astype(uint32)
@staticmethod
def check_uint_safe(value, colname):
if value >= UINT32_MAX:
raise ValueError(
"Value %s from column '%s' is too large" % (value, colname)
)
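# Usage sketch (illustrative; the paths, asset ids, and calendar are hypothetical):
#
#     writer = DailyBarWriterFromCSVs({1: 'csv/AAPL.csv', 2: 'csv/MSFT.csv'})
#     table = writer.write('daily_us_equity_pricing.bcolz', trading_calendar, [1, 2],
#                          show_progress=True)
#
# where `trading_calendar` is a pandas.DatetimeIndex of trading days and each CSV provides the
# day/open/high/low/close/volume columns described in the class docstring above.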
class BcolzDailyBarReader(object):
"""
Reader for raw pricing data written by BcolzDailyOHLCVWriter.
A Bcolz CTable is comprised of Columns and Attributes.
Columns
-------
The table with which this loader interacts contains the following columns:
['open', 'high', 'low', 'close', 'volume', 'day', 'id'].
The data in these columns is interpreted as follows:
- Price columns ('open', 'high', 'low', 'close') are interpreted as 1000 *
as-traded dollar value.
- Volume is interpreted as as-traded volume.
- Day is interpreted as seconds since midnight UTC, Jan 1, 1970.
- Id is the asset id of the row.
The data in each column is grouped by asset and then sorted by day within
each asset block.
The table is built to represent a long time range of data, e.g. ten years
of equity data, so the lengths of each asset block is not equal to each
other. The blocks are clipped to the known start and end date of each asset
to cut down on the number of empty values that would need to be included to
make a regular/cubic dataset.
When read across the open, high, low, close, and volume with the same
index should represent the same asset and day.
Attributes
----------
The table with which this loader interacts contains the following
attributes:
first_row : dict
Map from asset_id -> index of first row in the dataset with that id.
last_row : dict
Map from asset_id -> index of last row in the dataset with that id.
calendar_offset : dict
Map from asset_id -> calendar index of first row.
calendar : list[int64]
Calendar used to compute offsets, in asi8 format (ns since EPOCH).
We use first_row and last_row together to quickly find ranges of rows to
load when reading an asset's data into memory.
We use calendar_offset and calendar to orient loaded blocks within a
range of queried dates.
"""
def __init__(self, table):
if isinstance(table, string_types):
table = ctable(rootdir=table, mode='r')
self._table = table
self._calendar = DatetimeIndex(table.attrs['calendar'], tz='UTC')
self._first_rows = {
int(asset_id): start_index
for asset_id, start_index in iteritems(table.attrs['first_row'])
}
self._last_rows = {
int(asset_id): end_index
for asset_id, end_index in iteritems(table.attrs['last_row'])
}
self._calendar_offsets = {
int(id_): offset
for id_, offset in iteritems(table.attrs['calendar_offset'])
}
def _compute_slices(self, start_idx, end_idx, assets):
"""
Compute the raw row indices to load for each asset on a query for the
given dates after applying a shift.
Parameters
----------
start_idx : int
Index of first date for which we want data.
end_idx : int
Index of last date for which we want data.
assets : pandas.Int64Index
Assets for which we want to compute row indices
Returns
-------
A 3-tuple of (first_rows, last_rows, offsets):
first_rows : np.array[intp]
Array with length == len(assets) containing the index of the first
row to load for each asset in `assets`.
last_rows : np.array[intp]
Array with length == len(assets) containing the index of the last
row to load for each asset in `assets`.
offset : np.array[intp]
            Array with length == len(assets) containing the index in a buffer
of length `dates` corresponding to the first row of each asset.
The value of offset[i] will be 0 if asset[i] existed at the start
of a query. Otherwise, offset[i] will be equal to the number of
entries in `dates` for which the asset did not yet exist.
"""
# The core implementation of the logic here is implemented in Cython
# for efficiency.
return _compute_row_slices(
self._first_rows,
self._last_rows,
self._calendar_offsets,
start_idx,
end_idx,
assets,
)
def load_raw_arrays(self, columns, start_date, end_date, assets):
# Assumes that the given dates are actually in calendar.
start_idx = self._calendar.get_loc(start_date)
end_idx = self._calendar.get_loc(end_date)
first_rows, last_rows, offsets = self._compute_slices(
start_idx,
end_idx,
assets,
)
return _read_bcolz_data(
self._table,
(end_idx - start_idx + 1, len(assets)),
[column.name for column in columns],
first_rows,
last_rows,
offsets,
)
class SQLiteAdjustmentWriter(object):
"""
    Writer for data to be read by SQLiteAdjustmentReader
Parameters
----------
conn_or_path : str or sqlite3.Connection
A handle to the target sqlite database.
overwrite : bool, optional, default=False
If True and conn_or_path is a string, remove any existing files at the
given path before connecting.
See Also
--------
SQLiteAdjustmentReader
"""
def __init__(self, conn_or_path, overwrite=False):
if isinstance(conn_or_path, sqlite3.Connection):
self.conn = conn_or_path
elif isinstance(conn_or_path, str):
if overwrite and exists(conn_or_path):
try:
remove(conn_or_path)
except OSError as e:
if e.errno != ENOENT:
raise
self.conn = sqlite3.connect(conn_or_path)
else:
raise TypeError("Unknown connection type %s" % type(conn_or_path))
def write_frame(self, tablename, frame):
if frozenset(frame.columns) != SQLITE_ADJUSTMENT_COLUMNS:
raise ValueError(
"Unexpected frame columns:\n"
"Expected Columns: %s\n"
"Received Columns: %s" % (
SQLITE_ADJUSTMENT_COLUMNS,
frame.columns.tolist(),
)
)
elif tablename not in SQLITE_ADJUSTMENT_TABLENAMES:
raise ValueError(
"Adjustment table %s not in %s" % (
tablename, SQLITE_ADJUSTMENT_TABLENAMES
)
)
expected_dtypes = SQLITE_ADJUSTMENT_COLUMN_DTYPES
actual_dtypes = frame.dtypes
for colname, expected in iteritems(expected_dtypes):
actual = actual_dtypes[colname]
if not issubdtype(actual, expected):
raise TypeError(
"Expected data of type {expected} for column '{colname}', "
"but got {actual}.".format(
expected=expected,
colname=colname,
actual=actual,
)
)
return frame.to_sql(tablename, self.conn)
def write(self, splits, mergers, dividends):
"""
Writes data to a SQLite file to be read by SQLiteAdjustmentReader.
Parameters
----------
splits : pandas.DataFrame
Dataframe containing split data.
mergers : pandas.DataFrame
DataFrame containing merger data.
dividends : pandas.DataFrame
DataFrame containing dividend data.
Notes
-----
DataFrame input (`splits`, `mergers`, and `dividends`) should all have
the following columns:
effective_date : int
The date, represented as seconds since Unix epoch, on which the
adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
sid : int
The asset id associated with this adjustment.
The ratio column is interpreted as follows:
- For all adjustment types, multiply price fields ('open', 'high',
'low', and 'close') by the ratio.
- For **splits only**, **divide** volume by the adjustment ratio.
Dividend ratios should be calculated as
1.0 - (dividend_value / "close on day prior to dividend ex_date").
Returns
-------
None
See Also
--------
SQLiteAdjustmentReader : Consumer for the data written by this class
"""
self.write_frame('splits', splits)
self.write_frame('mergers', mergers)
self.write_frame('dividends', dividends)
self.conn.execute(
"CREATE INDEX splits_sids "
"ON splits(sid)"
)
self.conn.execute(
"CREATE INDEX splits_effective_date "
"ON splits(effective_date)"
)
self.conn.execute(
"CREATE INDEX mergers_sids "
"ON mergers(sid)"
)
self.conn.execute(
"CREATE INDEX mergers_effective_date "
"ON mergers(effective_date)"
)
self.conn.execute(
"CREATE INDEX dividends_sid "
"ON dividends(sid)"
)
self.conn.execute(
"CREATE INDEX dividends_effective_date "
"ON dividends(effective_date)"
)
def close(self):
self.conn.close()
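# Usage sketch (illustrative; the values are made up). Each frame passed to write() needs exactly
# the columns effective_date (int, seconds since epoch), ratio (float) and sid (int):
#
#     from pandas import DataFrame
#     frame = DataFrame({'effective_date': [1401321600], 'ratio': [1.0 / 7.0], 'sid': [1]})
#     writer = SQLiteAdjustmentWriter('adjustments.db', overwrite=True)
#     writer.write(splits=frame, mergers=frame, dividends=frame)
#     writer.close()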
class SQLiteAdjustmentReader(object):
"""
Loads adjustments based on corporate actions from a SQLite database.
Expects data written in the format output by `SQLiteAdjustmentWriter`.
Parameters
----------
conn : str or sqlite3.Connection
Connection from which to load data.
"""
def __init__(self, conn):
if isinstance(conn, str):
conn = sqlite3.connect(conn)
self.conn = conn
def load_adjustments(self, columns, dates, assets):
return load_adjustments_from_sqlite(
self.conn,
[column.name for column in columns],
dates,
assets,
)
class USEquityPricingLoader(FFCLoader):
"""
FFCLoader for US Equity Pricing
Delegates loading of baselines and adjustments.
"""
def __init__(self, raw_price_loader, adjustments_loader):
self.raw_price_loader = raw_price_loader
# HACK: Pull the calendar off our raw_price_loader so that we can
# backshift dates.
self._calendar = self.raw_price_loader._calendar
self.adjustments_loader = adjustments_loader
def load_adjusted_array(self, columns, dates, assets, mask):
# load_adjusted_array is called with dates on which the user's algo
# will be shown data, which means we need to return the data that would
# be known at the start of each date. We assume that the latest data
# known on day N is the data from day (N - 1), so we shift all query
# dates back by a day.
start_date, end_date = _shift_dates(
self._calendar, dates[0], dates[-1], shift=1,
)
raw_arrays = self.raw_price_loader.load_raw_arrays(
columns,
start_date,
end_date,
assets,
)
adjustments = self.adjustments_loader.load_adjustments(
columns,
dates,
assets,
)
return [
adjusted_array(raw_array, mask, col_adjustments)
for raw_array, col_adjustments in zip(raw_arrays, adjustments)
]
def _shift_dates(dates, start_date, end_date, shift):
try:
start = dates.get_loc(start_date)
except KeyError:
if start_date < dates[0]:
raise NoFurtherDataError(
msg=(
"Modeling Query requested data starting on {query_start}, "
"but first known date is {calendar_start}"
).format(
query_start=str(start_date),
calendar_start=str(dates[0]),
)
)
else:
raise ValueError("Query start %s not in calendar" % start_date)
# Make sure that shifting doesn't push us out of the calendar.
if start < shift:
raise NoFurtherDataError(
msg=(
"Modeling Query requested data from {shift}"
" days before {query_start}, but first known date is only "
"{start} days earlier."
).format(shift=shift, query_start=start_date, start=start),
)
try:
end = dates.get_loc(end_date)
except KeyError:
if end_date > dates[-1]:
raise NoFurtherDataError(
msg=(
"Modeling Query requesting data up to {query_end}, "
"but last known date is {calendar_end}"
).format(
query_end=end_date,
calendar_end=dates[-1],
)
)
else:
raise ValueError("Query end %s not in calendar" % end_date)
return dates[start - shift], dates[end - shift]
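# Wiring sketch (illustrative; the file paths are hypothetical): the pieces above combine into a
# pipeline loader roughly as follows.
#
#     daily_bars = BcolzDailyBarReader('daily_us_equity_pricing.bcolz')
#     adjustments = SQLiteAdjustmentReader('adjustments.db')
#     loader = USEquityPricingLoader(daily_bars, adjustments)
#     # load_adjusted_array(columns, dates, assets, mask) then returns one adjusted_array per
#     # requested column, with baseline prices shifted back one trading day.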
|
apache-2.0
|