repo_name | path | copies | size | content | license
---|---|---|---|---|---|
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/matplotlib/stackplot.py | 7 | 4266 | """
Stacked area plot for 1D arrays inspired by Douglas Y'barbo's stackoverflow
answer:
http://stackoverflow.com/questions/2225995/how-can-i-create-stacked-line-graph-with-matplotlib
(http://stackoverflow.com/users/66549/doug)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
from cycler import cycler
import numpy as np
__all__ = ['stackplot']
def stackplot(axes, x, *args, **kwargs):
"""Draws a stacked area plot.
*x* : 1d array of dimension N
*y* : 2d array of dimension MxN, OR any number of 1d arrays each of dimension
1xN. The data is assumed to be unstacked. Each of the following
calls is legal::
stackplot(x, y) # where y is MxN
stackplot(x, y1, y2, y3, y4) # where y1, y2, y3, y4 are all 1xN
Keyword arguments:
*baseline* : ['zero', 'sym', 'wiggle', 'weighted_wiggle']
Method used to calculate the baseline. 'zero' is just a
simple stacked plot. 'sym' is symmetric around zero and
is sometimes called `ThemeRiver`. 'wiggle' minimizes the
sum of the squared slopes. 'weighted_wiggle' does the
same but weights to account for size of each layer.
It is also called `Streamgraph`-layout. More details
can be found at http://www.leebyron.com/else/streamgraph/.
*labels* : A list or tuple of labels to assign to each data series.
*colors* : A list or tuple of colors. These will be cycled through and
used to colour the stacked areas.
All other keyword arguments are passed to
:func:`~matplotlib.Axes.fill_between`
Returns *r* : A list of
:class:`~matplotlib.collections.PolyCollection`, one for each
element in the stacked area plot.
"""
if len(args) == 1:
y = np.atleast_2d(*args)
elif len(args) > 1:
y = np.row_stack(args)
labels = iter(kwargs.pop('labels', []))
colors = kwargs.pop('colors', None)
if colors is not None:
axes.set_prop_cycle(cycler('color', colors))
baseline = kwargs.pop('baseline', 'zero')
# Assume data passed has not been 'stacked', so stack it here.
stack = np.cumsum(y, axis=0)
r = []
if baseline == 'zero':
first_line = 0.
elif baseline == 'sym':
first_line = -np.sum(y, 0) * 0.5
stack += first_line[None, :]
elif baseline == 'wiggle':
m = y.shape[0]
first_line = (y * (m - 0.5 - np.arange(0, m)[:, None])).sum(0)
first_line /= -m
stack += first_line
elif baseline == 'weighted_wiggle':
m, n = y.shape
center = np.zeros(n)
total = np.sum(y, 0)
increase = np.hstack((y[:, 0:1], np.diff(y)))
below_size = total - stack
below_size += 0.5 * y
move_up = below_size / total
move_up[:, 0] = 0.5
center = (move_up - 0.5) * increase
center = np.cumsum(center.sum(0))
first_line = center - 0.5 * total
stack += first_line
else:
errstr = "Baseline method %s not recognised. " % baseline
errstr += "Expected 'zero', 'sym', 'wiggle' or 'weighted_wiggle'"
raise ValueError(errstr)
# Color between x = 0 and the first array.
if 'color' in axes._get_lines._prop_keys:
color = six.next(axes._get_lines.prop_cycler)['color']
else:
color = None
r.append(axes.fill_between(x, first_line, stack[0, :],
facecolor=color,
label= six.next(labels, None),
**kwargs))
# Color between array i-1 and array i
for i in xrange(len(y) - 1):
if 'color' in axes._get_lines._prop_keys:
color = six.next(axes._get_lines.prop_cycler)['color']
else:
color = None
r.append(axes.fill_between(x, stack[i, :], stack[i + 1, :],
facecolor=color,
label= six.next(labels, None),
**kwargs))
return r
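# Hedged usage sketch appended for illustration only (not part of the original
# matplotlib module): it exercises the *baseline* and *labels* keywords through
# the ``Axes.stackplot`` wrapper, which forwards to the function above. All
# data values below are made up.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    rng = np.random.RandomState(0)
    x = np.arange(100)
    # three unstacked 1xN series with a slow upward drift
    y1, y2, y3 = rng.rand(3, 100).cumsum(axis=1)
    fig, ax = plt.subplots()
    ax.stackplot(x, y1, y2, y3, baseline='weighted_wiggle',
                 labels=['series A', 'series B', 'series C'])
    ax.legend(loc='upper left')
    plt.show()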
| mit |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/matplotlib/offsetbox.py | 1 | 50154 | """
The OffsetBox is a simple container artist. Its child artists are meant
to be drawn at a position relative to their parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.
The [VH]Packer automatically adjusts the relative positions of its
children, which should be instances of the OffsetBox. This is used to
align similar artists together, e.g., in a legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance are the
width and height of its child text.
"""
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import numpy as np
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox, \
IdentityTransform, BboxTransformFrom
from matplotlib.font_manager import FontProperties
from matplotlib.patches import FancyBboxPatch, FancyArrowPatch
from matplotlib import rcParams
from matplotlib import docstring
import matplotlib.cbook as cbook
#from bboximage import BboxImage
from matplotlib.image import BboxImage
from matplotlib.patches import bbox_artist as mbbox_artist
DEBUG=False
# for debugging use
def bbox_artist(*args, **kwargs):
if DEBUG:
mbbox_artist(*args, **kwargs)
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally. But the same functions will be
# used with vertical packing.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
Given a list of (width, xdescent) for each box, calculate the
total width and the x-offset position of each item according to
*mode*. xdescent is analogous to the usual descent, but along the
x-direction. xdescent values are currently ignored.
*wd_list* : list of (width, xdescent) of boxes to be packed.
*sep* : spacing between boxes
*total* : Intended total length. None if not used.
*mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = zip(*wd_list)
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
sep = (total - sum(w_list))/(len(w_list)-1.)
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh+sep)*len(w_list)
else:
sep = float(total)/(len(w_list)) - maxh
offsets = np.array([(maxh+sep)*i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
Given a list of (height, descent) for each box, align the boxes
with *align* and calculate the y-offset of each box. The total
height and descent are also returned.
*hd_list* : list of (height, descent) of boxes to be aligned.
*height* : Intended total height. None if not used.
*align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h-d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left","top"]:
descent=0.
offsets = [d for h, d in hd_list]
elif align in ["right","bottom"]:
descent=0.
offsets = [height-h+d for h, d in hd_list]
elif align == "center":
descent=0.
offsets = [(height-h)*.5+d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
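# Illustrative (commented) walk-through, not part of the original source:
# for hd_list=[(3, 1), (2, 0)] and height=None, "baseline" alignment gives
# height = max(3-1, 2-0) + max(1, 0) = 3, descent = 1 and offsets [0, 0],
# while "center" alignment gives descent = 0 and offsets
# [(3-3)*0.5 + 1, (3-2)*0.5 + 0] = [1.0, 0.5].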
class OffsetBox(martist.Artist):
"""
The OffsetBox is a simple container artist. The child artist are meant
to be drawn at a relative position to its parent.
"""
def __init__(self, *args, **kwargs):
super(OffsetBox, self).__init__(*args, **kwargs)
self._children = []
self._offset = (0, 0)
def set_figure(self, fig):
"""
Set the figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
for c in self.get_children():
c.set_figure(fig)
def contains(self, mouseevent):
for c in self.get_children():
a, b = c.contains(mouseevent)
if a:
return a, b
return False, {}
def set_offset(self, xy):
"""
Set the offset
accepts x, y, tuple, or a callable object.
"""
self._offset = xy
def get_offset(self, width, height, xdescent, ydescent, renderer):
"""
Get the offset
accepts extent of the box
"""
if callable(self._offset):
return self._offset(width, height, xdescent, ydescent, renderer)
else:
return self._offset
def set_width(self, width):
"""
Set the width
accepts float
"""
self.width = width
def set_height(self, height):
"""
Set the height
accepts float
"""
self.height = height
def get_visible_children(self):
"""
Return a list of visible artists it contains.
"""
return [c for c in self._children if c.get_visible()]
def get_children(self):
"""
Return a list of artists it contains.
"""
return self._children
def get_extent_offsets(self, renderer):
raise Exception("")
def get_extent(self, renderer):
"""
Return width, height, xdescent, ydescent of the box
"""
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
return w, h, xd, yd
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(w, h, xd, yd, renderer)
return mtransforms.Bbox.from_bounds(px-xd, py-yd, w, h)
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
for c, (ox, oy) in zip(self.get_visible_children(), offsets):
c.set_offset((px+ox, py+oy))
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class PackerBase(OffsetBox):
def __init__(self, pad=None, sep=None, width=None, height=None,
align=None, mode=None,
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes. Can be one of 'top', 'bottom',
'left', 'right', 'center' and 'baseline'
*mode* : packing mode
.. note::
*pad* and *sep* need to be given in points and will be
scaled with the renderer dpi, while *width* and *height*
need to be in pixels.
"""
super(PackerBase, self).__init__()
self.height = height
self.width = width
self.sep = sep
self.pad = pad
self.mode = mode
self.align = align
self._children = children
class VPacker(PackerBase):
"""
The VPacker has its children packed vertically. It automatically
adjusts the relative positions of its children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
.. note::
*pad* and *sep* need to be given in points and will be
scaled with the renderer dpi, while *width* and *height*
need to be in pixels.
"""
super(VPacker, self).__init__(pad, sep, width, height,
align, mode,
children)
def get_extent_offsets(self, renderer):
"""
update the offsets of the children and return the extents of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
sep = self.sep * dpicor
if self.width is not None:
for c in self.get_visible_children():
if isinstance(c, PackerBase) and c.mode == "expand":
c.set_width(self.width)
whd_list = [c.get_extent(renderer) for c in self.get_visible_children()]
whd_list = [(w, h, xd, (h-yd)) for w, h, xd, yd in whd_list]
wd_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
self.width,
self.align)
pack_list = [(h, yd) for w,h,xd,yd in whd_list]
height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
sep, self.mode)
yoffsets = yoffsets_ + [yd for w,h,xd,yd in whd_list]
ydescent = height - yoffsets[0]
yoffsets = height - yoffsets
#w, h, xd, h_yd = whd_list[-1]
yoffsets = yoffsets - ydescent
return width + 2*pad, height + 2*pad, \
xdescent+pad, ydescent+pad, \
zip(xoffsets, yoffsets)
class HPacker(PackerBase):
"""
The HPacker has its children packed horizontally. It automatically
adjusts the relative positions of its children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
.. note::
*pad* and *sep* need to be given in points and will be
scaled with the renderer dpi, while *width* and *height*
need to be in pixels.
"""
super(HPacker, self).__init__(pad, sep, width, height,
align, mode, children)
def get_extent_offsets(self, renderer):
"""
update the offsets of the children and return the extents of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
sep = self.sep * dpicor
whd_list = [c.get_extent(renderer) for c in self.get_visible_children()]
if not whd_list:
return 2*pad, 2*pad, pad, pad, []
if self.height is None:
height_descent = max([h-yd for w,h,xd,yd in whd_list])
ydescent = max([yd for w,h,xd,yd in whd_list])
height = height_descent + ydescent
else:
height = self.height - 2*pad # width w/o pad
hd_list = [(h, yd) for w, h, xd, yd in whd_list]
height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
self.height,
self.align)
pack_list = [(w, xd) for w,h,xd,yd in whd_list]
width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
sep, self.mode)
xoffsets = xoffsets_ + [xd for w,h,xd,yd in whd_list]
xdescent=whd_list[0][2]
xoffsets = xoffsets - xdescent
return width + 2*pad, height + 2*pad, \
xdescent + pad, ydescent + pad, \
zip(xoffsets, yoffsets)
class PaddedBox(OffsetBox):
def __init__(self, child, pad=None, draw_frame=False, patch_attrs=None):
"""
*pad* : boundary pad
.. note::
*pad* needs to be given in points and will be
scaled with the renderer dpi, while *width* and *height*
need to be in pixels.
"""
super(PaddedBox, self).__init__()
self.pad = pad
self._children = [child]
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=1, #self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square",pad=0)
if patch_attrs is not None:
self.patch.update(patch_attrs)
self._drawFrame = draw_frame
def get_extent_offsets(self, renderer):
"""
update the offsets of the children and return the extents of the box
"""
dpicor = renderer.points_to_pixels(1.)
pad = self.pad * dpicor
w, h, xd, yd = self._children[0].get_extent(renderer)
return w + 2*pad, h + 2*pad, \
xd+pad, yd+pad, \
[(0, 0)]
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
for c, (ox, oy) in zip(self.get_visible_children(), offsets):
c.set_offset((px+ox, py+oy))
self.draw_frame(renderer)
for c in self.get_visible_children():
c.draw(renderer)
#bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
def update_frame(self, bbox, fontsize=None):
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
if fontsize:
self.patch.set_mutation_scale(fontsize)
def draw_frame(self, renderer):
# update the location and size of the legend
bbox = self.get_window_extent(renderer)
self.update_frame(bbox)
if self._drawFrame:
self.patch.draw(renderer)
class DrawingArea(OffsetBox):
"""
The DrawingArea can contain any Artist as a child. The DrawingArea
has a fixed width and height. The position of children relative to
the parent is fixed.
"""
def __init__(self, width, height, xdescent=0.,
ydescent=0., clip=True):
"""
*width*, *height* : width and height of the container box.
*xdescent*, *ydescent* : descent of the box in x- and y-direction.
"""
super(DrawingArea, self).__init__()
self.width = width
self.height = height
self.xdescent = xdescent
self.ydescent = ydescent
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self.dpi_transform = mtransforms.Affine2D()
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.dpi_transform + self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
Accepts a tuple of (x, y) coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
"""
Return width, height, xdescent, ydescent of the box
"""
dpi_cor = renderer.points_to_pixels(1.)
return self.width*dpi_cor, self.height*dpi_cor, \
self.xdescent*dpi_cor, self.ydescent*dpi_cor
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
if not a.is_transform_set():
a.set_transform(self.get_transform())
def draw(self, renderer):
"""
Draw the children
"""
dpi_cor = renderer.points_to_pixels(1.)
self.dpi_transform.clear()
self.dpi_transform.scale(dpi_cor, dpi_cor)
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class TextArea(OffsetBox):
"""
The TextArea contains a single Text instance. The text is
placed at (0, 0) with baseline+left alignment. The width and height
of the TextArea instance are the width and height of its child
text.
"""
def __init__(self, s,
textprops=None,
multilinebaseline=None,
minimumdescent=True,
):
"""
*s* : a string to be displayed.
*textprops* : property dictionary for the text
*multilinebaseline* : If True, baseline for multiline text is
adjusted so that it is (approximately)
center-aligned with single-line text.
*minimumdescent* : If True, the box has a minimum descent of "p".
"""
if textprops is None:
textprops = {}
if "va" not in textprops:
textprops["va"]="baseline"
self._text = mtext.Text(0, 0, s, **textprops)
OffsetBox.__init__(self)
self._children = [self._text]
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self._baseline_transform = mtransforms.Affine2D()
self._text.set_transform(self.offset_transform+self._baseline_transform)
self._multilinebaseline = multilinebaseline
self._minimumdescent = minimumdescent
def set_text(self, s):
"set text"
self._text.set_text(s)
def get_text(self):
"get text"
return self._text.get_text()
def set_multilinebaseline(self, t):
"""
Set multilinebaseline.
If True, the baseline for multiline text is
adjusted so that it is (approximately) center-aligned with
single-line text.
"""
self._multilinebaseline = t
def get_multilinebaseline(self):
"""
get multilinebaseline .
"""
return self._multilinebaseline
def set_minimumdescent(self, t):
"""
Set minimumdescent .
If True, extent of the single line text is adjusted so that
it has minimum descent of "p"
"""
self._minimumdescent = t
def get_minimumdescent(self):
"""
get minimumdescent.
"""
return self._minimumdescent
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
Accepts a tuple of (x, y) coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
clean_line, ismath = self._text.is_math_text(self._text._text)
_, h_, d_ = renderer.get_text_width_height_descent(
"lp", self._text._fontproperties, ismath=False)
bbox, info = self._text._get_layout(renderer)
w, h = bbox.width, bbox.height
line = info[-1][0] # last line
_, hh, dd = renderer.get_text_width_height_descent(
line, self._text._fontproperties, ismath=ismath)
d = dd # the baseline of the last line
self._baseline_transform.clear()
if len(info) > 1 and self._multilinebaseline:
d_new = 0.5 * h - 0.5 * (h_ - d_)
self._baseline_transform.translate(0, d - d_new)
d = d_new
else: # single line
h_d = max(h_ - d_, h-d)
if self.get_minimumdescent():
## to have a minimum descent, #i.e., "l" and "p" have same
## descents.
d = max(d, d_)
#else:
# d = d
h = h_d + d
return w, h, 0., d
def draw(self, renderer):
"""
Draw the children
"""
self._text.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class AuxTransformBox(OffsetBox):
"""
Offset Box with the aux_transform. Its children will be
transformed with the aux_transform first and then
offset. The absolute coordinate of the aux_transform is meaningless
as it will be automatically adjusted so that the lower-left corner
of the bounding box of the children is set to (0, 0) before the
offset transform.
It is similar to DrawingArea, except that the extent of the box
is not predetermined but calculated from the window extent of its
children. Furthermore, the extent of the children will be
calculated in the transformed coordinates.
"""
def __init__(self, aux_transform):
self.aux_transform = aux_transform
OffsetBox.__init__(self)
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
# ref_offset_transform is used so that the offset_transform always
# references the lower-left corner of the bbox of its
# children.
self.ref_offset_transform = mtransforms.Affine2D()
self.ref_offset_transform.clear()
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
a.set_transform(self.get_transform())
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.aux_transform + \
self.ref_offset_transform + \
self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
set offset of the container.
Accepts a tuple of (x, y) coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
# clear the offset transforms
_off = self.offset_transform.to_values() # to be restored later
self.ref_offset_transform.clear()
self.offset_transform.clear()
# calculate the extent
bboxes = [c.get_window_extent(renderer) for c in self._children]
ub = mtransforms.Bbox.union(bboxes)
# adjust ref_offset_transform
self.ref_offset_transform.translate(-ub.x0, -ub.y0)
# restore offset transform
mtx = self.offset_transform.matrix_from_values(*_off)
self.offset_transform.set_matrix(mtx)
return ub.width, ub.height, 0., 0.
def draw(self, renderer):
"""
Draw the children
"""
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class AnchoredOffsetbox(OffsetBox):
"""
An offset box placed according to the legend location
loc. AnchoredOffsetbox has a single child. When multiple children
are needed, use another OffsetBox class to enclose them. By default,
the offset box is anchored against its parent axes. You may
explicitly specify the bbox_to_anchor.
"""
zorder = 5 # zorder of the legend
def __init__(self, loc,
pad=0.4, borderpad=0.5,
child=None, prop=None, frameon=True,
bbox_to_anchor=None,
bbox_transform=None,
**kwargs):
"""
loc is a string or an integer specifying the legend location.
The valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
pad : pad around the child for drawing a frame. given in
fraction of fontsize.
borderpad : pad between offsetbox frame and the bbox_to_anchor,
child : OffsetBox instance that will be anchored.
prop : font property. This is only used as a reference for paddings.
frameon : draw a frame box if True.
bbox_to_anchor : bbox to anchor. Use self.axes.bbox if None.
bbox_transform : with which the bbox_to_anchor will be transformed.
"""
super(AnchoredOffsetbox, self).__init__(**kwargs)
self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
self.set_child(child)
self.loc = loc
self.borderpad=borderpad
self.pad = pad
if prop is None:
self.prop=FontProperties(size=rcParams["legend.fontsize"])
elif isinstance(prop, dict):
self.prop=FontProperties(**prop)
if "size" not in prop:
self.prop.set_size(rcParams["legend.fontsize"])
else:
self.prop = prop
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square",pad=0)
self._drawFrame = frameon
def set_child(self, child):
"set the child to be anchored"
self._child = child
def get_child(self):
"return the child"
return self._child
def get_children(self):
"return the list of children"
return [self._child]
def get_extent(self, renderer):
"""
return the extent of the artist: the extent of the child
with the pad added is returned.
"""
w, h, xd, yd = self.get_child().get_extent(renderer)
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
pad = self.pad * fontsize
return w+2*pad, h+2*pad, xd+pad, yd+pad
def get_bbox_to_anchor(self):
"""
return the bbox that the legend will be anchored to
"""
if self._bbox_to_anchor is None:
return self.axes.bbox
else:
transform = self._bbox_to_anchor_transform
if transform is None:
return self._bbox_to_anchor
else:
return TransformedBbox(self._bbox_to_anchor,
transform)
def set_bbox_to_anchor(self, bbox, transform=None):
"""
set the bbox that the child will be anchored to.
*bbox* can be a Bbox instance, a list of [left, bottom, width,
height], or a list of [left, bottom] where the width and
height will be assumed to be zero. The bbox will be
transformed to display coordinate by the given transform.
"""
if bbox is None or isinstance(bbox, BboxBase):
self._bbox_to_anchor = bbox
else:
try:
l = len(bbox)
except TypeError:
raise ValueError("Invalid argument for bbox : %s" % str(bbox))
if l == 2:
bbox = [bbox[0], bbox[1], 0, 0]
self._bbox_to_anchor = Bbox.from_bounds(*bbox)
self._bbox_to_anchor_transform = transform
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
self._update_offset_func(renderer)
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset(w, h, xd, yd, renderer)
return Bbox.from_bounds(ox-xd, oy-yd, w, h)
def _update_offset_func(self, renderer, fontsize=None):
"""
Update the offset func which depends on the dpi of the
renderer (because of the padding).
"""
if fontsize is None:
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
def _offset(w, h, xd, yd, renderer, fontsize=fontsize, self=self):
bbox = Bbox.from_bounds(0, 0, w, h)
borderpad = self.borderpad*fontsize
bbox_to_anchor = self.get_bbox_to_anchor()
x0, y0 = self._get_anchored_bbox(self.loc,
bbox,
bbox_to_anchor,
borderpad)
return x0+xd, y0+yd
self.set_offset(_offset)
def update_frame(self, bbox, fontsize=None):
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
if fontsize:
self.patch.set_mutation_scale(fontsize)
def draw(self, renderer):
"draw the artist"
if not self.get_visible(): return
fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
self._update_offset_func(renderer, fontsize)
if self._drawFrame:
# update the location and size of the legend
bbox = self.get_window_extent(renderer)
self.update_frame(bbox, fontsize)
self.patch.draw(renderer)
width, height, xdescent, ydescent = self.get_extent(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent, renderer)
self.get_child().set_offset((px, py))
self.get_child().draw(renderer)
def _get_anchored_bbox(self, loc, bbox, parentbbox, borderpad):
"""
return the position of the bbox anchored at the parentbbox
with the loc code, with the borderpad.
"""
assert loc in range(1,11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = range(11)
anchor_coefs={UR:"NE",
UL:"NW",
LL:"SW",
LR:"SE",
R:"E",
CL:"W",
CR:"E",
LC:"S",
UC:"N",
C:"C"}
c = anchor_coefs[loc]
container = parentbbox.padded(-borderpad)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
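# Hedged composition sketch (commented, illustrative only, not part of the
# original module): pack two TextAreas vertically and anchor the result in
# the upper-right corner of an existing axes ``ax``.
#
#     box = VPacker(children=[TextArea("first line"), TextArea("second line")],
#                   align="left", pad=0, sep=2)
#     anchored = AnchoredOffsetbox(loc=1, child=box, pad=0.4, frameon=True)
#     ax.add_artist(anchored)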
class AnchoredText(AnchoredOffsetbox):
"""
AnchoredOffsetbox with Text
"""
def __init__(self, s, loc, pad=0.4, borderpad=0.5, prop=None, **kwargs):
"""
*s* : string
*loc* : location code
*prop* : font property
*pad* : pad between the text and the frame as fraction of the font size.
*borderpad* : pad between the frame and the axes (or bbox_to_anchor).
other keyword parameters of AnchoredOffsetbox are also allowed.
"""
self.txt = TextArea(s, textprops=prop,
minimumdescent=False)
fp = self.txt._text.get_fontproperties()
super(AnchoredText, self).__init__(loc, pad=pad, borderpad=borderpad,
child=self.txt,
prop=fp,
**kwargs)
class OffsetImage(OffsetBox):
def __init__(self, arr,
zoom=1,
cmap = None,
norm = None,
interpolation=None,
origin=None,
filternorm=1,
filterrad=4.0,
resample = False,
dpi_cor=True,
**kwargs
):
self._dpi_cor = dpi_cor
self.image = BboxImage(bbox=self.get_window_extent,
cmap = cmap,
norm = norm,
interpolation=interpolation,
origin=origin,
filternorm=filternorm,
filterrad=filterrad,
resample = resample,
**kwargs
)
self._children = [self.image]
self.set_zoom(zoom)
self.set_data(arr)
OffsetBox.__init__(self)
def set_data(self, arr):
self._data = np.asarray(arr)
self.image.set_data(self._data)
def get_data(self):
return self._data
def set_zoom(self, zoom):
self._zoom = zoom
def get_zoom(self):
return self._zoom
# def set_axes(self, axes):
# self.image.set_axes(axes)
# martist.Artist.set_axes(self, axes)
# def set_offset(self, xy):
# """
# set offset of the container.
# Accept : tuple of x,y cooridnate in disokay units.
# """
# self._offset = xy
# self.offset_transform.clear()
# self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_children(self):
return [self.image]
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset()
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
if self._dpi_cor: # True, do correction
dpi_cor = renderer.points_to_pixels(1.)
else:
dpi_cor = 1.
zoom = self.get_zoom()
data = self.get_data()
ny, nx = data.shape[:2]
w, h = nx*zoom, ny*zoom
return w, h, 0, 0
def draw(self, renderer):
"""
Draw the children
"""
self.image.draw(renderer)
#bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
from matplotlib.text import _AnnotationBase
class AnnotationBbox(martist.Artist, _AnnotationBase):
"""
Annotation-like class, but with offsetbox instead of Text.
"""
zorder = 3
def __str__(self):
return "AnnotationBbox(%g,%g)"%(self.xy[0],self.xy[1])
@docstring.dedent_interpd
def __init__(self, offsetbox, xy,
xybox=None,
xycoords='data',
boxcoords=None,
frameon=True, pad=0.4, # BboxPatch
annotation_clip=None,
box_alignment=(0.5, 0.5),
bboxprops=None,
arrowprops=None,
fontsize=None,
**kwargs):
"""
*offsetbox* : OffsetBox instance
*xycoords* : same as Annotation but can be a tuple of two
strings which are interpreted as x and y coordinates.
*boxcoords* : similar to textcoords as Annotation but can be a
tuple of two strings which are interpreted as x and y
coordinates.
*box_alignment* : a tuple of two floats for a vertical and
horizontal alignment of the offset box w.r.t. the *boxcoords*.
The lower-left corner is (0, 0) and the upper-right corner is (1, 1).
other parameters are identical to that of Annotation.
"""
self.offsetbox = offsetbox
self.arrowprops = arrowprops
self.set_fontsize(fontsize)
if arrowprops is not None:
self._arrow_relpos = self.arrowprops.pop("relpos", (0.5, 0.5))
self.arrow_patch = FancyArrowPatch((0, 0), (1,1),
**self.arrowprops)
else:
self._arrow_relpos = None
self.arrow_patch = None
_AnnotationBase.__init__(self,
xy, xytext=xybox,
xycoords=xycoords, textcoords=boxcoords,
annotation_clip=annotation_clip)
martist.Artist.__init__(self, **kwargs)
#self._fw, self._fh = 0., 0. # for alignment
self._box_alignment = box_alignment
# frame
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.prop.get_size_in_points(),
snap=True
)
self.patch.set_boxstyle("square",pad=pad)
if bboxprops:
self.patch.set(**bboxprops)
self._drawFrame = frameon
def contains(self,event):
t,tinfo = self.offsetbox.contains(event)
#if self.arrow_patch is not None:
# a,ainfo=self.arrow_patch.contains(event)
# t = t or a
# self.arrow_patch is currently not checked as this can be a line - JJ
return t,tinfo
def get_children(self):
children = [self.offsetbox, self.patch]
if self.arrow_patch:
children.append(self.arrow_patch)
return children
def set_figure(self, fig):
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
self.offsetbox.set_figure(fig)
martist.Artist.set_figure(self, fig)
def set_fontsize(self, s=None):
"""
set fontsize in points
"""
if s is None:
s = rcParams["legend.fontsize"]
self.prop=FontProperties(size=s)
def get_fontsize(self, s=None):
"""
return fontsize in points
"""
return self.prop.get_size_in_points()
def update_positions(self, renderer):
"Update the pixel positions of the annotated point and the text."
xy_pixel = self._get_position_xy(renderer)
self._update_position_xybox(renderer, xy_pixel)
mutation_scale = renderer.points_to_pixels(self.get_fontsize())
self.patch.set_mutation_scale(mutation_scale)
if self.arrow_patch:
self.arrow_patch.set_mutation_scale(mutation_scale)
def _update_position_xybox(self, renderer, xy_pixel):
"Update the pixel positions of the annotation text and the arrow patch."
x, y = self.xytext
if isinstance(self.textcoords, tuple):
xcoord, ycoord = self.textcoords
x1, y1 = self._get_xy(renderer, x, y, xcoord)
x2, y2 = self._get_xy(renderer, x, y, ycoord)
ox0, oy0 = x1, y2
else:
ox0, oy0 = self._get_xy(renderer, x, y, self.textcoords)
w, h, xd, yd = self.offsetbox.get_extent(renderer)
_fw, _fh = self._box_alignment
self.offsetbox.set_offset((ox0-_fw*w+xd, oy0-_fh*h+yd))
# update patch position
bbox = self.offsetbox.get_window_extent(renderer)
#self.offsetbox.set_offset((ox0-_fw*w, oy0-_fh*h))
self.patch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
x, y = xy_pixel
ox1, oy1 = x, y
if self.arrowprops:
x0, y0 = x, y
d = self.arrowprops.copy()
# Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.
# adjust the starting point of the arrow relative to
# the textbox.
# TODO : Rotation needs to be accounted for.
relpos = self._arrow_relpos
ox0 = bbox.x0 + bbox.width * relpos[0]
oy0 = bbox.y0 + bbox.height * relpos[1]
# The arrow will be drawn from (ox0, oy0) to (ox1,
# oy1). It will be first clipped by patchA and patchB.
# Then it will be shrunk by shrinkA and shrinkB
# (in points). If patch A is not set, self.bbox_patch
# is used.
self.arrow_patch.set_positions((ox0, oy0), (ox1,oy1))
fs = self.prop.get_size_in_points()
mutation_scale = d.pop("mutation_scale", fs)
mutation_scale = renderer.points_to_pixels(mutation_scale)
self.arrow_patch.set_mutation_scale(mutation_scale)
patchA = d.pop("patchA", self.patch)
self.arrow_patch.set_patchA(patchA)
def draw(self, renderer):
"""
Draw the :class:`Annotation` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible(): return
xy_pixel = self._get_position_xy(renderer)
if not self._check_xy(renderer, xy_pixel):
return
self.update_positions(renderer)
if self.arrow_patch is not None:
if self.arrow_patch.figure is None and self.figure is not None:
self.arrow_patch.figure = self.figure
self.arrow_patch.draw(renderer)
if self._drawFrame:
self.patch.draw(renderer)
self.offsetbox.draw(renderer)
class DraggableBase(object):
"""
helper code for a draggable artist (legend, offsetbox)
The derived class must override the following two methods.
def save_offset(self):
pass
def update_offset(self, dx, dy):
pass
*save_offset* is called when the object is picked for dragging and it is
meant to save the reference position of the artist.
*update_offset* is called during the dragging. dx and dy are the pixel
offsets from the point where the mouse drag started.
Optionally you may override the following two methods.
def artist_picker(self, artist, evt):
return self.ref_artist.contains(evt)
def finalize_offset(self):
pass
*artist_picker* is a picker method that will be
used. *finalize_offset* is called when the mouse is released. In the
current implementation of DraggableLegend and DraggableAnnotation,
*update_offset* places the artists simply in display
coordinates, and *finalize_offset* recalculates their position in
normalized axes coordinates and sets a relevant attribute.
"""
def __init__(self, ref_artist, use_blit=False):
self.ref_artist = ref_artist
self.got_artist = False
self._use_blit = use_blit
self.canvas = self.ref_artist.figure.canvas
c2 = self.canvas.mpl_connect('pick_event', self.on_pick)
c3 = self.canvas.mpl_connect('button_release_event', self.on_release)
ref_artist.set_picker(self.artist_picker)
self.cids = [c2, c3]
def on_motion(self, evt):
if self.got_artist:
dx = evt.x - self.mouse_x
dy = evt.y - self.mouse_y
self.update_offset(dx, dy)
self.canvas.draw()
def on_motion_blit(self, evt):
if self.got_artist:
dx = evt.x - self.mouse_x
dy = evt.y - self.mouse_y
self.update_offset(dx, dy)
self.canvas.restore_region(self.background)
self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
self.canvas.blit(self.ref_artist.figure.bbox)
def on_pick(self, evt):
if evt.artist == self.ref_artist:
self.mouse_x = evt.mouseevent.x
self.mouse_y = evt.mouseevent.y
self.got_artist = True
if self._use_blit:
self.ref_artist.set_animated(True)
self.canvas.draw()
self.background = self.canvas.copy_from_bbox(self.ref_artist.figure.bbox)
self.ref_artist.draw(self.ref_artist.figure._cachedRenderer)
self.canvas.blit(self.ref_artist.figure.bbox)
self._c1 = self.canvas.mpl_connect('motion_notify_event', self.on_motion_blit)
else:
self._c1 = self.canvas.mpl_connect('motion_notify_event', self.on_motion)
self.save_offset()
def on_release(self, event):
if self.got_artist:
self.finalize_offset()
self.got_artist = False
self.canvas.mpl_disconnect(self._c1)
if self._use_blit:
self.ref_artist.set_animated(False)
def disconnect(self):
'disconnect the callbacks'
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
def artist_picker(self, artist, evt):
return self.ref_artist.contains(evt)
def save_offset(self):
pass
def update_offset(self, dx, dy):
pass
def finalize_offset(self):
pass
class DraggableOffsetBox(DraggableBase):
def __init__(self, ref_artist, offsetbox, use_blit=False):
DraggableBase.__init__(self, ref_artist, use_blit=use_blit)
self.offsetbox = offsetbox
def save_offset(self):
offsetbox = self.offsetbox
renderer = offsetbox.figure._cachedRenderer
w, h, xd, yd = offsetbox.get_extent(renderer)
offset = offsetbox.get_offset(w, h, xd, yd, renderer)
self.offsetbox_x, self.offsetbox_y = offset
self.offsetbox.set_offset(offset)
def update_offset(self, dx, dy):
loc_in_canvas = self.offsetbox_x + dx, self.offsetbox_y + dy
self.offsetbox.set_offset(loc_in_canvas)
def get_loc_in_canvas(self):
offsetbox=self.offsetbox
renderer = offsetbox.figure._cachedRenderer
w, h, xd, yd = offsetbox.get_extent(renderer)
ox, oy = offsetbox._offset
loc_in_canvas = (ox-xd, oy-yd)
return loc_in_canvas
class DraggableAnnotation(DraggableBase):
def __init__(self, annotation, use_blit=False):
DraggableBase.__init__(self, annotation, use_blit=use_blit)
self.annotation = annotation
def save_offset(self):
ann = self.annotation
x, y = ann.xytext
if isinstance(ann.textcoords, tuple):
xcoord, ycoord = ann.textcoords
x1, y1 = ann._get_xy(self.canvas.renderer, x, y, xcoord)
x2, y2 = ann._get_xy(self.canvas.renderer, x, y, ycoord)
ox0, oy0 = x1, y2
else:
ox0, oy0 = ann._get_xy(self.canvas.renderer, x, y, ann.textcoords)
self.ox, self.oy = ox0, oy0
self.annotation.textcoords = "figure pixels"
self.update_offset(0, 0)
def update_offset(self, dx, dy):
ann = self.annotation
ann.xytext = self.ox + dx, self.oy + dy
x, y = ann.xytext
xy = ann._get_xy(self.canvas.renderer, x, y, ann.textcoords)
def finalize_offset(self):
loc_in_canvas = self.annotation.xytext
self.annotation.textcoords = "axes fraction"
pos_axes_fraction = self.annotation.axes.transAxes.inverted().transform_point(loc_in_canvas)
self.annotation.xytext = tuple(pos_axes_fraction)
if __name__ == "__main__":
import matplotlib.pyplot as plt  # the demo below uses plt
fig = plt.figure(1)
fig.clf()
ax = plt.subplot(121)
#txt = ax.text(0.5, 0.5, "Test", size=30, ha="center", color="w")
kwargs = dict()
a = np.arange(256).reshape(16,16)/256.
myimage = OffsetImage(a,
zoom=2,
norm = None,
origin=None,
**kwargs
)
ax.add_artist(myimage)
myimage.set_offset((100, 100))
myimage2 = OffsetImage(a,
zoom=2,
norm = None,
origin=None,
**kwargs
)
ann = AnnotationBbox(myimage2, (0.5, 0.5),
xybox=(30, 30),
xycoords='data',
boxcoords="offset points",
frameon=True, pad=0.4, # BboxPatch
bboxprops=dict(boxstyle="round", fc="y"),
fontsize=None,
arrowprops=dict(arrowstyle="->"),
)
ax.add_artist(ann)
plt.draw()
plt.show()
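# Hedged follow-up sketch (commented, illustrative only, not part of the
# original demo): a text box can also be anchored to an axes corner with
# AnchoredText; values below are made up.
#
#     ax2 = plt.subplot(122)
#     at = AnchoredText("anchored text", loc=2, prop=dict(size=10), frameon=True)
#     ax2.add_artist(at)
#     plt.show()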
| gpl-2.0 |
vivekmishra1991/scikit-learn | sklearn/cluster/dbscan_.py | 92 | 12380 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <[email protected]>
# Joel Nothman <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
algorithm='auto', leaf_size=30, p=2, sample_weight=None,
random_state=None):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
random_state: numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
if random_state is not None:
warnings.warn("The parameter random_state is deprecated in 0.16 "
"and will be removed in version 0.18. "
"DBSCAN is deterministic except for rare border cases.",
category=DeprecationWarning)
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later (i.e. point i is in the
# neighborhood of point i. While true, it is useless information.)
if metric == 'precomputed' and sparse.issparse(X):
neighborhoods = np.empty(X.shape[0], dtype=object)
X.sum_duplicates() # XXX: modifies X's internals in-place
X_mask = X.data <= eps
masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1]
# insert the diagonal: a point is its own neighbor, but 0 distance
# means absence from sparse matrix data
masked_indices = np.insert(masked_indices, masked_indptr,
np.arange(X.shape[0]))
masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
# split into rows
neighborhoods[:] = np.split(masked_indices, masked_indptr)
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
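# Hedged usage sketch (commented, illustrative only, not part of the original
# module): the functional interface returns core-sample indices and cluster
# labels directly; the toy coordinates below are made up.
#
#     X_demo = np.array([[1.0, 1.1], [1.2, 0.9], [0.9, 1.0],
#                        [8.0, 8.1], [8.2, 7.9], [25.0, 25.0]])
#     core_idx, labels = dbscan(X_demo, eps=0.5, min_samples=2)
#     # expected: the first three points form one cluster, the next two form
#     # another, and the isolated point at (25, 25) gets the noise label -1.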
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.calculate_distance for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
random_state: numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
algorithm='auto', leaf_size=30, p=None, random_state=None):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.random_state = random_state
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight, **self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
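# Hedged usage sketch appended for illustration only (not part of the original
# scikit-learn module): fit the estimator on a small synthetic two-blob data
# set; all names and parameter values below are made up.
if __name__ == "__main__":
    from sklearn.datasets.samples_generator import make_blobs
    X_demo, _ = make_blobs(n_samples=150, centers=[[0, 0], [5, 5]],
                           cluster_std=0.4, random_state=0)
    model = DBSCAN(eps=0.5, min_samples=5).fit(X_demo)
    n_clusters = len(set(model.labels_)) - (1 if -1 in model.labels_ else 0)
    print("estimated clusters: %d" % n_clusters)
    print("core samples: %d" % len(model.core_sample_indices_))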
| bsd-3-clause |
xyguo/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
sinhrks/pyopendata | pyopendata/tests/test_oecd.py | 1 | 4208 | # pylint: disable-msg=E1101,W0613,W0603
from pyopendata import OECDStore, OECDResource
import numpy as np
import pandas as pd
from pandas.compat import range
import pandas.util.testing as tm
class TestOECDTestSite(tm.TestCase):
def setUp(self):
self.store = OECDStore()
def test_isvalid(self):
self.assertTrue(self.store.is_valid())
def test_get_un_den(self):
data = self.store.get('UN_DEN')
self.assertTrue(isinstance(data, OECDResource))
df = data.read()
au = [50.17292785, 49.47181009, 49.52106174, 49.16341327, 48.19296375,
47.8863461, 45.83517292, 45.02021403, 44.78983834, 44.37794217,
44.15358142, 45.38865546, 46.33092037, 47.2343406, 48.80023876,
50.0639872, 50.23390644, 49.8214994, 49.67636585, 49.55227375,
48.48657368, 47.41179739, 47.52526561, 47.93048854, 47.26327162,
45.4617105, 43.90202112, 42.32759607, 40.35838899, 39.35157364,
39.55023059, 39.93196617, 39.21915611, 37.24341973, 34.42587311,
32.51238056, 31.16811321, 29.78802692, 28.14678656, 25.41919614,
25.71802695, 24.53149132, 23.21981032, 22.99175908, 22.29392791,
22.29118237, 20.22236361, 18.51151852, 18.5674024, 19.31219498,
18.44405734, 18.51407731, 18.19718895, 17.04071205]
jp = [32.89493671, 33.80225989, 34.5969919, 35.01871257, 35.46869345,
35.28164117, 34.749499, 34.40573103, 34.50762389, 35.16411379,
35.10284332, 34.57209848, 34.31168831, 33.46611342, 34.26450371,
34.53099287, 33.69881466, 32.99814274, 32.59541985, 31.75696594,
31.14832536, 30.8917513, 30.56612982, 29.75285171, 29.22391559,
28.79202411, 28.18680064, 27.71454381, 26.94358748, 26.13165206,
26.13236815, 25.24310276, 24.98554405, 24.83467897, 24.7263178,
24.38077142, 23.3953401, 22.78797997, 22.52794337, 22.18157944,
21.54406273, 20.88284597, 20.26073907, 19.73945642, 19.06442577,
18.79844243, 18.3497807, 18.25095057, 18.2204924, 18.45787546,
18.38733297, 18.99504195, 17.97238372, 17.78318026]
us = [30.89748411, 29.51891217, 29.34276869, 28.51337535, 28.30646144,
28.16661991, 28.19557735, 27.76578899, 27.9004622, 27.30836054,
27.43402867, 26.94941363, 26.25996487, 25.83134349, 25.74427582,
25.28771204, 24.38412814, 23.59186681, 23.94328194, 22.3651229,
22.06009466, 21.01328205, 20.47463895, 19.45290876, 18.22953818,
17.44855678, 17.00126975, 16.5162476, 16.24744487, 15.86401127,
15.45147174, 15.46986912, 15.1499578, 15.13654544, 14.91544059,
14.31762091, 14.02052225, 13.55213736, 13.39571457, 13.36670812,
12.90865079, 12.86997731, 12.76906383, 12.39142968, 12.02130767,
11.96023574, 11.48458378, 11.56435375, 11.91022276, 11.79401904,
11.38345975, 11.32948829, 11.07884758, 10.80789137]
index = pd.DatetimeIndex(map(str, range(1960, 2014)))
for label, values in [('Australia', au), ('Japan', jp), ('United States', us)]:
expected = pd.Series(values, index=index)
tm.assert_series_equal(df[label], expected)
raw_data = data.read(raw=True)
self.assertTrue(len(raw_data) > 0)
def test_get_tourism(self):
data = self.store.get('TOURISM_INBOUND')
df = data.read()
jp = np.array([6138, 6728, 7334, 8347, 8351, 6790, 8611, 6219, 8368, ], dtype=float)
us = np.array([np.nan, np.nan, 183178, 175142, 175632, 160359, 162269, 164672, 171630], dtype=float)
index = pd.DatetimeIndex(['2004', '2005', '2006', '2007', '2008',
'2009', '2010', '2011', '2012'])
for label, values in [('Japan', jp), ('United States', us)]:
expected = pd.Series(values, index=index)
tm.assert_series_equal(df[label]['Total international arrivals'], expected)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
| bsd-2-clause |
fyffyt/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
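# A minimal usage sketch (illustrative; X is assumed to be an array of shape
# (n_samples, n_features) and y a 1d target array):
#
#   from sklearn.linear_model import Ridge
#   model = Ridge(alpha=1.0).fit(X, y)
#   y_pred = model.predict(X)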
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
| bsd-3-clause |
lenovor/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants bicluster with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
bavardage/statsmodels | statsmodels/iolib/tests/test_summary.py | 4 | 1486 | '''examples to check summary, not converted to tests yet
'''
if __name__ == '__main__':
from statsmodels.regression.tests.test_regression import TestOLS
#def mytest():
aregression = TestOLS()
TestOLS.setupClass()
results = aregression.res1
r_summary = str(results.summary_old())
print r_summary
olsres = results
print '\n\n'
r_summary = str(results.summary())
print r_summary
print '\n\n'
from statsmodels.discrete.tests.test_discrete import TestProbitNewton
aregression = TestProbitNewton()
TestProbitNewton.setupClass()
results = aregression.res1
r_summary = str(results.summary())
print r_summary
print '\n\n'
probres = results
from statsmodels.robust.tests.test_rlm import TestHampel
aregression = TestHampel()
#TestHampel.setupClass()
results = aregression.res1
r_summary = str(results.summary())
print r_summary
rlmres = results
print '\n\n'
from statsmodels.genmod.tests.test_glm import TestGlmBinomial
aregression = TestGlmBinomial()
#TestGlmBinomial.setupClass()
results = aregression.res1
r_summary = str(results.summary())
print r_summary
#print results.summary2(return_fmt='latex')
#print results.summary2(return_fmt='csv')
smry = olsres.summary()
print smry.as_csv()
# import matplotlib.pyplot as plt
# plt.plot(rlmres.model.endog,'o')
# plt.plot(rlmres.fittedvalues,'-')
#
# plt.show() | bsd-3-clause |
mwsmith2/traveler-db | traveler/backend.py | 1 | 21977 | """
Backend
=======
The backend is a collection of functions that range from general
utility functions to abstractions that remove the other classes
from direct database interactions.
"""
# Standard library
import subprocess
import sys
import io
import time
import couchdb
import simplejson
import zipfile
import numpy as np
import matplotlib
matplotlib.use('Agg') # a non-interactive backend
import matplotlib.pyplot as plt
# Other imports
from flask import session, request, redirect, url_for, render_template, g
from couchdb.design import ViewDefinition
from werkzeug.utils import secure_filename
from traveler import app
# Need to preload this as a global.
group_attr = None
def unique_filename(upload_file):
"""Take a base filename, add characters to make it more unique, and
ensure that it is a secure filename."""
return secure_filename(upload_file)
def reset_group_attr():
"""Used by the frontend to trigger a reset."""
global group_attr
group_attr = None
get_group_attr()
def get_group_attr():
"""Load the file containing the structure of each traveler entry type."""
global group_attr
if group_attr is None:
try:
group_attr = get_db()['entry_attributes']['type_info']
# Needs to be loaded from file
except:
type_info = {}
type_info['_id'] = 'entry_attributes'
try:
group_attr = simplejson.load(open(app.config['ENTRY_CONFIG']))
except:
group_attr = {}
type_info['type_info'] = group_attr
get_db().save(type_info)
# Reset some parameters
session['groups'] = group_attr.keys()[::-1]
if len(session['groups']) > 0:
session['group'] = session['groups'][0]
else:
session['group'] = ''
session['group_titles'] = {}
for group in session['groups']:
session['group_titles'][group] = group_attr[group]['title']
return group_attr
def get_type_attr():
"""Load the file containing the structure of each traveler entry type."""
try:
session['group']
except:
session['group'] = ''
if session['group'] != '':
return get_group_attr()[session['group']]['types']
else:
return {}
def get_type_template():
"""Create an empty type structure to fill."""
data = {}
data['type'] = ''
data['title'] = ''
data['attr'] = {}
data['attr']['get_text'] = []
data['attr']['get_radio'] = {}
data['attr']['get_num'] = []
data['attr']['get_file'] = []
data['attr']['get_img'] = []
data['opt_num'] = []
return data
def get_group_template():
"""Create an empty type structure to fill."""
data = {}
data['group'] = ''
data['title'] = ''
data['types'] = {}
return data
def copy_form_data(info):
"""Copy the data in the form that is filled out."""
# Initialize the data
data = {}
data['notes'] = request.form['notes']
# Reset the error just in case it was lingering unhandled.
session['error'] = None
try:
data['status'] = request.form['status']
except:
data['status'] = ''
# Copy the text attributes.
for key in info['attr']['get_text']:
data[key] = request.form[key]
# Copy the radio attributes.
for key in info['attr']['get_radio']:
data[key] = request.form[key]
# Copy the numerical attributes.
for key in info['attr']['get_num']:
data[key] = request.form[key]
# Copy the attached file's name.
for key in info['attr']['get_file']:
file = request.files[key]
if file.filename == '':
data[key] = file.filename
else:
ext = file.filename.split('.')[-1]
data[key] = unique_filename(key + '.' + ext)
# Copy the attached image's name.
for key in info['attr']['get_img']:
file = request.files[key]
if file.filename == '':
data[key] = file.filename
else:
ext = file.filename.split('.')[-1]
data[key] = unique_filename(key + '.' + ext)
return data
def check_form_data(info, data):
"""Check each spot in the form data."""
for key in info['attr']['get_text']:
if data[key] == '':
session['error'] = "All fields in the form must be filled."
return
for key in info['attr']['get_radio']:
if data[key] == '':
session['error'] = "All fields in the form must be filled."
return
for key in info['attr']['get_num']:
if data[key] == '':
session['error'] = "All fields in the form must be filled."
return
try:
float(data[key])
except:
session['error'] = "Invalid numerical entry."
return
if data['status'] == '':
session['error'] = "The current device status must be set."
return
for key in info['attr']['get_file']:
if data[key] == '':
session['error'] = "All attachments must be included."
return
for key in info['attr']['get_img']:
if data[key] == '':
session['error'] = "All images must be included."
return
def process_entry():
"""Do some standard processing of entries to simplify entry functions."""
if request.method == 'POST':
# Copy the data and add a few things.
data = copy_form_data(session['info'])
# Check to see if the form was filled out properly.
check_form_data(session['info'], data)
# Add a few things not in the form for the first entry
data['type'] = session['info']['type']
data['title'] = session['info']['title']
data['time'] = time.asctime()
try:
data['user'] = session['user']
except:
msg = 'No current user found. Please login.'
render_template('login.html', message=msg)
data['last_time'] = data['time']
data['last_user'] = data['user']
# Initialize optional lists.
data['plots'] = {}
data['opt_file'] = []
data['opt_img'] = []
data['opt_num'] = []
return data
def process_update():
"""Do some standard processing on an update to simplify user functions."""
if request.method == 'POST':
# Grab the original data.
data = get_entry(request.form['view_id'])
# Copy the data update.
temp_data = copy_form_data(session['info'])
# Update things that are new
for key in temp_data.keys():
if temp_data[key] != '':
data[key] = temp_data[key]
# Record who made the update.
data['last_time'] = time.asctime()
data['last_user'] = session['user']
return data
def save_entry(data):
"""Final function call in entry functions. It either adds the entry
    or returns the error page.
"""
info = session['info']
if session['error'] is not None:
return render_template('add_entry.html',
data=data,
error=session['error'])
id, rev = save_to_db(data)
doc = {'_id': id, '_rev': rev}
# Save the attached files.
for key in info['attr']['get_file']:
if request.files[key].filename != '':
request.files[key].seek(0)
put_upload(doc, request.files[key], filename=data[key])
# Save the attached images.
for key in info['attr']['get_img']:
if request.files[key].filename != '':
request.files[key].seek(0)
put_upload(doc, request.files[key], filename=data[key])
for key in data['plots'].keys():
name = data['plots'][key]
g.plots[name].seek(0)
put_upload(doc, g.plots[name], filename=name)
# Now redirect to show the page of the new entry.
return redirect(url_for('show_entry',
group=session['group'],
entry=data['type'],
view_id=data['id']))
def add_new_type(info):
"""Check the form for errors on a new type, then submit it."""
# Reset the error flag
session['error'] = None
# Make sure all the pieces are there and not 'None'.
    if info['type'] == 'None':
        session['error'] = 'Type name cannot be none.'
    if info['title'] == 'None':
session['error'] = 'Type title cannot be none.'
# Make sure none of the sections have entries with the same name.
templist = []
for item in info['attr']['get_text']:
if item in templist:
session['error'] = 'Multiple text attributes with same name.'
break
else:
templist.append(item)
templist = []
for item in info['attr']['get_num']:
if item in templist:
session['error'] = 'Multiple numeric attributes with same name.'
break
else:
templist.append(item)
templist = []
    for item in info['attr']['get_file']:
        if item in templist:
            session['error'] = 'Multiple file attachments with same name.'
break
else:
templist.append(item)
templist = []
for item in info['attr']['get_img']:
if item in templist:
session['error'] = 'Multiple image attachments with same name.'
break
else:
templist.append(item)
for val in info['attr']['get_radio'].values():
if len(val) == 0:
session['error'] = 'Radio attribute has zero options.'
break
if session['error'] is not None:
return render_template('new_type.html',
type=info,
error=session['error'])
db = get_db()
doc = db['entry_attributes']
doc['type_info'][session['group']]['types'][info['type']] = info
id, rev = db.save(doc)
# Trigger a reread of the group attributes
global group_attr
group_attr = None
# Now redirect to show the page of the new entry.
return redirect(url_for('group_home', group=session['group']))
def add_new_group(info):
"""Check the form for errors on a new type, then submit it."""
# Reset the error flag
session['error'] = None
# Make sure all the pieces are there and not 'None'.
    if info['group'] == '':
        session['error'] = 'Group name cannot be empty.'
    if info['title'] == '':
        session['error'] = 'Group title cannot be empty.'
if session['error'] is not None:
return render_template('new_group.html',
group=info,
error=session['error'])
db = get_db()
doc = db['entry_attributes']
doc['type_info'][info['group']] = info
id, rev = db.save(doc)
# Trigger a reread of the group attributes
global group_attr
group_attr = None
# Now redirect to show the page of the new entry.
return redirect(url_for('group_home', group=info['group']))
def get_entry(entry_id):
"""Use permanent views to find a specific entry."""
db = get_db()
data = None
id_map = get_id_map()
for row in id_map:
if (int(entry_id) == row.key or entry_id == row.value):
data = db.get(row.id)
break
return data
def get_all_entries():
"""Use permanent views to return all entries of a type."""
db = get_db()
entries = []
for row in get_entry_list():
entries.append(db.get(row.value))
return entries
def get_num_entries():
"""A map/reduce function to count entries of a specific type."""
return len(get_entry_list())
# Permanent views section
def get_entry_list():
"""A map function that stores a view of all the entries for
a specific type.
"""
tp = session['info']['type']
gr = session['group']
db = get_db()
# Ask for the view.
res = db.view('_design/content/_view/' + '_'.join([gr, tp, 'list']))
# See if the resource is really there.
try:
len(res)
return res
except:
mapfun = """
function(doc) {
if (doc.type == '""" + session['info']['type'] + """')
emit(doc.sn, doc._id)
}"""
view = ViewDefinition('content', '_'.join([gr, tp, 'list']), mapfun)
view.sync(db)
return db.view('_design/content/_view/' + '_'.join([gr, tp, 'list']))
def get_id_map():
"""A map function that stores a view of the id->sn conversion for
all entries of a specific type.
"""
tp = session['info']['type']
gr = session['group']
db = get_db()
# Ask for the view.
res = db.view('_design/content/_view/' + '_'.join([gr, tp, 'id_map']))
# See if the view is really there.
try:
len(res)
return res
except:
mapfun = """
function(doc) {
if (doc.type == '""" + session['info']['type'] + """')
emit(doc.id, doc.sn)
}"""
view = ViewDefinition('content', '_'.join([gr, tp, 'id_map']), mapfun)
view.sync(db)
return db.view('_design/content/_view/' + '_'.join([gr, tp, 'id_map']))
def get_attribute(attr):
"""A map function that stores an attribute for all entries of a
specific type.
"""
tp = session['info']['type']
gr = session['group']
db = get_db()
# Ask for the view.
res = db.view('_design/attributes/_view/' + '_'.join([gr, tp, attr]))
# See if the view is really there.
try:
len(res)
return res
except:
mapfun = """
function(doc) {
if (doc.type == '""" + session['info']['type'] + """')
emit(doc['""" + attr + """'], doc.id)
}"""
view = ViewDefinition('attributes', '_'.join([gr, tp, attr]), mapfun)
view.sync(db)
return db.view('_design/attributes/_view/' + '_'.join([gr, tp, attr]))
def make_histogram(which):
"""Create a histogram of the selected attribute for the current type."""
num_entries = get_num_entries()
attr_list = get_attribute(which)
entry_status = get_attribute('status')
values = np.empty(num_entries)
idx = 0
for val, stat in zip(attr_list, entry_status):
if stat.key != 'defective':
values[idx] = float(val.key)
idx += 1
# Make the histogram.
plt.clf()
plt.figure()
plt.hist(values[:idx])
plt.xlabel(which)
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
return buf
def put_upload(doc, f, filename=None):
"""Save a file(or file buffer) to the database."""
db = get_db()
# Save to the db
f.seek(0)
db.put_attachment(doc, f, filename=filename)
def del_upload(doc, filename):
"""Remove an attachment from the entry document."""
db = get_db()
db.delete_attachment(doc, filename)
def cache_plot(fig, filename):
"""A simple wrapper to upload a plot in the database"""
# Allocate a buffer for the plot
buf = io.BytesIO()
# Make a plot and attach it to the buffer.
fig.savefig(buf, format='png')
buf.seek(0)
# Save to the global context.
try:
g.plots[filename] = buf
except:
g.plots = {filename: buf}
def fetch_file(doc, filename):
"""Retrieve a file from the database directly."""
db = get_db()
try:
doc['type']
except:
doc = db[doc]
f = db.get_attachment(doc, filename)
buf = io.BytesIO()
buf.write(f.read())
buf.seek(0)
return buf
def make_entry_zip(type, id):
"""Make a zip archive of the entry data."""
data = get_entry(id)
info = session['info']
zip_buf = io.BytesIO()
with zipfile.ZipFile(zip_buf, 'w') as zf:
zf.writestr('entry_data.json', simplejson.dumps(data))
for key in info['attr']['get_file']:
zf.writestr(data[key], fetch_file(data, data[key]).read())
for key in info['attr']['get_img']:
zf.writestr(data[key], fetch_file(data, data[key]).read())
for fname in data['opt_file']:
zf.writestr(fname, fetch_file(data, fname).read())
for fname in data['opt_img']:
zf.writestr(fname, fetch_file(data, fname).read())
zip_buf.seek(0)
return zip_buf
def make_type_csv(type):
"""Take out the text and numerical attributes for each entry of a
certain type and output it to a csv."""
entries = get_all_entries()
# Open a buffer to store the data.
csv_buf = io.BytesIO()
# Get the import attributes
header = ['id', 'sn', 'type', 'notes']
for attr in session['info']['attr']['get_text']:
header.append(str(attr))
for attr in session['info']['attr']['get_num']:
header.append(str(attr))
for attr in session['info']['attr']['opt_num']:
header.append(str(attr))
# Write the header.
csv_buf.write(','.join(header) + '\n')
for doc in entries:
try:
if doc['type'] != type:
continue
data = []
for attr in header:
try:
val = str(doc[attr])
data.append(val)
except:
data.append('None')
csv_buf.write(','.join(data) + '\n')
# Some entries don't have complete data, so we don't want those.
except:
continue
csv_buf.seek(0)
return csv_buf
def save_to_db(data):
"""Save the entry to the db."""
db = get_db()
if 'id' not in data.keys():
data['id'] = get_num_entries() + 1
if 'sn' not in data.keys():
data['sn'] = data['id']
return db.save(data)
def get_db():
"""Connect to the database if open, and start database if not running."""
# See if we still have a good handle.
try:
g.db.info()
return g.db
except:
pass
# Attempt to load the couchdb server.
client = couchdb.Server(url=app.config['DB_URL'])
client.resource.credentials = (app.config['DB_USER'],
app.config['DB_PASS'])
# Make sure CouchDB is actually running.
# Check if the server is actually available.
attempts = 0
while True:
try:
client.version()
break
except:
attempts += 1
if attempts == 1:
app.logger.warning("Attempting to restart couchdb")
subprocess.call(['couchdb', '-b'])
time.sleep(1)
if (attempts > app.config['DB_NUM_RECONNECTS']):
app.logger.error("Lost all communication with CouchDB")
sys.exit()
# Connect to the database.
g.db = client[app.config['DB_NAME']]
return g.db
@app.before_first_request
def init_db():
"""Function makes sure we have access to the CouchDB server, the
proper database exists, and does a bit to clean the db.
"""
# Grab a handle to the specified server.
client = couchdb.Server(url=app.config['DB_URL'])
# Check if the server is actually available.
attempts = 0
while True:
try:
client.version()
break
except:
attempts += 1
if attempts == 1:
app.logger.warning("Attempting to restart couchdb")
subprocess.call(['couchdb', '-b'])
time.sleep(1)
if (attempts > app.config['DB_NUM_RECONNECTS']):
app.logger.error("Could not establish connection with CouchDB")
sys.exit(-1)
# See if the CouchDB is secured or is still an Admin Party.
try:
client[app.config['DB_NAME']]
app.logger.error("CouchDB is unsecured!")
app.logger.error("Try running the intial setup script")
sys.exit(-1)
except(couchdb.http.ResourceNotFound):
pass
except(couchdb.http.Unauthorized):
pass
# Now sign in with our credentials.
client.resource.credentials = (app.config['DB_USER'],
app.config['DB_PASS'])
# Try connecting to the database again.
try:
client[app.config['DB_NAME']]
except(couchdb.http.ResourceNotFound):
try:
client.create(app.config['DB_NAME'])
except(couchdb.http.Unauthorized):
app.logger.error("TravelerDB could not create a new database")
app.logger.error("Try running the intial setup script")
sys.exit(-1)
except(couchdb.http.Unauthorized):
app.logger.error("TravelerDB is not authorized to access the database")
sys.exit(-1)
try:
db = client[app.config['DB_NAME']]
except:
app.logger.error("TravelerDB could not connect to database")
sys.exit()
# Now that we have the database, let's primp it.
get_group_attr()
session['previous_type'] = {}
session['new_type'] = {}
session['radio_helper'] = {}
session['view_id'] = 0
try:
session['logged_in']
except:
session['logged_in'] = False
for group in session['groups']:
session['group_titles'][group] = group_attr[group]['title']
session['previous_type'][group] = None
for entry_type in group_attr[group]['types']:
# Set the current info type and get the id map to renumber.
try:
session['info']['type'] = entry_type
except:
session['info'] = {}
session['info']['type'] = entry_type
id_map = get_id_map()
for i, row in enumerate(id_map, start=1):
if i != row['key']:
doc = db[row['id']]
doc['id'] = i
db.save(doc)
session['info'] = {}
return db
| mit |
yuanagain/seniorthesis | venv/lib/python2.7/site-packages/matplotlib/sphinxext/only_directives.py | 4 | 2215 | #
# A pair of directives for inserting content that will only appear in
# either html or latex.
#
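# Rough usage sketch in a .rst source (illustrative; the directive names come
# from the add_directive calls in setup() below):
#
#   .. htmlonly::
#
#      This paragraph appears only in the HTML build.
#
#   .. latexonly::
#
#      This paragraph appears only in the LaTeX build.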
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
class only_base(Body, Element):
def dont_traverse(self, *args, **kwargs):
return []
class html_only(only_base):
pass
class latex_only(only_base):
pass
def run(content, node_class, state, content_offset):
text = '\n'.join(content)
node = node_class(text)
state.nested_parse(content, content_offset, node)
return [node]
def html_only_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(content, html_only, state, content_offset)
def latex_only_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(content, latex_only, state, content_offset)
def builder_inited(app):
if app.builder.name == 'html':
latex_only.traverse = only_base.dont_traverse
else:
html_only.traverse = only_base.dont_traverse
def setup(app):
app.add_directive('htmlonly', html_only_directive, True, (0, 0, 0))
app.add_directive('latexonly', latex_only_directive, True, (0, 0, 0))
    # This will *really* never see the light of day. As it turns out,
# this results in "broken" image nodes since they never get
# processed, so best not to do this.
# app.connect('builder-inited', builder_inited)
# Add visit/depart methods to HTML-Translator:
def visit_perform(self, node):
pass
def depart_perform(self, node):
pass
def visit_ignore(self, node):
node.children = []
def depart_ignore(self, node):
node.children = []
app.add_node(html_only,
html=(visit_perform, depart_perform),
latex=(visit_ignore, depart_ignore))
app.add_node(latex_only,
latex=(visit_perform, depart_perform),
html=(visit_ignore, depart_ignore))
| mit |
srinathv/bokeh | bokeh/_legacy_charts/builder/tests/test_boxplot_builder.py | 6 | 4882 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
import pandas as pd
import blaze
from bokeh._legacy_charts import BoxPlot
from ._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestBoxPlot(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict([
('bronze', np.array([7.0, 10.0, 8.0, 7.0, 4.0, 4.0, 1.0, 5.0, 2.0, 1.0,
4.0, 2.0, 1.0, 2.0, 4.0, 1.0, 0.0, 1.0, 1.0, 2.0,
0.0, 1.0, 0.0, 0.0, 1.0, 1.0])),
('silver', np.array([8., 4., 6., 4., 8., 3., 3., 2., 5., 6.,
1., 4., 2., 3., 2., 0., 0., 1., 2., 1.,
3., 0., 0., 1., 0., 0.])),
('gold', np.array([6., 6., 6., 8., 4., 8., 6., 3., 2., 2., 2., 1.,
3., 1., 0., 5., 4., 2., 0., 0., 0., 1., 1., 0., 0.,
0.]))
])
groups = ['bronze', 'silver', 'gold']
xyvaluesdf = pd.DataFrame(xyvalues)
xyvaluesbl = blaze.Data(xyvaluesdf)
exptected_datarect = {
'colors': ['#f22c40', '#5ab738', '#407ee7'],
'groups': ['bronze', 'silver', 'gold'],
'iqr_centers': [2.5, 2.5, 2.5],
'iqr_lengths': [3.0, 3.0, 4.5],
'lower_center_boxes': [1.25, 1.5, 1.125],
'lower_height_boxes': [0.5, 1.0, 1.75],
'upper_center_boxes': [2.75, 3.0, 3.375],
'upper_height_boxes': [2.5, 2.0, 2.75],
'width': [0.8, 0.8, 0.8]
}
expected_scatter = {
'colors': ['#f22c40'],
'out_x': ['bronze'],
'out_y': [10.0]
}
expected_seg = {
'lower': [-3.5, -3.5, -6.5],
'q0': [1.0, 1.0, 0.25],
'q2': [4.0, 4.0, 4.75],
'upper': [8.5, 8.5, 11.5]
}
for i, _xy in enumerate([xyvalues, xyvaluesdf, xyvaluesbl]):
bp = create_chart(BoxPlot, _xy, marker='circle', outliers=True)
builder = bp._builders[0]
self.assertEqual(sorted(builder._groups), sorted(groups))
for key, expected_v in exptected_datarect.items():
self.assertEqual(builder._data_rect[key], expected_v)
for key, expected_v in expected_scatter.items():
self.assertEqual(builder._data_scatter[key], expected_v)
for key, expected_v in expected_seg.items():
self.assertEqual(builder._data_segment[key], expected_v)
lvalues = [
np.array([7.0, 10.0, 8.0, 7.0, 4.0, 4.0, 1.0, 5.0, 2.0, 1.0,
4.0, 2.0, 1.0, 2.0, 4.0, 1.0, 0.0, 1.0, 1.0, 2.0,
0.0, 1.0, 0.0, 0.0, 1.0, 1.0]),
np.array([8., 4., 6., 4., 8., 3., 3., 2., 5., 6.,
1., 4., 2., 3., 2., 0., 0., 1., 2., 1.,
3., 0., 0., 1., 0., 0.]),
np.array([6., 6., 6., 8., 4., 8., 6., 3., 2., 2., 2., 1.,
3., 1., 0., 5., 4., 2., 0., 0., 0., 1., 1., 0., 0.,
0.])
]
groups = exptected_datarect['groups'] = ['0', '1', '2']
expected_scatter['out_x'] = ['0']
for i, _xy in enumerate([lvalues, np.array(lvalues)]):
bp = create_chart(BoxPlot, _xy, marker='circle', outliers=True)
builder = bp._builders[0]
self.assertEqual(sorted(builder._groups), sorted(groups))
for key, expected_v in exptected_datarect.items():
self.assertEqual(builder._data_rect[key], expected_v)
for key, expected_v in expected_scatter.items():
self.assertEqual(builder._data_scatter[key], expected_v)
for key, expected_v in expected_seg.items():
self.assertEqual(builder._data_segment[key], expected_v)
def test_no_outliers(self):
xyvalues = [7.0, 7.0, 8.0, 8.0, 9.0, 9.0]
bp = create_chart(BoxPlot, xyvalues, outliers=True)
builder = bp._builders[0]
outliers = builder._data_scatter['out_y']
self.assertEqual(len(outliers), 0)
| bsd-3-clause |
mtimmerm/IPythonNotebooks | plothelp.py | 1 | 1197 | import matplotlib
import matplotlib.pyplot as plt
import numpy
import math
def hermite_interp(dar,var,i):
n = math.floor(i)
if n>=len(dar)-1:
n=len(dar)-2
t = i-n
# Hermite basis functions
h00 = (2.0 * t**3) - (3.0 * t**2) + 1.0
h10 = t**3.0 - (2.0 * t**2) + t
h01 = (-2.0* t**3) + (3.0 * t**2)
h11 = t**3 - t**2
# Compute the interpolated value of "y"
return h00*dar[n] + h10*var[n] + h01*dar[n+1] + h11*var[n+1]
# first derivative of hermite spline
def hermite_interp1(dar,var,i):
n = math.floor(i)
if n>=len(dar)-1:
n=len(dar)-2
t = i-n
h00 = (6.0 * t**2) - (6.0 * t)
h10 = 3.0*t**2 - (4.0 * t) + 1
h01 = (-6.0* t**2) + (6.0 * t)
h11 = 3.0*t**2 - 2.0*t
return h00*dar[n] + h10*var[n] + h01*dar[n+1] + h11*var[n+1]
#second derivative of hermite spline
def hermite_interp2(dar,var,i):
n = math.floor(i)
if n>=len(dar)-1:
n=len(dar)-2
t = i-n
    # Second derivatives of the Hermite basis functions
h00 = (12.0 * t) - 6.0
h10 = 6.0*t - 4.0
h01 = (-12.0* t) + 6.0
h11 = 6.0*t - 2.0
    # Compute the second derivative of the interpolated curve
return h00*dar[n] + h10*var[n] + h01*dar[n+1] + h11*var[n+1]
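# A small usage sketch (illustrative; ys holds sample values and tangents the
# derivatives at the integer knots 0..len(ys)-1):
#
# ys = [0.0, 1.0, 0.5, 2.0]
# tangents = [0.0, 0.0, 0.0, 0.0]
# y = hermite_interp(ys, tangents, 1.5)    # spline value between knots 1 and 2
# dy = hermite_interp1(ys, tangents, 1.5)  # first derivative at the same point
# d2y = hermite_interp2(ys, tangents, 1.5) # second derivative at the same point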
| apache-2.0 |
bosszhou/ThinkStats2 | code/survival.py | 65 | 17881 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import numpy as np
import pandas
import nsfg
import thinkstats2
import thinkplot
"""
Outcome codes from http://www.icpsr.umich.edu/nsfg6/Controller?
displayPage=labelDetails&fileCode=PREG§ion=&subSec=8016&srtLabel=611932
1 LIVE BIRTH 9148
2 INDUCED ABORTION 1862
3 STILLBIRTH 120
4 MISCARRIAGE 1921
5 ECTOPIC PREGNANCY 190
6 CURRENT PREGNANCY 352
"""
FORMATS = ['pdf', 'eps', 'png']
class SurvivalFunction(object):
"""Represents a survival function."""
def __init__(self, cdf, label=''):
self.cdf = cdf
self.label = label or cdf.label
@property
def ts(self):
return self.cdf.xs
@property
def ss(self):
return 1 - self.cdf.ps
def __getitem__(self, t):
return self.Prob(t)
def Prob(self, t):
"""Returns S(t), the probability that corresponds to value t.
t: time
returns: float probability
"""
return 1 - self.cdf.Prob(t)
def Probs(self, xs):
"""Gets probabilities for a sequence of values."""
return [self.Prob(x) for x in xs]
def Mean(self):
"""Mean survival time."""
return self.cdf.Mean()
def Items(self):
"""Sorted list of (t, s) pairs."""
return zip(self.ts, self.ss)
def Render(self):
"""Generates a sequence of points suitable for plotting.
returns: tuple of (sorted times, survival function)
"""
return self.ts, self.ss
def MakeHazard(self, label=''):
"""Computes the hazard function.
sf: survival function
returns: Pmf that maps times to hazard rates
"""
ss = self.ss
lams = {}
for i, t in enumerate(self.ts[:-1]):
hazard = (ss[i] - ss[i+1]) / ss[i]
lams[t] = hazard
return HazardFunction(lams, label=label)
def MakePmf(self, filler=None):
"""Makes a PMF of lifetimes.
filler: value to replace missing values
returns: Pmf
"""
pmf = thinkstats2.Pmf()
for val, prob in self.cdf.Items():
pmf.Set(val, prob)
cutoff = self.cdf.ps[-1]
if filler is not None:
pmf[filler] = 1-cutoff
return pmf
def RemainingLifetime(self, filler=None, func=thinkstats2.Pmf.Mean):
"""Computes remaining lifetime as a function of age.
func: function from conditional Pmf to expected liftime
returns: Series that maps from age to remaining lifetime
"""
pmf = self.MakePmf(filler=filler)
d = {}
for t in sorted(pmf.Values())[:-1]:
pmf[t] = 0
pmf.Normalize()
d[t] = func(pmf) - t
#print(t, d[t])
return pandas.Series(d)
class HazardFunction(object):
"""Represents a hazard function."""
def __init__(self, d, label=''):
"""Initialize the hazard function.
d: dictionary (or anything that can initialize a series)
label: string
"""
self.series = pandas.Series(d)
self.label = label
def __getitem__(self, t):
return self.series[t]
def Render(self):
"""Generates a sequence of points suitable for plotting.
returns: tuple of (sorted times, hazard function)
"""
return self.series.index, self.series.values
def MakeSurvival(self, label=''):
"""Makes the survival function.
returns: SurvivalFunction
"""
ts = self.series.index
ss = (1 - self.series).cumprod()
cdf = thinkstats2.Cdf(ts, 1-ss)
sf = SurvivalFunction(cdf, label=label)
return sf
def Extend(self, other):
"""Extends this hazard function by copying the tail from another.
other: HazardFunction
"""
last = self.series.index[-1]
more = other.series[other.series.index > last]
self.series = pandas.concat([self.series, more])
def ConditionalSurvival(pmf, t0):
"""Computes conditional survival function.
Probability that duration exceeds t0+t, given that
duration >= t0.
pmf: Pmf of durations
t0: minimum time
    returns: SurvivalFunction
"""
cond = thinkstats2.Pmf()
for t, p in pmf.Items():
if t >= t0:
cond.Set(t-t0, p)
return SurvivalFunction(thinkstats2.Cdf(cond))
def PlotConditionalSurvival(durations):
"""Plots conditional survival curves for a range of t0.
durations: list of durations
"""
pmf = thinkstats2.Pmf(durations)
times = [8, 16, 24, 32]
thinkplot.PrePlot(len(times))
for t0 in times:
sf = ConditionalSurvival(pmf, t0)
label = 't0=%d' % t0
thinkplot.Plot(sf, label=label)
thinkplot.Show()
def PlotSurvival(complete):
"""Plots survival and hazard curves.
complete: list of complete lifetimes
"""
thinkplot.PrePlot(3, rows=2)
cdf = thinkstats2.Cdf(complete, label='cdf')
sf = SurvivalFunction(cdf, label='survival')
print(cdf[13])
print(sf[13])
thinkplot.Plot(sf)
thinkplot.Cdf(cdf, alpha=0.2)
thinkplot.Config()
thinkplot.SubPlot(2)
hf = sf.MakeHazard(label='hazard')
print(hf[39])
thinkplot.Plot(hf)
thinkplot.Config(ylim=[0, 0.75])
def PlotHazard(complete, ongoing):
"""Plots the hazard function and survival function.
complete: list of complete lifetimes
ongoing: list of ongoing lifetimes
"""
# plot S(t) based on only complete pregnancies
cdf = thinkstats2.Cdf(complete)
sf = SurvivalFunction(cdf)
thinkplot.Plot(sf, label='old S(t)', alpha=0.1)
thinkplot.PrePlot(2)
# plot the hazard function
hf = EstimateHazardFunction(complete, ongoing)
thinkplot.Plot(hf, label='lams(t)', alpha=0.5)
# plot the survival function
sf = hf.MakeSurvival()
thinkplot.Plot(sf, label='S(t)')
thinkplot.Show(xlabel='t (weeks)')
def EstimateHazardFunction(complete, ongoing, label='', shift=1e-7):
"""Estimates the hazard function by Kaplan-Meier.
http://en.wikipedia.org/wiki/Kaplan%E2%80%93Meier_estimator
complete: list of complete lifetimes
ongoing: list of ongoing lifetimes
label: string
shift: presumed additional survival of ongoing
"""
# pmf and sf of complete lifetimes
n = len(complete)
hist_complete = thinkstats2.Hist(complete)
sf_complete = SurvivalFunction(thinkstats2.Cdf(complete))
# sf for ongoing lifetimes
# The shift is a regrettable hack needed to deal with simultaneity.
# If a case is complete at some t and another case is ongoing
# at t, we presume that the ongoing case exceeds t+shift.
m = len(ongoing)
cdf = thinkstats2.Cdf(ongoing).Shift(shift)
sf_ongoing = SurvivalFunction(cdf)
lams = {}
for t, ended in sorted(hist_complete.Items()):
at_risk = ended + n * sf_complete[t] + m * sf_ongoing[t]
lams[t] = ended / at_risk
#print(t, ended, n * sf_complete[t], m * sf_ongoing[t], at_risk)
return HazardFunction(lams, label=label)
def CleanData(resp):
"""Cleans a respondent DataFrame.
resp: DataFrame of respondents
"""
resp.cmmarrhx.replace([9997, 9998, 9999], np.nan, inplace=True)
resp['agemarry'] = (resp.cmmarrhx - resp.cmbirth) / 12.0
resp['age'] = (resp.cmintvw - resp.cmbirth) / 12.0
month0 = pandas.to_datetime('1899-12-15')
dates = [month0 + pandas.DateOffset(months=cm)
for cm in resp.cmbirth]
resp['decade'] = (pandas.DatetimeIndex(dates).year - 1900) // 10
def AddLabelsByDecade(groups, **options):
"""Draws fake points in order to add labels to the legend.
groups: GroupBy object
"""
thinkplot.PrePlot(len(groups))
for name, _ in groups:
label = '%d0s' % name
thinkplot.Plot([15], [1], label=label, **options)
def EstimateSurvivalByDecade(groups, **options):
"""Groups respondents by decade and plots survival curves.
groups: GroupBy object
"""
thinkplot.PrePlot(len(groups))
for _, group in groups:
_, sf = EstimateSurvival(group)
thinkplot.Plot(sf, **options)
def PlotPredictionsByDecade(groups, **options):
"""Groups respondents by decade and plots survival curves.
groups: GroupBy object
"""
hfs = []
for _, group in groups:
hf, sf = EstimateSurvival(group)
hfs.append(hf)
thinkplot.PrePlot(len(hfs))
for i, hf in enumerate(hfs):
if i > 0:
hf.Extend(hfs[i-1])
sf = hf.MakeSurvival()
thinkplot.Plot(sf, **options)
def ResampleSurvival(resp, iters=101):
"""Resamples respondents and estimates the survival function.
resp: DataFrame of respondents
iters: number of resamples
"""
_, sf = EstimateSurvival(resp)
thinkplot.Plot(sf)
low, high = resp.agemarry.min(), resp.agemarry.max()
ts = np.arange(low, high, 1/12.0)
ss_seq = []
for _ in range(iters):
sample = thinkstats2.ResampleRowsWeighted(resp)
_, sf = EstimateSurvival(sample)
ss_seq.append(sf.Probs(ts))
low, high = thinkstats2.PercentileRows(ss_seq, [5, 95])
thinkplot.FillBetween(ts, low, high, color='gray', label='90% CI')
thinkplot.Save(root='survival3',
xlabel='age (years)',
ylabel='prob unmarried',
xlim=[12, 46],
ylim=[0, 1],
formats=FORMATS)
def EstimateSurvival(resp):
"""Estimates the survival curve.
resp: DataFrame of respondents
returns: pair of HazardFunction, SurvivalFunction
"""
complete = resp[resp.evrmarry == 1].agemarry
ongoing = resp[resp.evrmarry == 0].age
hf = EstimateHazardFunction(complete, ongoing)
sf = hf.MakeSurvival()
return hf, sf
def PlotMarriageData(resp):
"""Plots hazard and survival functions.
resp: DataFrame of respondents
"""
hf, sf = EstimateSurvival(resp)
thinkplot.PrePlot(rows=2)
thinkplot.Plot(hf)
thinkplot.Config(legend=False)
thinkplot.SubPlot(2)
thinkplot.Plot(sf)
thinkplot.Save(root='survival2',
xlabel='age (years)',
ylabel='prob unmarried',
ylim=[0, 1],
legend=False,
formats=FORMATS)
return sf
def PlotPregnancyData(preg):
"""Plots survival and hazard curves based on pregnancy lengths.
preg:
"""
complete = preg.query('outcome in [1, 3, 4]').prglngth
print('Number of complete pregnancies', len(complete))
ongoing = preg[preg.outcome == 6].prglngth
print('Number of ongoing pregnancies', len(ongoing))
PlotSurvival(complete)
thinkplot.Save(root='survival1',
xlabel='t (weeks)',
formats=FORMATS)
hf = EstimateHazardFunction(complete, ongoing)
sf = hf.MakeSurvival()
return sf
def PlotRemainingLifetime(sf1, sf2):
"""Plots remaining lifetimes for pregnancy and age at first marriage.
sf1: SurvivalFunction for pregnancy length
sf2: SurvivalFunction for age at first marriage
"""
thinkplot.PrePlot(cols=2)
rem_life1 = sf1.RemainingLifetime()
thinkplot.Plot(rem_life1)
thinkplot.Config(title='pregnancy length',
xlabel='weeks',
ylabel='mean remaining weeks')
thinkplot.SubPlot(2)
func = lambda pmf: pmf.Percentile(50)
rem_life2 = sf2.RemainingLifetime(filler=np.inf, func=func)
thinkplot.Plot(rem_life2)
thinkplot.Config(title='age at first marriage',
ylim=[0, 15],
xlim=[11, 31],
xlabel='age (years)',
ylabel='median remaining years')
thinkplot.Save(root='survival6',
formats=FORMATS)
def ReadFemResp(dct_file='2002FemResp.dct',
dat_file='2002FemResp.dat.gz',
**options):
"""Reads the NSFG respondent data.
dct_file: string file name
dat_file: string file name
returns: DataFrame
"""
dct = thinkstats2.ReadStataDct(dct_file, encoding='iso-8859-1')
df = dct.ReadFixedWidth(dat_file, compression='gzip', **options)
CleanData(df)
return df
def ReadFemResp2002():
"""Reads respondent data from NSFG Cycle 6.
returns: DataFrame
"""
usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
'evrmarry', 'finalwgt']
resp = ReadFemResp(usecols=usecols)
CleanData(resp)
return resp
def ReadFemResp2010():
"""Reads respondent data from NSFG Cycle 7.
returns: DataFrame
"""
usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
'evrmarry', 'wgtq1q16']
resp = ReadFemResp('2006_2010_FemRespSetup.dct',
'2006_2010_FemResp.dat.gz',
usecols=usecols)
resp['finalwgt'] = resp.wgtq1q16
CleanData(resp)
return resp
def ReadFemResp2013():
"""Reads respondent data from NSFG Cycle 8.
returns: DataFrame
"""
usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
'evrmarry', 'wgt2011_2013']
resp = ReadFemResp('2011_2013_FemRespSetup.dct',
'2011_2013_FemRespData.dat.gz',
usecols=usecols)
resp['finalwgt'] = resp.wgt2011_2013
CleanData(resp)
return resp
def ReadFemResp1995():
"""Reads respondent data from NSFG Cycle 5.
returns: DataFrame
"""
dat_file = '1995FemRespData.dat.gz'
names = ['a_doi', 'timesmar', 'mardat01', 'bdaycenm', 'post_wt']
colspecs = [(12359, 12363),
(3538, 3540),
(11758, 11762),
(13, 16),
(12349, 12359)]
df = pandas.read_fwf(dat_file,
compression='gzip',
colspecs=colspecs,
names=names)
df['cmmarrhx'] = df.mardat01
df['cmbirth'] = df.bdaycenm
df['cmintvw'] = df.a_doi
df['finalwgt'] = df.post_wt
df.timesmar.replace([98, 99], np.nan, inplace=True)
df['evrmarry'] = (df.timesmar > 0).astype(int)
CleanData(df)
return df
def ReadFemResp1982():
"""Reads respondent data from NSFG Cycle 4.
returns: DataFrame
"""
dat_file = '1982NSFGData.dat.gz'
names = ['cmmarrhx', 'MARNO', 'cmintvw', 'cmbirth', 'finalwgt']
#actual = ['MARIMO', 'MARNO', 'TL', 'TL', 'W5']
colspecs = [(1028, 1031),
(1258, 1259),
(841, 844),
(12, 15),
(976, 982)]
df = pandas.read_fwf(dat_file, compression='gzip', colspecs=colspecs, names=names)
df.MARNO.replace([98, 99], np.nan, inplace=True)
df['evrmarry'] = (df.MARNO > 0).astype(int)
CleanData(df)
return df[:7969]
def ReadFemResp1988():
"""Reads respondent data from NSFG Cycle 4.
returns: DataFrame
"""
dat_file = '1988FemRespData.dat.gz'
names = ['F_13'] #['CMOIMO', 'F_13', 'F19M1MO', 'A_3']
# colspecs = [(799, 803)],
colspecs = [(20, 22)]#,
# (1538, 1542),
# (26, 30),
# (2568, 2574)]
df = pandas.read_fwf(dat_file, compression='gzip', colspecs=colspecs, names=names)
# df['cmmarrhx'] = df.F19M1MO
# df['cmbirth'] = df.A_3
# df['cmintvw'] = df.CMOIMO
# df['finalwgt'] = df.W5
df.F_13.replace([98, 99], np.nan, inplace=True)
df['evrmarry'] = (df.F_13 > 0).astype(int)
# CleanData(df)
return df
def PlotResampledByDecade(resps, iters=11, predict_flag=False, omit=None):
"""Plots survival curves for resampled data.
resps: list of DataFrames
iters: number of resamples to plot
predict_flag: whether to also plot predictions
"""
for i in range(iters):
samples = [thinkstats2.ResampleRowsWeighted(resp)
for resp in resps]
sample = pandas.concat(samples, ignore_index=True)
groups = sample.groupby('decade')
if omit:
groups = [(name, group) for name, group in groups
if name not in omit]
# TODO: refactor this to collect resampled estimates and
# plot shaded areas
if i == 0:
AddLabelsByDecade(groups, alpha=0.7)
if predict_flag:
PlotPredictionsByDecade(groups, alpha=0.1)
EstimateSurvivalByDecade(groups, alpha=0.1)
else:
EstimateSurvivalByDecade(groups, alpha=0.2)
def main():
thinkstats2.RandomSeed(17)
preg = nsfg.ReadFemPreg()
sf1 = PlotPregnancyData(preg)
# make the plots based on Cycle 6
resp6 = ReadFemResp2002()
sf2 = PlotMarriageData(resp6)
ResampleSurvival(resp6)
PlotRemainingLifetime(sf1, sf2)
# read Cycles 5 and 7
resp5 = ReadFemResp1995()
resp7 = ReadFemResp2010()
# plot resampled survival functions by decade
resps = [resp5, resp6, resp7]
PlotResampledByDecade(resps)
thinkplot.Save(root='survival4',
xlabel='age (years)',
ylabel='prob unmarried',
xlim=[13, 45],
ylim=[0, 1],
formats=FORMATS)
# plot resampled survival functions by decade, with predictions
PlotResampledByDecade(resps, predict_flag=True, omit=[5])
thinkplot.Save(root='survival5',
xlabel='age (years)',
ylabel='prob unmarried',
xlim=[13, 45],
ylim=[0, 1],
formats=FORMATS)
if __name__ == '__main__':
main()
| gpl-3.0 |
jasonmhite/vuqutils | vuqutils/plot/distplots.py | 1 | 1578 | import seaborn as sb
import pandas as pd
__all__ = ["uncertainty_plot"]
def uncertainty_plot(data, center=True, relative=False, filternull=True):
"""
Take a Series object and make a relative uncertainty plot. This
centers the plot and annotates with mean and standard deviation.
Parameters
----------
data: a Pandas Series object that contains the data.
center: If true, center on mean. If false, do no centering.
Else, treat center as an explicit value to center on.
Default true.
filternull: If true (default), filter out null values in the data.
relative: If true, normalize width to the standard deviation
(default false)
"""
if filternull is True:
plotdata = data[data.notnull()]
else:
plotdata = data
if relative is True:
plotdata /= plotdata.std()
if center is True:
center = 0.
plotdata -= plotdata.mean()
elif center is False:
center = plotdata.mean()
    else:  # center is an explicit numeric value
if relative is True:
raise ValueError("Can't have relative and specified center")
else:
plotdata -= center
bbox_style = {'boxstyle': 'round', 'fc': 'white', 'lw': 2,
'alpha': .7, 'ec': 'grey',}
anntext = """$\mu={}$
$\sigma={}$""".format(data.mean(), data.std())
ax = sb.distplot(plotdata)
ax.text(.02, .88, anntext,
transform=ax.transAxes,
bbox=bbox_style,
)
ax.axvline(center, color='black')
return(ax)
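# A minimal usage sketch (illustrative; assumes a numeric pandas Series of
# samples and a working matplotlib backend):
#
# import numpy as np
# samples = pd.Series(np.random.normal(loc=5.0, scale=2.0, size=1000))
# ax = uncertainty_plot(samples, center=True, relative=True)
# ax.figure.savefig("uncertainty.png")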
| bsd-3-clause |
nayyarv/MonteGMM | Inference/ActualInf.py | 1 | 2549 | import numpy as np
from Utils.MFCCArrayGen import getIndiviudalData, emotions, speakers
# from sklearn.metrics import confusion_matrix
from RobustLikelihoodClass import Likelihood
import os
def BayesProb(utterance, numMixtures, means, diagCovs, weights):
"""
Given the MCMC values from a run, calculate probability of belonging to that class
:param utterance: np.array of shape [size][dim]
    :param numMixtures: number of Gaussian mixture components
:param means: np.array [numMCMCRuns][numMixtures][dim]
:param diagCovs: np.array [numMCMCRuns][numMixtures][dim]
:param weights: np.array [numMCMCRuns][numMixtures]
    :return: average log-likelihood of the utterance over the stored MCMC samples
"""
sys.stdout = open(os.devnull, "w")
    llEval = Likelihood(utterance, numMixtures=numMixtures)
sys.stdout = sys.__stdout__
prob = 0
for i in xrange(means.shape[0]):
prob+= llEval.loglikelihood(means[i], diagCovs[i], weights[i])
# print prob/means.shape[0]
return prob/means.shape[0]
def main(speakerIndex=0):
y_test = []
y_pred = []
numMixtures = 8
import cPickle
results = {}
for emotion in emotions:
filename = "../deciSpeechMCMC/{}-{}.txt".format(emotion, speakers[speakerIndex])
print filename
results[emotion] = {}
with open(filename) as f:
MCMCmeans, MCMCcovs, MCMCweights = cPickle.load(f)
for testEmotion in emotions:
testCorpus = getIndiviudalData(testEmotion, speakers[speakerIndex])
print "Actual Emotion: {}".format(testEmotion)
emotRes = np.zeros(len(testCorpus))
i = 0
for utterance in testCorpus:
ll = -BayesProb(utterance, 8, MCMCmeans, MCMCcovs, MCMCweights)
emotRes[i] = ll
i+=1
results[emotion][testEmotion] = emotRes
#Search for min
for actualEmotion in emotions:
valList = []
for testAgainstEmotion in emotions:
lls = results[testAgainstEmotion][actualEmotion]
valList.append(lls.reshape(len(lls),1))
valList = np.hstack(valList)
# print valList
assert (valList.shape[1] ==len(emotions))
emotIndex = valList.argmin(1) #since we saved as positive
classifiedEmotions = [emotions[i] for i in emotIndex]
TrueEmotes = [actualEmotion] * valList.shape[0]
y_test.extend(TrueEmotes)
y_pred.extend(classifiedEmotions)
#some Measure of inference
print y_test
print y_pred
import sys
if __name__ == '__main__':
main(int(sys.argv[1])) | mit |
chrsrds/scikit-learn | sklearn/tests/test_docstring_parameters.py | 1 | 5292 | # Authors: Alexandre Gramfort <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
import inspect
import warnings
import importlib
from pkgutil import walk_packages
from inspect import signature
import sklearn
from sklearn.utils import IS_PYPY
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import check_docstring_parameters
from sklearn.utils.testing import _get_func_name
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.deprecation import _is_deprecated
import pytest
PUBLIC_MODULES = set([pckg[1] for pckg in walk_packages(prefix='sklearn.',
path=sklearn.__path__)
if not ("._" in pckg[1] or ".tests." in pckg[1])])
# functions to ignore args / docstring of
_DOCSTRING_IGNORES = [
'sklearn.utils.deprecation.load_mlcomp',
'sklearn.pipeline.make_pipeline',
'sklearn.pipeline.make_union',
'sklearn.utils.extmath.safe_sparse_dot',
'sklearn.utils._joblib'
]
# Methods where y param should be ignored if y=None by default
_METHODS_IGNORE_NONE_Y = [
'fit',
'score',
'fit_predict',
'fit_transform',
'partial_fit',
'predict'
]
# numpydoc 0.8.0's docscrape tool raises because of collections.abc under
# Python 3.7
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
@pytest.mark.skipif(IS_PYPY, reason='test segfaults on PyPy')
def test_docstring_parameters():
# Test module docstring formatting
# Skip test if numpydoc is not found
try:
import numpydoc # noqa
except ImportError:
raise SkipTest("numpydoc is required to test the docstrings")
from numpydoc import docscrape
incorrect = []
for name in PUBLIC_MODULES:
if name == 'sklearn.utils.fixes':
# We cannot always control these docstrings
continue
with warnings.catch_warnings(record=True):
module = importlib.import_module(name)
classes = inspect.getmembers(module, inspect.isclass)
# Exclude imported classes
classes = [cls for cls in classes if cls[1].__module__ == name]
for cname, cls in classes:
this_incorrect = []
if cname in _DOCSTRING_IGNORES or cname.startswith('_'):
continue
if inspect.isabstract(cls):
continue
with warnings.catch_warnings(record=True) as w:
cdoc = docscrape.ClassDoc(cls)
if len(w):
raise RuntimeError('Error for __init__ of %s in %s:\n%s'
% (cls, name, w[0]))
cls_init = getattr(cls, '__init__', None)
if _is_deprecated(cls_init):
continue
elif cls_init is not None:
this_incorrect += check_docstring_parameters(
cls.__init__, cdoc)
for method_name in cdoc.methods:
method = getattr(cls, method_name)
if _is_deprecated(method):
continue
param_ignore = None
# Now skip docstring test for y when y is None
# by default for API reason
if method_name in _METHODS_IGNORE_NONE_Y:
sig = signature(method)
if ('y' in sig.parameters and
sig.parameters['y'].default is None):
param_ignore = ['y'] # ignore y for fit and score
result = check_docstring_parameters(
method, ignore=param_ignore)
this_incorrect += result
incorrect += this_incorrect
functions = inspect.getmembers(module, inspect.isfunction)
# Exclude imported functions
functions = [fn for fn in functions if fn[1].__module__ == name]
for fname, func in functions:
# Don't test private methods / functions
if fname.startswith('_'):
continue
if fname == "configuration" and name.endswith("setup"):
continue
name_ = _get_func_name(func)
if (not any(d in name_ for d in _DOCSTRING_IGNORES) and
not _is_deprecated(func)):
incorrect += check_docstring_parameters(func)
msg = '\n'.join(incorrect)
if len(incorrect) > 0:
raise AssertionError("Docstring Error:\n" + msg)
@ignore_warnings(category=DeprecationWarning)
def test_tabs():
# Test that there are no tabs in our source files
for importer, modname, ispkg in walk_packages(sklearn.__path__,
prefix='sklearn.'):
if IS_PYPY and ('_svmlight_format' in modname or
'feature_extraction._hashing' in modname):
continue
# because we don't import
mod = importlib.import_module(modname)
try:
source = inspect.getsource(mod)
except IOError: # user probably should have run "make clean"
continue
assert '\t' not in source, ('"%s" has tabs, please remove them ',
'or add it to theignore list'
% modname)
| bsd-3-clause |
lyhrobin00007/FlaskCTA | app/pyOption/mc_test.py | 1 | 5838 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 02 14:08:57 2016
@author: 020242
"""
#import ChinaCalendar
#from QuantLib import *
#import datetime
#import csv
#import pandas as pd
import numpy as np
from time import time
from random import gauss, seed
import random
from math import exp, sqrt, log
from numba import jit
#=====================================================
#FR007IRS5Y 2016-12-01
@jit
def calcMC(S0,sigma,r,dt,M,I):
S = [None] * I
for i in range(I):
S[i] = [0]*(M+1)
#end for
for i in range(I):
path = S[i]
for t in range(M+1):
if t == 0:
path[t] = S0
else:
# z = gauss(0.0,1.0)
z = random.normalvariate(0.0,1.0)
                # risk-neutral GBM step (the standard Ito drift correction is 0.5 * sigma**2)
                St = path[t-1] * exp((r - 0.5 * sigma ** 2) * dt + sigma * sqrt(dt) * z)
path[t] = St
#end if
#end for
S[i] = path
#end for
return S
#end def
#S0 = 0.0357
#S0 = 0.029893 #十年国债收益率
#S0 = 0.029893 #10-year government bond yield
#T = 91. / 365
T = 0.25
#sigma = 0.103993516
#sigma = 0.056285 #annualized volatility of the 10-year government bond yield
sigma = 0.13
#sigma = 0.2
r = 0.027
M = 91
dt = T / M
I = 250000
firmRate = 0.0372
#seed(20000)
S = calcMC(S0,sigma,r,dt,M,I)
#------------------------------------------------------
#Type 1: Down and Out Alternative
#1. 如果未曾触发敲出时事件,年化收益率R0
#2. 如果敲出,且期末收益率水平大院100%,年化收益率R0
#3. 如果敲出,且期末收益率水平小于100%, 年化收益率0
knockOut = 0.95
R0 = 0.04
p1 = float(sum([min(path) > S0 * knockOut for path in S])) / I
p2 = float(sum([min(path) < S0 * knockOut and path[-1] >= S0 for path in S])) / I
p3 = float(sum([min(path) < S0 * knockOut and path[-1] < S0 for path in S])) / I
optionPrice = R0 * (p1+p2) + 0 * p3
print 'knock-out option %.10f, base rate: %.10f'%(optionPrice,firmRate - optionPrice)
#------------------------------------------------------
#Type 2: Double No Touch
#1. If neither knock-out barrier is ever touched, annualized return R0
#2. If either barrier is touched during the term, annualized return 0
knockOut1 = 0.95
knockOut2 = 1.05
R0 = 0.03
p1 = float(sum([min(path) > S0 * knockOut1 and max(path) < S0 * knockOut2 for path in S])) / I
p2 = float(sum([min(path) < S0 * knockOut1 or max(path) > S0 * knockOut2 for path in S])) / I
optionPrice = R0 * p1 + 0 * p2
print 'double-no-touch option %.10f, base rate: %.10f'%(optionPrice,firmRate - optionPrice)
#------------------------------------------------------
#Type 3: Out of range rate
#1. Final payoff = R0 * (number of days the rate stays inside the specified range / total days)
range1 = 0.95
range2 = 1.05
R0 = 0.03
p1 = sum([float(sum([i >= S0 * range1 and i <= S0 * range2 for i in path])) / (M+1) for path in S]) / I
#p2 = sum([float(sum([i > S0 * range2 or i < S0 * range1 for i in path])) / (M+1) for path in S]) / I
optionPrice = R0 * p1
print 'out-of-range option %.10f, base rate: %.10f'%(optionPrice,firmRate - optionPrice)
#------------------------------------------------------
#Type 4: Moderate call (capped bullish) option
#1. If the knock-out event is never triggered, the annualized return is max(0, final price / initial price - 100%)
#2. If the knock-out event is triggered, the annualized return is the fixed rebate R0
knockOut = 1.1
R0 = 0.01
payOff = []
for path in S:
if max(path) > S0 * knockOut:
payOff.append(R0)
else:
payOff.append(max(path[-1] / S0 - 1,0))
#end if
#end for
p1 = float(sum([max(path) >= S0 * knockOut for path in S])) / I
p2 = float(sum([max(path) < S0 * knockOut and path[-1] <= S0 for path in S])) / I
p3 = float(sum([max(path) < S0 * knockOut and path[-1] > S0 for path in S])) / I
#p2 = sum([float(sum([i > S0 * range2 or i < S0 * range1 for i in path])) / (M+1) for path in S]) / I
optionPrice = np.mean(payOff)
print 'moderate call option: %.10f, base rate: %.10f'%(optionPrice,firmRate - optionPrice)
#------------------------------------------------------
#Type 5: call-spread
baseRate = 0
participationRate = 0.75
point1 = 1
point2 = 1.1
#payOff = float(sum([baseRate + np.min((knockOut2-knockOut1)*participationRate,np.max((path[-1] / S0 - 1) * participationRate,0)) for path in S])) / I
payOff = []
for path in S:
if path[-1] <= S0 * point1:
payOff.append(baseRate*T)
elif path[-1] >= S0 * point2:
payOff.append((baseRate + (point2-point1)*participationRate)*T)
else:
payOff.append((baseRate + (path[-1] / S0 - point1) * participationRate)*T)
#end if
#end for
p1 = float(sum([max(path) <= S0 * point1 for path in S])) / I
p2 = float(sum([max(path) > S0 * point1 and path[-1] < S0 * point2 for path in S])) / I
p3 = float(sum([max(path) >= S0 * point2 for path in S])) / I
#p2 = sum([float(sum([i > S0 * range2 or i < S0 * range1 for i in path])) / (M+1) for path in S]) / I
optionPrice = np.mean(payOff)
print 'call-spread: %.10f, base rate: %.10f'%(optionPrice,firmRate - optionPrice)
#
##------------------------------------------------------
##Type 6: call
#
##payOff = []
##for path in S:
## if path[-1] <= S0 * point1:
## payOff.append(baseRate*T)
## elif path[-1] >= S0 * point2:
## payOff.append((baseRate + (point2-point1)*participationRate)*T)
## else:
## payOff.append((baseRate + (path[-1] / S0 - point1) * participationRate)*T)
## #end if
###end for
##
##p1 = float(sum([max(path) <= S0 * point1 for path in S])) / I
##p2 = float(sum([max(path) > S0 * point1 and path[-1] < S0 * point2 for path in S])) / I
##p3 = float(sum([max(path) >= S0 * point2 for path in S])) / I
#
##p2 = sum([float(sum([i > S0 * range2 or i < S0 * range1 for i in path])) / (M+1) for path in S]) / I
#
#optionPrice = float(sum([max(0.0,path[-1] / S0 - 1) for path in S])) / I
#print 'call: %.10f, base rate: %.10f'%(optionPrice,firmRate - optionPrice)
| mit |
khkaminska/bokeh | bokeh/protocol.py | 37 | 3282 | from __future__ import absolute_import
import json
import logging
import datetime as dt
import calendar
import decimal
from .util.serialization import transform_series, transform_array
import numpy as np
try:
import pandas as pd
is_pandas = True
except ImportError:
is_pandas = False
try:
from dateutil.relativedelta import relativedelta
is_dateutil = True
except ImportError:
is_dateutil = False
from .settings import settings
log = logging.getLogger(__name__)
class BokehJSONEncoder(json.JSONEncoder):
def transform_python_types(self, obj):
"""handle special scalars, default to default json encoder
"""
# Pandas Timestamp
if is_pandas and isinstance(obj, pd.tslib.Timestamp):
return obj.value / 10**6.0 #nanosecond to millisecond
elif np.issubdtype(type(obj), np.float):
return float(obj)
elif np.issubdtype(type(obj), np.int):
return int(obj)
elif np.issubdtype(type(obj), np.bool_):
return bool(obj)
# Datetime
# datetime is a subclass of date.
elif isinstance(obj, dt.datetime):
return calendar.timegm(obj.timetuple()) * 1000. + obj.microsecond / 1000.
# Date
elif isinstance(obj, dt.date):
return calendar.timegm(obj.timetuple()) * 1000.
# Numpy datetime64
elif isinstance(obj, np.datetime64):
epoch_delta = obj - np.datetime64('1970-01-01T00:00:00Z')
return (epoch_delta / np.timedelta64(1, 'ms'))
# Time
elif isinstance(obj, dt.time):
return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond / 1000.
elif is_dateutil and isinstance(obj, relativedelta):
return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours,
minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds)
# Decimal
elif isinstance(obj, decimal.Decimal):
return float(obj)
else:
return super(BokehJSONEncoder, self).default(obj)
def default(self, obj):
#argh! local import!
from .plot_object import PlotObject
from .properties import HasProps
from .colors import Color
## array types
if is_pandas and isinstance(obj, (pd.Series, pd.Index)):
return transform_series(obj)
elif isinstance(obj, np.ndarray):
return transform_array(obj)
elif isinstance(obj, PlotObject):
return obj.ref
elif isinstance(obj, HasProps):
return obj.changed_properties_with_values()
elif isinstance(obj, Color):
return obj.to_css()
else:
return self.transform_python_types(obj)
def serialize_json(obj, encoder=BokehJSONEncoder, **kwargs):
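    """Serialize *obj* to a JSON string using :class:`BokehJSONEncoder`.
    Illustrative usage (the payload below is arbitrary)::
        payload = {"x": np.arange(3), "when": dt.datetime(2015, 1, 1)}
        json_string = serialize_json(payload)
        roundtrip = deserialize_json(json_string)
    """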
if settings.pretty(False):
kwargs["indent"] = 4
return json.dumps(obj, cls=encoder, allow_nan=False, **kwargs)
deserialize_json = json.loads
serialize_web = serialize_json
deserialize_web = deserialize_json
def status_obj(status):
return {'msgtype': 'status',
'status': status}
def error_obj(error_msg):
return {
'msgtype': 'error',
'error_msg': error_msg}
| bsd-3-clause |
oscarlazoarjona/quantum_memories | quantum_memories/settings_lambda.py | 1 | 11371 | # -*- coding: utf-8 -*-
# ***********************************************************************
# Copyright (C) 2016 - 2017 Oscar Gerardo Lazo Arjona *
# <[email protected]> *
# ***********************************************************************
r"""This file establishes all the parameters needed."""
# We import fundamental constants:
from scipy.constants import c, hbar, epsilon_0
from scipy.constants import physical_constants
from math import sqrt, log, pi
from misc import vapour_number_density
# These flags control whether my software FAST [1] is used to calculate
# the parameters of the atom or the Bloch equations.
rewrite = True; rewrite = False
calculate_atom = False # ; calculate_atom = True
calculate_bloch = False # ; calculate_bloch=True
make_smoother = True # ; make_smoother=False
change_rep_rate = True # ; change_rep_rate=False
change_read_power = True # ; change_read_power=False
ignore_lower_f = False; ignore_lower_f = True
run_long = False; run_long = True
optimize = True; optimize = False
verbose = 1
# We choose the units we want.
units = "SI" # ; units="fancy"
if verbose >= 2: print "We are using "+units+" units!"
a0 = physical_constants["Bohr radius"][0]
e_charge = physical_constants["elementary charge"][0]
kB = physical_constants["Boltzmann constant"][0]
# The memory protocol to be used.
red_detuned = True
magic = False
# The extent of the simulation given by the number of dynamic variables
# Nrho, the number of time steps Nt, and the number of z points Nz.
Nrho = 2
Nt = 25500; Nz = 50
# The number of velocity groups to consider (better an odd number)
Nv = 9
# The number of standard deviations to consider on either side of the velocity
# distribution.
Nsigma = 4
# The data for the time discretization.
# The total time of the simulation (in s).
T = 8e-9
# T = 16e-9
# The time step.
dt = T/(Nt-1)
# The data for the spacial discretization.
# Cell length (in m).
L = 0.072
# Spatial extent of the simulation (in m).
D = 1.05 * L
optical_depth = 0.05e5
# The simulation will be done spanning -D/2 <= z <= D/2
zL = -0.5 * D # left boundary of the simulation
zR = +0.5 * D # right boundary of the simulation
######################
# The temperature of the cell.
Temperature = 90.0 + 273.15
# We should be able to choose whether to keep all of data, to just keep a
# sample at a certain rate, or to keep only the current-time data.
keep_data = "all"
keep_data = "sample"
# The sampling rate for the output. If sampling_rate=2 every second time step
# will be saved in memory and returned. If Nt is a multiple of sampling_rate
# then the length of the output should be Nt/sampling_rate.
sampling_rate = 50
################################################
# The characteristics of the beams:
# The waists of the beams (in m):
w1 = 280e-6
w2 = 320e-6
# The full widths at half maximum of the gaussian envelope of the powers
# spectra (in Hz).
sigma_power1 = 1.0e9
sigma_power2 = 1.0e9
sigma_power1 = 0.807222536902e9
sigma_power1 = 1.0e9
sigma_power2 = 0.883494520871e9
# We calculate the duration of the pulses from the standard deviations
tau1 = 2/pi * sqrt(log(2.0))/sigma_power1
tau2 = 2/pi * sqrt(log(2.0))/sigma_power2
tau1 = 2*sqrt(2)*log(2)/pi / sigma_power1
tau2 = 2*sqrt(2)*log(2)/pi / sigma_power2
# The time of arrival of the beams
t0s = 1.1801245283489222e-09
t0w = t0s
t0r = t0w + 3.5e-9
alpha_rw = 1.0
t_cutoff = t0r+D/2/c+tau1
t_cutoff = 3.0e-9
######################
# The detuning of the signal field (in rad/s):
if red_detuned:
delta1 = -2*pi*6e9
else:
delta1 = 2*pi*6e9
# The detuning of the control field (in rad/s):
delta2 = delta1
# This is the two-photon transition condition.
######################
# We choose an atom:
element = "Rb"; isotope = 85; n_atom = 5
element = "Rb"; isotope = 87; n_atom = 5
element = "Cs"; isotope = 133; n_atom = 6
n_atomic0 = vapour_number_density(Temperature, element)
# We calculate (or impose) the properties of the atom:
if calculate_atom:
from fast import State, Transition, make_list_of_states
from fast import calculate_boundaries, Integer, calculate_matrices
from fast import fancy_r_plot, fancy_matrix_plot, Atom
from matplotlib import pyplot
atom = Atom(element, isotope)
g = State(element, isotope, n_atom, 0, 1/Integer(2))
e = State(element, isotope, n_atom, 1, 3/Integer(2))
fine_states = [g, e]
hyperfine_states = make_list_of_states(fine_states, "hyperfine", verbose=0)
magnetic_states = make_list_of_states(fine_states, "magnetic", verbose=0)
aux = calculate_boundaries(fine_states, magnetic_states)
fine_bounds, hyperfine_bounds = aux
g1 = hyperfine_states[0]
g2 = hyperfine_states[1]
print fine_bounds, hyperfine_bounds
g1_index = hyperfine_bounds[0][0]
e1_index = fine_bounds[1][0]
g2_index = hyperfine_bounds[1][-1]-1
e2_index = fine_bounds[1][-1]-1
if verbose >= 1:
print
print "Calculating atomic properties ..."
print "We are choosing the couplings of"
print magnetic_states[g1_index], magnetic_states[e1_index]
print "For the g1 <-> e transition, and the couplings of"
print magnetic_states[g2_index], magnetic_states[e2_index]
print "For the g2 <-> e transition."
# We calculate the matrices for the given states.
Omega = 1.0 # We choose the calculated frequencies to be in radians.
omega, gamma, r = calculate_matrices(magnetic_states, Omega)
# We plot these matrices.
path = ''; name = element+str(isotope)
fig = pyplot.figure(); ax = fig.add_subplot(111)
fancy_matrix_plot(ax, omega, magnetic_states, path, name+'_omega.png',
take_abs=True, colorbar=True)
fig = pyplot.figure(); ax = fig.add_subplot(111)
fancy_matrix_plot(ax, gamma, magnetic_states, path, name+'_gamma.png',
take_abs=True, colorbar=True)
fig = pyplot.figure(); ax = fig.add_subplot(111)
fancy_r_plot(r, magnetic_states, path, name+'_r.png',
complex_matrix=True)
pyplot.close("all")
# We get the parameters for the simplified scheme.
# The couplings.
r31 = r[2][e1_index][g1_index]
r32 = r[2][e2_index][g2_index]
# The FAST function calculate_matrices always returns r in
# Bohr radii, so we convert. By contrast, it returns omega
# and gamma in units scaled by Omega. If Omega=1e6 this means
# 10^6 rad/s. So we do not have to rescale omega or gamma.
r31 = r31*a0
r32 = r32*a0
# The decay frequencies.
if red_detuned:
gamma31 = gamma[e2_index][g2_index]
gamma32 = 0
else:
gamma31 = 0
gamma32 = gamma[e2_index][g2_index]
omega31 = Transition(e, g1).omega
omega32 = Transition(e, g2).omega
omega21 = Transition(g2, g1).omega
print gamma31, gamma32
print omega31, omega32, omega21
print r31, r32
print atom.mass
else:
if (element, isotope) == ("Rb", 85):
if red_detuned:
gamma31, gamma32 = (38107518.888, 0.0)
else:
gamma31, gamma32 = (0.0, 38107518.888)
omega31, omega32 = (2.41420163422e+15, 2.41418256014e+15)
omega21 = 19074074839.2
r31, r32 = (1.73263595681e-10, 2.23682340192e-10)
mass = 1.40999341816e-25
elif (element, isotope) == ("Rb", 87):
if red_detuned:
gamma31, gamma32 = (38107518.888, 0.0)
else:
gamma31, gamma32 = (0.0, 38107518.888)
omega31, omega32 = (2.41421734741e+15, 2.41417440383e+15)
omega21 = 42943577360.1
r31, r32 = (1.29143059323e-10, 2.23682340192e-10)
mass = 1.44316087206e-25
elif (element, isotope) == ("Cs", 133):
if red_detuned:
gamma31, gamma32 = (32886191.8978, 0.0)
else:
gamma31, gamma32 = (0.0, 32886191.8978)
omega31, omega32 = (2.20999035592e+15, 2.20993259691e+15)
omega21 = 57759008871.6
r31, r32 = (2.00516655729e-10, 2.37254506627e-10)
mass = 2.2069469161e-25
# The frequencies of the optical fields.
omega_laser1 = delta1 + omega31
omega_laser2 = delta2 + omega32
# A decoherence frequency
gammaB = 2*pi*15e6
# ######################
# The energies of the photons.
energy_phot1 = hbar*omega_laser1
energy_phot2 = hbar*omega_laser2
# The energies of the pulses.
if (magic and red_detuned) or (not magic and not red_detuned):
energy_pulse31 = 25e-12 # Joules.
energy_pulse32 = 1*energy_phot2 # Joules.
else:
energy_pulse31 = 1*energy_phot1 # Joules.
energy_pulse32 = 25e-12 # Joules.
################################################
# The fancy units should be picked so that the factors multiplied in
# each of the terms of the equations are of similar magnitude.
# Ideally, the various terms should also be of similar magnitude, but
# changing the units will not change the relative importance of terms.
# Otherwise physics would change depending on the units!
# However, it should be possible to choose units such that the largest
# terms should be close to 1.
if units == "SI":
Omega = 1.0 # The frequency unit in Hz.
distance_unit = 1.0 # The distance unit in m.
elif units == "fancy":
# The frequency scale for frequency in Hz.
Omega = 1e9
# Omega=1.0
# The distance unit in m.
distance_unit = 1.0
# An interesting option would be to make
# distance_unit=c*Omega
# This way the beams will propagate in time-space diagrams at 45 degree
# angles.
# To use fancy units we need to rescale fundamental constants.
# [ hbar ] = J s
# = kg * m^2 * s / s^2
# = kg * m^2 * Hz
hbar = hbar/distance_unit**2 / Omega # fancy units.
# [ epsilon_0 ] = A^2 * s^4 / ( kg * m^3 )
# = C^2 * s^2 / ( kg * m^3 )
# = C^2 / ( Hz^2 kg * m^3 )
epsilon_0 = epsilon_0 * Omega**2 * distance_unit**3
# [ c ] = m / s
c = c/distance_unit/Omega
# [ kB ] = J K^-1
# = (kg m^2/s^2) K^-1
# = (kg m^2 Hz^2) K^-1
kB = kB / distance_unit**2 / Omega**2
# Rescale time:
T = T*Omega
dt = dt*Omega
# We must also rescale the cell:
L = L/distance_unit
D = D/distance_unit
zL = zL/distance_unit
zR = zR/distance_unit
# We must also rescale the characteristics of the pulses.
w1 = w1/distance_unit
w2 = w2/distance_unit
sigma_power1 = sigma_power1/Omega
sigma_power2 = sigma_power2/Omega
tau1 = tau1*Omega
tau2 = tau2*Omega
t0s = t0s*Omega
t0w = t0w*Omega
t0r = t0r*Omega
t_cutoff = t_cutoff*Omega
gamma31 = gamma31/Omega
gamma32 = gamma32/Omega
omega21 = omega21/Omega
omega31 = omega31/Omega
omega32 = omega32/Omega
delta1 = delta1/Omega
delta2 = delta2/Omega
omega_laser1 = omega_laser1/Omega
omega_laser2 = omega_laser2/Omega
gammaB = gammaB/Omega
r31 = r31/distance_unit
r32 = r32/distance_unit
# J = kg * m^2 / s^2
energy_phot1 = energy_phot1 / distance_unit**2 / Omega**2
energy_phot2 = energy_phot2 / distance_unit**2 / Omega**2
    energy_pulse31 = energy_pulse31 / distance_unit**2 / Omega**2
    energy_pulse32 = energy_pulse32 / distance_unit**2 / Omega**2
# [1] https://github.com/oscarlazoarjona/fast
| gpl-3.0 |
credp/lisa | lisa/notebook.py | 2 | 10449 | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2019, Arm Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Various utilities for interactive notebooks.
"""
import functools
import collections
import warnings
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backend_bases import MouseButton
from cycler import cycler as make_cycler
import mplcursors
from ipywidgets import widgets, Layout, interact
from IPython.display import display
from lisa.utils import is_running_ipython
COLOR_CYCLE = [
'#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00'
]
"""
Colorblind-friendly cycle, see https://gist.github.com/thriveth/8560036
"""
plt.rcParams['axes.prop_cycle'] = make_cycler(color=COLOR_CYCLE)
class WrappingHBox(widgets.HBox):
"""
HBox that will overflow on multiple lines if the content is too large to
fit on one line.
"""
def __init__(self, *args, **kwargs):
layout = Layout(
# Overflow items to the next line rather than hiding them
flex_flow='row wrap',
# Evenly spread on one line of items
justify_content='space-around',
)
super().__init__(*args, layout=layout, **kwargs)
# Make a subclass so we can integrate better with mplcursors
class _DataframeLinkMarker(mpl.lines.Line2D):
pass
# Tell mplcursors that we are never selecting the marker line, so that it
# will still show the coordinates of the data that were plotted, rather
# than useless coordinates of the marker
@mplcursors.compute_pick.register(_DataframeLinkMarker)
def _(artist, event):
return None
def _make_vline(axis, *args, **kwargs):
vline = axis.axvline(*args, **kwargs)
assert type(vline) is mpl.lines.Line2D # pylint: disable=unidiomatic-typecheck
vline.__class__ = _DataframeLinkMarker
vline.set_visible(False)
return vline
def axis_link_dataframes(axis, df_list, before=1, after=5, cursor_color='red', follow_cursor=False):
"""
Link some dataframes to an axis displayed in the interactive matplotlib widget.
:param axis: Axis to link to.
:type axis: matplotlib.axes.Axes
:param df_list: List of pandas dataframe to link.
:type df_list: list(pandas.DataFrame)
:param before: Number of dataframe rows to display before the selected
location.
:type before: int
:param after: Number of dataframe rows to display after the selected
location.
:type after: int
:param cursor_color: Color of the vertical line added at the clicked
location.
:type cursor_color: str
:param follow_cursor: If ``True``, the cursor will be followed without the
need to click.
:type follow_cursor: bool
When the user clicks on the graph, a vertical marker will appear and the
dataframe slice will update to show the relevant row.
.. note:: This requires the matplotlib widget enabled using ``%matplotlib
widget`` magic.
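    **Example** (minimal sketch, assuming ``df`` is any time-indexed
    :class:`pandas.DataFrame` that has already been plotted)::
        fig, axis = plt.subplots()
        df.plot(ax=axis)
        axis_link_dataframes(axis, [df])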
"""
df_list = [df for df in df_list if not df.empty]
output_list = [widgets.Output() for df in df_list]
layout = Layout(
# Overflow items to the next line rather than hiding them
flex_flow='row wrap',
# Evenly spread on one line of item when there is more than one item,
# align left otherwise
justify_content='space-around' if len(df_list) > 1 else 'flex-start',
)
hbox = widgets.HBox(output_list, layout=layout)
cursor_vline = _make_vline(axis, color=cursor_color)
def show_loc(loc):
cursor_vline.set_xdata(loc)
cursor_vline.set_visible(True)
for df, output in zip(df_list, output_list):
if loc < df.index[0]:
iloc = 0
elif loc > df.index[-1]:
iloc = -1
else:
iloc = df.index.get_loc(loc, method='ffill')
index_loc = df.index[iloc]
begin = max(iloc - before, 0)
end = min(iloc + after, len(df))
sliced_df = df.iloc[begin:end]
def highlight_row(row):
if row.name == index_loc: # pylint: disable=cell-var-from-loop
return ['background: lightblue'] * len(row)
else:
return [''] * len(row)
styler = sliced_df.style.apply(highlight_row, axis=1)
styler = styler.set_properties(**{
'text-align': 'left',
# perserve multiple consecutive spaces
'white-space': 'pre',
# Make sure all chars have the same width to preserve column
# alignments in preformatted strings
'font-family': 'monospace',
})
# wait=True avoids flicker by waiting for new content to be ready
# to display before clearing the previous one
output.clear_output(wait=True)
with output:
display(styler)
init_loc = min((df.index[0] for df in df_list), default=0)
show_loc(init_loc)
def handler(event):
loc = event.xdata
return show_loc(loc)
event = 'motion_notify_event' if follow_cursor else 'button_press_event'
axis.get_figure().canvas.mpl_connect(event, handler)
display(hbox)
def axis_cursor_delta(axis, colors=('blue', 'green'), buttons=(MouseButton.LEFT, MouseButton.RIGHT)):
"""
Display the time delta between two vertical lines drawn on clicks.
:param axis: Axis to link to.
:type axis: matplotlib.axes.Axes
:param colors: List of colors to use for vertical lines.
:type colors: list(str)
:param buttons: Mouse buttons to use for each vertical line.
:type buttons: list(matplotlib.backend_bases.MouseButton)
.. note:: This requires the matplotlib widget enabled using
``%matplotlib widget`` magic.
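    **Example** (minimal sketch, reusing an ``axis`` from an existing
    interactive plot)::
        axis_cursor_delta(axis, colors=('blue', 'green'))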
"""
delta_widget = widgets.Text(
value='0',
placeholder='0',
description='Cursors delta',
disabled=False,
)
vlines = [
_make_vline(axis, color=color)
for color in colors
]
assert len(vlines) == 2
vlines_map = dict(zip(buttons, vlines))
vlines_loc = collections.defaultdict(
lambda: min(axis.get_xbound())
)
def handler(event):
loc = event.xdata
button = event.button
vline = vlines_map[button]
vlines_loc[button] = loc
vline.set_xdata(loc)
vline.set_visible(True)
locs = [
vlines_loc[button]
for button in buttons
]
delta = locs[1] - locs[0]
delta_widget.value = str(delta)
axis.get_figure().canvas.mpl_connect('button_press_event', handler)
display(delta_widget)
def interact_tasks(trace, tasks=None, kind=None):
"""
Decorator to make a block of code parametrized on a task that can be
selected from a dropdown.
:param trace: Trace object in use
:type trace: lisa.trace.Trace
:param tasks: List of tasks that are available. See ``kind`` for
alternative way of specifying tasks.
:type tasks: list(int or str or lisa.trace.TaskID) or None
:param kind: Alternatively to ``tasks``, a kind can be provided and the
tasks will be selected from the trace for you. It can be:
* ``rtapp`` to select all rt-app tasks
* ``all`` to select all tasks.
:type kind: str or None
**Example**::
trace = Trace('trace.dat')
# Allow selecting any rtapp task
@interact_tasks(trace, kind='rtapp')
def do_plot(task):
trace.analysis.load_tracking.plot_task_signals(task)
"""
if tasks is not None:
tasks = [
trace.get_task_id(task, update=False)
for task in tasks
]
else:
kind = kind or 'all'
if kind == 'all':
tasks = trace.task_ids
elif kind == 'rtapp':
tasks = trace.analysis.rta.rtapp_tasks
else:
raise ValueError(f'Unknown task kind: {kind}')
# Map of friendly names to actual objects
task_map = {
str(task): task
for task in tasks
}
def decorator(f):
@functools.wraps(f)
@interact
def wrapper(task=sorted(task_map.keys())):
task = task_map[task]
return f(task)
return wrapper
return decorator
def make_figure(width, height, nrows, ncols, interactive=None, **kwargs):
"""
Make a :class:`matplotlib.figure.Figure` and its axes.
:param width: Width of the figure.
:type width: int
:param height: Height of the figure.
:type height: int
:param interactive: If ``True``, create an interactive figure. Defaults to
``True`` when running under IPython, ``False`` otherwise.
:type interactive: bool or None
:Variable keyword arguments: Forwarded to :class:`matplotlib.figure.Figure`
:returns: A tuple of:
* :class:`matplotlib.figure.Figure`
* :class:`matplotlib.axes.Axes` as a scalar, an iterable (1D) or iterable of iterable matrix (2D)
"""
if interactive is None:
interactive = is_running_ipython()
if not interactive and tuple(map(int, mpl.__version__.split('.'))) <= (3, 0, 3):
warnings.warn('This version of matplotlib does not allow saving figures from axis created using Figure(), forcing interactive=True')
interactive = True
width *= ncols
height *= nrows
if interactive:
figure, axes = plt.subplots(
figsize=(width, height),
nrows=nrows,
ncols=ncols,
**kwargs,
)
else:
figure = Figure(figsize=(width, height))
axes = figure.subplots(ncols=ncols, nrows=nrows, **kwargs)
return (figure, axes)
# vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab
| apache-2.0 |
LumPenPacK/NetworkExtractionFromImages | win_build/nefi2_win_amd64_msvc_2015/site-packages/numpy/doc/creation.py | 118 | 5507 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists,
and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64. ::
 >>> np.zeros((2, 3))
 array([[ 0., 0., 0.], [ 0., 0., 0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
More generic ascii files can be read using the io package in scipy.
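For instance, a simple comma-separated file (the file name below is only a
placeholder) can be loaded directly into an array with: ::
 >>> np.genfromtxt('data.csv', delimiter=',')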
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!) If a good C or C++ library exists that
read the data, one can wrap that library with a variety of techniques though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
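As a small illustration of such a raw-binary round trip (again with a
placeholder file name): ::
 >>> a = np.arange(4, dtype=np.int32)
 >>> a.tofile('data.bin')
 >>> np.fromfile('data.bin', dtype=np.int32)
 array([0, 1, 2, 3], dtype=int32)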
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common uses are use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
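For example: ::
 >>> np.random.rand(2, 3) # 2x3 array of uniform random samples from [0, 1)
 >>> np.diag([1, 2, 3]) # 3x3 matrix with 1, 2, 3 on the diagonal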
"""
from __future__ import division, absolute_import, print_function
| bsd-2-clause |
CassioAmador/profile_tcabr | visualization_tools/test_profile_bottollier_sim.py | 1 | 4261 | # -*- coding: utf-8 -*-
"""Test simulated data with the Bottollier method"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import integrate, optimize, interpolate
import sys
sys.path.insert(0, './../src/')
import proc_profile_bottollier as ppb
def density(r, n0=1.65e19, alpha=1.2):
"""Perfil radial de densidade."""
return n0 * np.power((1 - (r / .18) ** 2), alpha)
def r2f_plasma(r, n0=1.65e19, alpha=1.2):
"""Perfil radial da frequência de plasma."""
return 8.978663 * np.sqrt(density(r, n0, alpha))
def f_plasma2ne(f):
"""Converte frequência de plasma na densidade associada."""
return (f / 8.978663) ** 2
def f_plasma2rc(f, n0=1.65e19, alpha=1.2):
"""Calcula a posicao radial associada a frequência de plasma especificado."""
# Solução analítica para a densidade parabólica
return np.sqrt(1 - np.power((f / 8.978663) ** 2 / (n0), 1 / alpha)) * 0.18
# Solução genérica para qualquer formato de densidade
# (desabilitado porque nem sempre consegue convergir)
# if np.size(f) == 1:
# fun = lambda r: 1e-9 * density(r, n0, alpha) - 1e-9 * f_plasma2ne(f)
# rc = optimize.fsolve(fun, 0.12)
# else:
# rc = np.zeros(np.size(f))
# for i in range(np.size(rc)):
# fun = lambda r: 1e-9 * \
# density(r, n0, alpha) - 1e-9 * f_plasma2ne(f[i])
# rc[i] = optimize.fsolve(fun, 0.12)
# return rc
def n_index(r, f, n0=1.65e19, alpha=1.2):
"""Calcula o indece de refração para o plasma na posição r"""
return np.sqrt(f**2 - r2f_plasma(r, n0, alpha)**2) / f
def phase_shift(fc, n0=1.65e19, alpha=1.2):
phi = np.zeros(len(fc))
for i in range(len(phi)):
phi[i] = (4. * np.pi * fc[i] / 3e8) * integrate.quad(n_index, f_plasma2rc(fc[i],
n0, alpha), 0.18, args=(fc[i], n0, alpha,), epsabs=1e-14)[0] - np.pi / 2
return phi
def v_group_inv(r, f, n0=1.65e19, alpha=1.2):
"""Calcula o inverso da velocidade de grupo para o modo O."""
return (f * 1e-9 / np.sqrt((f * 1e-9) ** 2 - (r2f_plasma(r, n0, alpha) * 1e-9) ** 2)) / 3e8
def group_delay(f_probe, n0=1.65e19, alpha=1.2):
"""Calcula o atraso de grupo para a frequencia de sondagem."""
rc = f_plasma2rc(f_probe, n0, alpha)
tau = np.zeros(len(f_probe))
for i in range(len(tau)):
tau[i] = 2. * integrate.quad(v_group_inv, rc[i], 0.18, args=(
f_probe[i], n0, alpha,), epsrel=1e-14, epsabs=1e-14)[0]
return tau
if __name__ == "__main__":
n0 = 1.65e19
alpha = 1.2
    # experimental probing frequency
f_probe = np.linspace(16e9, np.min([35e9, r2f_plasma(0.001)]), 100)
tau = group_delay(f_probe, n0, alpha)
phi = phase_shift(f_probe, n0, alpha)
    # linear initialization
# f_probe = np.append(np.linspace(1e9, f_probe[0], num=16, endpoint=False), f_probe)
# phi = np.append(np.linspace(-np.pi/2, phi[0], num=16, endpoint=False), phi)
    # cubic initialization
# phi = np.append(np.polyval([(phi[0] + np.pi) / f_probe[1] ** 3., 0, 0, -np.pi/2], np.linspace(1e9, f_probe[0], num=10, endpoint=False)), phi)
# f_probe = np.append(np.linspace(1e9, f_probe[0], num=10, endpoint=False), f_probe)
phi = np.append(interpolate.interp1d([0, 1e9, f_probe[0], f_probe[5]], [-np.pi / 2, -0.999 * np.pi / 2, phi[
0], phi[5]], kind='cubic')(np.linspace(1e9, f_probe[0], num=20, endpoint=False)), phi)
f_probe = np.append(np.linspace(
1e9, f_probe[0], num=20, endpoint=False), f_probe)
# plt.plot(f_probe * 1e-9, phase_shift(f_probe, n0, alpha), 'r--')
# plt.plot(f_probe * 1e-9, phi, '.')
# plt.ylabel('phase [rad]')
# plt.xlabel('probing frequency [GHz]')
# plt.show()
r_real = np.linspace(0, 0.18, 50)
ne = density(r_real, n0, alpha)
rc_BC = ppb.find_pos(f_probe * 1e-9, phi)
ne = f_plasma2ne(f_probe)
fig = plt.figure()
plt.plot(rc_BC, ne * 1e-19, 'b.', label='BC')
plt.plot(r_real[::-1], density(r_real, n0, alpha)
* 1e-19, 'r--', label='real')
plt.ylabel('ne [10^19 /m^3]')
    plt.xlabel('radial position [m]')
plt.xlim([0, 0.18])
plt.legend(loc=2)
plt.show()
| mit |
almarklein/bokeh | bokeh/charts/builder/horizon_builder.py | 1 | 11099 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Horizon class which lets you build your Horizon charts by just
passing the arguments to the Chart class and calling the proper functions.
"""
from __future__ import division
import math
from six import string_types
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, Range1d, DataRange1d, FactorRange, GlyphRenderer, CategoricalAxis
from ...models.glyphs import Patches
from ...properties import Any, Color, Int
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Horizon(values, index=None, num_folds=3, pos_color='#006400',
neg_color='#6495ed', xscale='datetime', xgrid=False, ygrid=False,
**kws):
tools = kws.get('tools', True)
if tools == True:
tools = "save,resize,reset"
elif isinstance(tools, string_types):
tools = tools.replace('pan', '')
tools = tools.replace('wheel_zoom', '')
tools = tools.replace('box_zoom', '')
tools = tools.replace(',,', ',')
kws['tools'] = tools
chart = create_and_build(
HorizonBuilder, values, index=index, num_folds=num_folds, pos_color=pos_color,
neg_color=neg_color, xscale=xscale, xgrid=xgrid, ygrid=ygrid, **kws
)
# Hide numerical axis
chart.left[0].hide = True
# Add the series names to the y axis
chart.extra_y_ranges = {"series": FactorRange(factors=chart._builders[0]._series)}
chart.add_layout(CategoricalAxis(y_range_name="series"), 'left')
return chart
class HorizonBuilder(Builder):
"""This is the Horizon class and it is in charge of plotting
Horizon charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, separate the data into
a number of folds which stack on top of each others.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
Examples:
import datetime
from collections import OrderedDict
from bokeh.charts import Horizon
now = datetime.datetime.now()
delta = datetime.timedelta(minutes=1)
        dts = [now + i * delta for i in range(5)]
        xyvalues = OrderedDict({'Date': dts})
y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
hz = Horizon(xyvalues, index='Date', title="horizon", legend="top_left",
ylabel='Stock Prices', filename="stocks_ts.html")
hz.show()
"""
index = Any(help="""
An index to be used for all data series as follows:
- A 1d iterable of any sort that will be used as
series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
series) if area.values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
""")
neg_color = Color("#6495ed", help="""
The color of the positive folds. (default: "#6495ed")
""")
num_folds = Int(3, help="""
The number of folds stacked on top of each other. (default: 3)
""")
pos_color = Color("#006400", help="""
The color of the positive folds. (default: "#006400")
""")
def __init__(self, values, **kws):
"""
Args:
values (iterable): iterable 2d representing the data series
values matrix.
index (str|1d iterable, optional): can be used to specify a
common custom index for all data series as follows:
- As a 1d iterable of any sort (of datetime values)
that will be used as series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
series) if area.values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame). The values
must be datetime values.
legend (str, optional): the legend of your chart. The legend
content is inferred from incoming input.It can be
``top_left``, ``top_right``, ``bottom_left``,
``bottom_right``. ``top_right`` is set if you set it
as True. Defaults to None.
palette(list, optional): a list containing the colormap as
hex values.
num_folds (int, optional):
            pos_color (hex color string, optional): the color of
                the positive folds
                (default: #006400)
neg_color (hex color string, optional): the color of
the negative folds
(default: #6495ed)
Attributes:
source (obj): datasource object for your plot,
initialized as a dummy None.
x_range (obj): x-associated datarange object for you plot,
initialized as a dummy None.
y_range (obj): y-associated datarange object for you plot,
initialized as a dummy None.
groups (list): to be filled with the incoming groups of data.
Useful for legend construction.
data (dict): to be filled with the incoming data and be passed
to the ColumnDataSource in each chart inherited class.
Needed for _set_And_get method.
attr (list): to be filled with the new attributes created after
loading the data dict.
Needed for _set_And_get method.
"""
super(HorizonBuilder, self).__init__(values, **kws)
self._fold_names = []
self._source = None
self._series = []
self._fold_height = {}
self._max_y = 0
def fold_coordinates(self, y, fold_no, fold_height, y_origin=0, graph_ratio=1):
""" Function that calculate the coordinates for a value given a fold
"""
height = fold_no * fold_height
quotient, remainder = divmod(abs(y), float(height))
v = fold_height
# quotient would be 0 if the coordinate is represented in this fold
# layer
if math.floor(quotient) == 0:
v = 0
if remainder >= height - fold_height:
v = remainder - height + fold_height
v = v * graph_ratio
# Return tuple of the positive and negative relevant position of
# the coordinate against the provided fold layer
if y > 0:
return (v + y_origin, fold_height * graph_ratio + y_origin)
else:
return (y_origin, fold_height * graph_ratio - v + y_origin)
def pad_list(self, l, padded_value=None):
""" Function that insert padded values at the start and end of
the list (l). If padded_value not provided, then duplicate the
values next to each end of the list
"""
if len(l) > 0:
l.insert(0, l[0] if padded_value is None else padded_value)
l.append(l[-1] if padded_value is None else padded_value)
return l
def get_data(self):
"""Use x/y data from the horizon values.
It calculates the chart properties accordingly. Then build a dict
containing references to all the points to be used by
the multiple area glyphes inside the ``draw`` method.
"""
for col in self._values.keys():
if isinstance(self.index, string_types) and col == self.index:
continue
self._series.append(col)
self._max_y = max(max(self._values[col]), self._max_y)
v_index = [x for x in self._values_index]
self.set_and_get("x_", col, self.pad_list(v_index))
self._fold_height = self._max_y / self.num_folds
self._graph_ratio = self.num_folds / len(self._series)
fill_alpha = []
fill_color = []
for serie_no, serie in enumerate(self._series):
self.set_and_get('y_', serie, self._values[serie])
y_origin = serie_no * self._max_y / len(self._series)
for fold_itr in range(1, self.num_folds + 1):
layers_datapoints = [self.fold_coordinates(
x, fold_itr, self._fold_height, y_origin, self._graph_ratio) for x in self._values[serie]]
pos_points, neg_points = map(list, zip(*(layers_datapoints)))
alpha = 1.0 * (abs(fold_itr)) / self.num_folds
# Y coordinates above 0
pos_points = self.pad_list(pos_points, y_origin)
self.set_and_get("y_fold%s_" % fold_itr, serie, pos_points)
self._fold_names.append("y_fold%s_%s" % (fold_itr, serie))
fill_color.append(self.pos_color)
fill_alpha.append(alpha)
# Y coordinates below 0
neg_points = self.pad_list(
neg_points, self._fold_height * self._graph_ratio + y_origin)
self.set_and_get("y_fold-%s_" % fold_itr, serie, neg_points)
self._fold_names.append("y_fold-%s_%s" % (fold_itr, serie))
fill_color.append(self.neg_color)
fill_alpha.append(alpha)
# Groups shown in the legend will only appear once
if serie_no == 0:
self._groups.append(str(self._fold_height * fold_itr))
self._groups.append(str(self._fold_height * -fold_itr))
self.set_and_get('fill_', 'alpha', fill_alpha)
self.set_and_get('fill_', 'color', fill_color)
self.set_and_get('x_', 'all', [self._data[
'x_%s' % serie] for serie in self._series for y in range(self.num_folds * 2)])
self.set_and_get(
'y_', 'all', [self._data[f_name] for f_name in self._fold_names])
def get_source(self):
"""Push the Horizon data into the ColumnDataSource and
calculate the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d(sources=[self._source.columns(self._attr[0])])
self.y_range = Range1d(start=0, end=self._max_y)
def draw(self):
"""Use the patch glyphs to connect the xy points in the time series.
It requires the positive and negative layers
Takes reference points from the data loaded at the ColumnDataSource.
"""
patches = Patches(
fill_color='fill_color', fill_alpha='fill_alpha', xs='x_all', ys='y_all')
renderer = GlyphRenderer(data_source=self._source, glyph=patches)
# self._legends.append((self._groups[i-1], [renderer]))
yield renderer
# TODO: Add the tooltips to display the dates and all absolute y values for each series
# at any vertical places on the plot
# TODO: Add the legend to display the fold ranges based on the color of
# the fold | bsd-3-clause |
hbp-unibi/SNABSuite | plot/histogram.py | 1 | 2644 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SNABSuite -- Spiking Neural Architecture Benchmark Suite
# Copyright (C) 2017 Christoph Jenzen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""
Plots a histogram of a one dimensional list
"""
import argparse
parser = argparse.ArgumentParser(description='Plot a histogram')
# Required Parameters
parser.add_argument("files", metavar="files", nargs='+', help="files to plot")
# Optional arguments
parser.add_argument("-s", type=str, help="Name of the simulator", default="")
parser.add_argument("-t", type=str, help="Title of the plot", default="")
parser.add_argument("-b", help="Number of bins", default='auto')
parser.add_argument("-n", help="Normed histogram",
default=False, action="store_true")
args = parser.parse_args()
import numpy as np
import matplotlib.pyplot as plt
import os
from dim_labels import *
def histogram_plot(data, xlabel, title="", bins='auto', normed=False):
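    """Plot a histogram of the 1D array ``data``.
    ``bins`` is forwarded to ``plt.hist`` unless it is 'auto', and ``normed``
    turns the counts into a probability density (the y label is adjusted
    accordingly). Returns the matplotlib figure.
    """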
fig = plt.figure()
if bins is not "auto":
plt.hist(data, bins=int(bins), density=normed, color='black',
histtype="bar", rwidth=0.95)
else:
plt.hist(data, density=normed, color='black',
histtype="bar", rwidth=0.95)
plt.xlabel(xlabel)
if normed:
plt.ylabel("Probability")
else:
plt.ylabel("Frequency")
if not title == "":
plt.title(title)
return fig
if not os.path.exists("images"):
os.mkdir("images")
for target_file in args.files:
#import data
results = np.recfromtxt(target_file, delimiter=',', loose=True)
xlabel = DIM_LABELS[target_file.split(".csv")[0].split("_")[-1]]
if args.t is "":
title = target_file.split("/")[-1].split("_")[0]
else:
title = args.t
if args.s is not "":
title = title + " for " + SIMULATOR_LABELS[args.s]
fig = histogram_plot(results, xlabel, title, bins=args.b, normed=args.n)
fig.savefig(target_file.split(".csv")[0] + ".pdf", format='pdf',
bbox_inches='tight')
| gpl-3.0 |
gerritgr/LumPy | LumpEngine.py | 1 | 5735 | import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
import sys, os, glob, subprocess
from utilities import *
#from sympy import *
from sympy import simplify
def multi_replace(s, subs):
for x,y in subs:
s = s.replace(x,y)
return s
def clean(s):
s = s.replace('["', '___open_bracket___')
s = s.replace('"]', '___open_closed___')
s = s.replace('[', '___open_bracket2___')
s = s.replace(']', '___open_closed2___')
return s
def declean(s):
s = s.replace('___open_bracket___', '["')
s = s.replace('___open_closed___', '"]')
s = s.replace('___open_bracket2___', '[')
s = s.replace('___open_closed2___', ']')
return s
def to_symbols(s):
try:
from symengine import sympify
return sympify(s)
except:
from sympy import sympify
return sympify(s)
def to_symengine(s):
from symengine import var, sympify
s = str(s)
seperators = " ,;,+,-,=,),(,*,/,**".split(',')
for ch in seperators:
s = s.replace(ch, ' '+ch+' ')
s_list = s.split(' ')
v_list = [token for token in s_list if '_' in token]
v_list = list(set(v_list))
v = ' '.join(v_list)
if v.strip() != '':
var(v)
return sympify(eval(s))
def compute_formula_partial(line, subs=None):
global substitution
if len(line['ode_formula']) == 0:
return '0'
s = to_symbols('0')
formula = line['ode_formula']
weight = line['weight_normalized']
if subs is None:
subs = substitution
for i in range(len(formula)):
s += to_symbols('({w}*({f}))'.format(w=weight[i], f=multi_replace(clean(formula[i]),subs)))
s = str(s)
s = declean(s)
return s
def compute_formula(line, subs=None):
if subs is None:
subs = substitution
if len(line['ode_formula']) == 0:
return '0'
s = '0'
formula = line['ode_formula']
weight = line['weight_normalized']
for i in range(len(formula)):
s += '+({w}*({f}))'.format(w=weight[i], f=multi_replace(clean(formula[i]),subs))
s = to_symbols(s)
s = str(s)
s = declean(s)
return s
def compute_init(line):
weight = line['weight_normalized']
init = line['initial_value']
init_mean = 0.0
for i in range(len(init)):
init_mean += weight[i]*init[i]
return init_mean
def compute_degree_count(line):
weight = line['weight_normalized']
degree_count = line['degree_count']
degree_count_mean = 0.0
for i in range(len(weight)):
degree_count_mean += weight[i]*degree_count[i]
return degree_count_mean
def compute_subs(df, scale_during_substitution):
subs = list()
#scale_during_substitution=False
if scale_during_substitution:
for _, row in df.iterrows():
old_names = row['old_names']
old_inits = row['initial_value_old']
old_degrees = row['degree_count']
cluster_name = row['ode_name']
init = row['initial_value']
avg_degree = row['degree_count_avg']
for i in range(len(old_names)):
old_name = old_names[i]
old_init = old_inits[i]
init_scale = old_init/init
init_scale = avg_degree/old_degrees[i]
new_name_dt = '({s}*{n})'.format(s=init_scale, n = clean(cluster_name))
new_name = '({s}*{n})'.format(s=init_scale, n = clean(cluster_name)[3:])
subs.append((clean(old_name), new_name_dt))
subs.append((clean(old_name)[3:], new_name))
else:
for _, row in df.iterrows():
old_names = row['old_names']
cluster_name = row['ode_name']
for name in old_names:
subs.append((clean(name), clean(cluster_name)))
subs.append((clean(name)[3:], clean(cluster_name)[3:]))
return subs
def compute_formula_torow(df):
result = df.apply(compute_formula, axis=1)
logger.debug('Pool done.')
return result
substitution = dict()
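# lump() expects one ODE per row (columns 'ode', 'cluster_indicator',
# 'initial_value', 'degree_count' and optionally 'weight'), groups the
# equations by cluster indicator and returns one aggregated row per cluster:
# right-hand sides and initial values are weight-averaged, and every original
# variable name is substituted by its cluster variable (rescaled by the ratio
# of the cluster's average degree to the variable's original degree when
# scale_during_substitution is True).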
def lump(df, scale_during_substitution = True):
global substitution
from functools import partial
assert('ode' in df.columns and 'cluster_indicator' in df.columns and 'initial_value' in df.columns)
assert('name_old' not in df.columns)
assert('old_names' not in df.columns)
assert('ode_formula' not in df.columns)
assert('ode_name' not in df.columns)
assert('weight_normalized' not in df.columns)
assert('weight_sum' not in df.columns)
assert('initial_value_old' not in df.columns)
#assert(normalization in ['standard', 'softmax'])
if 'weight' not in df.columns:
df['weight'] = 1.0
#df = df[df['weight'] > 0]
df['ode_name'] = df.apply(lambda l: l['ode'].split('=')[0].strip(), axis=1)
df['ode_formula'] = df.apply(lambda l: l['ode'].split('=')[1].strip(), axis=1)
del df['ode']
df = df.groupby('cluster_indicator')
df = df.agg(lambda l: tuple(l))
df = df.reset_index()
df['old_names'] = df['ode_name']
df['ode_name'] = ['dt_x["cluster'+str(i).zfill(len(str(len(df['old_names']))))+'"]' for i in range(len(df['old_names']))]
df['weight_sum'] = df.apply(lambda l: np.sum(l['weight']), axis=1)
df['weight_normalized'] = df.apply(lambda l: tuple([v/l['weight_sum'] for v in l['weight']]), axis=1)
df['initial_value_old'] = df['initial_value']
df['initial_value'] = df.apply(compute_init, axis=1)
df['degree_count_avg'] = df.apply(compute_degree_count, axis=1)
# parallel lumping
substitution = compute_subs(df, scale_during_substitution)
logger.info('Compute lumped ODEs.')
from multiprocessing import Pool, cpu_count
cores = cpu_count()
data_split = np.array_split(df, len(df.index))
pool = Pool(cores)
new_data = pool.map(compute_formula_torow, data_split)
data = pd.concat(new_data)
pool.close()
pool.join()
df['ode_formula'] = data
del new_data
del data_split
del data
# same as
# same as df['ode_formula'] = df.apply(lambda x: compute_formula(x,substitution), axis=1)
logger.info('Compute lumped ODEs Done.')
df['ode'] = df['ode_name']+'='+df['ode_formula']
return df
# d = {'cluster_indicator': [2,3,2], 'formula':['3+x[0]','4+y','5+x**2+z'], 'name':['x','y','z'], 'weight': [0.6,1.4,0.4]}
# df = pd.DataFrame.from_dict(d)
# dx = lump(df)
# print(dx)
| gpl-3.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/google/cloud/monitoring/_dataframe.py | 7 | 4342 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Time series as :mod:`pandas` dataframes."""
import itertools
TOP_RESOURCE_LABELS = (
'project_id',
'aws_account',
'location',
'region',
'zone',
)
def _build_dataframe(time_series_iterable,
label=None, labels=None): # pragma: NO COVER
"""Build a :mod:`pandas` dataframe out of time series.
:type time_series_iterable:
iterable over :class:`~google.cloud.monitoring.timeseries.TimeSeries`
:param time_series_iterable:
An iterable (e.g., a query object) yielding time series.
:type label: str
:param label:
(Optional) The label name to use for the dataframe header. This can be
the name of a resource label or metric label (e.g.,
``"instance_name"``), or the string ``"resource_type"``.
:type labels: list of strings, or None
:param labels:
A list or tuple of label names to use for the dataframe header.
If more than one label name is provided, the resulting dataframe
will have a multi-level column header.
Specifying neither ``label`` or ``labels`` results in a dataframe
with a multi-level column header including the resource type and
all available resource and metric labels.
Specifying both ``label`` and ``labels`` is an error.
:rtype: :class:`pandas.DataFrame`
:returns: A dataframe where each column represents one time series.
"""
import pandas # pylint: disable=import-error
if labels is not None:
if label is not None:
raise ValueError('Cannot specify both "label" and "labels".')
elif not labels:
raise ValueError('"labels" must be non-empty or None.')
columns = []
headers = []
for time_series in time_series_iterable:
pandas_series = pandas.Series(
data=[point.value for point in time_series.points],
index=[point.end_time for point in time_series.points],
)
columns.append(pandas_series)
headers.append(time_series.header())
# Implement a smart default of using all available labels.
if label is None and labels is None:
resource_labels = set(itertools.chain.from_iterable(
header.resource.labels for header in headers))
metric_labels = set(itertools.chain.from_iterable(
header.metric.labels for header in headers))
labels = (['resource_type'] +
_sorted_resource_labels(resource_labels) +
sorted(metric_labels))
# Assemble the columns into a DataFrame.
dataframe = pandas.DataFrame.from_records(columns).T
# Convert the timestamp strings into a DatetimeIndex.
dataframe.index = pandas.to_datetime(dataframe.index)
# Build a multi-level stack of column headers. Some labels may
# be undefined for some time series.
levels = []
for key in labels or [label]:
level = [header.labels.get(key, '') for header in headers]
levels.append(level)
# Build a column Index or MultiIndex. Do not include level names
# in the column header if the user requested a single-level header
# by specifying "label".
dataframe.columns = pandas.MultiIndex.from_arrays(
levels,
names=labels or None)
# Sort the rows just in case (since the API doesn't guarantee the
# ordering), and sort the columns lexicographically.
return dataframe.sort_index(axis=0).sort_index(axis=1)
def _sorted_resource_labels(labels):
"""Sort label names, putting well-known resource labels first."""
head = [label for label in TOP_RESOURCE_LABELS if label in labels]
tail = sorted(label for label in labels
if label not in TOP_RESOURCE_LABELS)
return head + tail
| mit |
rosswhitfield/mantid | qt/applications/workbench/workbench/plotting/plotscriptgenerator/utils.py | 3 | 3050 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
import re
import numpy as np
from matplotlib.container import ErrorbarContainer
def round_to_sig_figs(number, sig_figs):
if np.isclose(number, 0):
return 0.0
return round(number, -int(np.floor(np.log10(np.abs(number)))) + (sig_figs - 1))
def convert_value_to_arg_string(value):
"""
Converts a given object into a string representation of that object
which can be passed to a function. It is recursive so works on objects
such as lists.
"""
if isinstance(value, str):
return "'{}'".format(value)
if isinstance(value, (list, np.ndarray, tuple)):
return "[{}]".format(', '.join([convert_value_to_arg_string(v) for v in value]))
if isinstance(value, dict):
kv_pairs = []
for key, val in value.items():
kv_pairs.append("{}: {}".format(convert_value_to_arg_string(key),
convert_value_to_arg_string(val)))
return "{{{}}}".format(', '.join(kv_pairs))
if isinstance(value, (float, np.float)):
return str(round_to_sig_figs(value, 5))
return str(value)
def convert_args_to_string(args, kwargs):
"""
Given list of args and dict of kwargs, constructs a string that
would be valid code to pass into a Python function
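    e.g. ``convert_args_to_string([10], {'color': 'r'})`` returns
    ``"10, color='r'"``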
"""
arg_strings = [str(arg) for arg in args] if args else []
for kwarg, value in sorted(kwargs.items()): # sorting makes this testable
arg_strings.append("{}={}".format(kwarg, convert_value_to_arg_string(value)))
return ', '.join(arg_strings)
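def _example_convert_args_to_string():
    """Illustrative sketch only -- not part of the original module. Shows the
    kind of call-argument string the helpers above produce; the sample values
    are assumptions, not taken from a real plot script.
    """
    # Floats pass through round_to_sig_figs (5 significant figures), strings
    # are quoted, and kwargs are emitted in sorted order, so this returns
    # "[1, 2, 3], capsize=1.5, color='red'"
    return convert_args_to_string([[1, 2, 3]], {"color": "red", "capsize": 1.5})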
def get_plotted_workspaces_names(fig):
plotted_workspaces = []
for ax in fig.get_axes():
try:
plotted_workspaces += list(ax.tracked_workspaces.keys())
except AttributeError: # Scripted plots have no tracked workspaces
pass
return plotted_workspaces
def generate_workspace_retrieval_commands(fig):
workspace_names = get_plotted_workspaces_names(fig)
commands = ["from mantid.api import AnalysisDataService as ADS\n"]
for name in set(workspace_names):
variable_name = clean_variable_name(name)
commands.append("{} = ADS.retrieve('{}')".format(variable_name, name))
return commands
def clean_variable_name(name):
"""Converts a string into a valid Python variable name"""
return re.sub(r'\W|^(?=\d)', '_', name)
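# Hedged examples (not part of the original module) of clean_variable_name:
#   clean_variable_name('4th workspace-2') -> '_4th_workspace_2'
#   clean_variable_name('my ws (2)')       -> 'my_ws__2_'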
def sorted_lines_in(ax, artists):
lines = ax.get_lines()
err_containers = [cont for cont in ax.containers
if isinstance(cont, ErrorbarContainer)]
sorted_lines = []
for line in lines + err_containers:
if line in artists:
sorted_lines.append(line)
return sorted_lines
| gpl-3.0 |
devanshdalal/scikit-learn | sklearn/gaussian_process/gpc.py | 19 | 31639 | """Gaussian processes classification."""
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve
from scipy.optimize import fmin_l_bfgs_b
from scipy.special import erf
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.gaussian_process.kernels \
import RBF, CompoundKernel, ConstantKernel as C
from sklearn.utils.validation import check_X_y, check_is_fitted, check_array
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712,
128.12323805, -2010.49422654])[:, np.newaxis]
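def _refit_sigmoid_approx_coefs():
    """Illustrative sketch only -- not part of scikit-learn. Re-derives the
    least-squares fit described in the comment above, under two assumptions:
    `expit` stands in for the `logistic` referred to there, and the np.inf
    grid point is replaced by a large finite value so that both `expit` and
    `erf` evaluate to (numerically) 1.
    """
    from scipy.linalg import lstsq
    from scipy.special import expit
    x = np.array([0, 0.6, 2, 3.5, 4.5, 1e6])[:, np.newaxis]
    b = expit(x).ravel()
    A = (erf(x * LAMBDAS.T) + 1) / 2  # same design matrix as in the comment
    return lstsq(A, b)[0]             # refitted COEFS (LAMBDAS kept fixed)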
class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
"""Binary Gaussian process classification based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer: int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict: int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_ : array-like, shape = (n_samples,)
Target values in training data (also required for prediction)
classes_ : array-like, shape = (n_classes,)
Unique class labels.
kernel_ : kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in X_train_
pi_ : array-like, shape = (n_samples,)
The probabilities of the positive class for the training points
X_train_
W_sr_ : array-like, shape = (n_samples,)
Square root of W, the Hessian of log-likelihood of the latent function
values for the observed labels. Since W is diagonal, only the diagonal
of sqrt(W) is stored.
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = np.copy(X) if self.copy_X_train else X
# Encode class labels and check that it is a binary classification
# problem
label_encoder = LabelEncoder()
self.y_train_ = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
if self.classes_.size > 2:
raise ValueError("%s supports only binary classification. "
"y contains classes %s"
% (self.__class__.__name__, self.classes_))
elif self.classes_.size == 1:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds)]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = np.exp(self.rng.uniform(bounds[:, 0],
bounds[:, 1]))
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
_, (self.pi_, self.W_sr_, self.L_, _, _) = \
self._posterior_mode(K, return_temporaries=True)
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# As discussed on Section 3.4.2 of GPML, for making hard binary
# decisions, it is enough to compute the MAP of the posterior and
# pass it through the link function
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4
return np.where(f_star > 0, self.classes_[1], self.classes_[0])
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# Based on Algorithm 3.2 of GPML
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
# Line 6 (compute np.diag(v.T.dot(v)) via einsum)
var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
# Line 7:
# Approximate \int log(z) * N(z | f_star, var_f_star)
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
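        # (Hedged note, not in the original source: the closed form used here is
        #   \int erf(lam * z) N(z | f_star, var) dz
        #       = erf(lam * f_star / sqrt(1 + 2 * var * lam**2)),
        # which the expression below rewrites in terms of alpha = 1 / (2 * var);
        # the +1/2 offset of the (erf + 1)/2 approximation is restored by the
        # 0.5 * COEFS.sum() term at the end.)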
alpha = 1 / (2 * var_f_star)
gamma = LAMBDAS * f_star
integrals = np.sqrt(np.pi / alpha) \
* erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \
/ (2 * np.sqrt(var_f_star * 2 * np.pi))
pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum()
return np.vstack((1 - pi_star, pi_star)).T
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Compute log-marginal-likelihood Z and also store some temporaries
# which can be reused for computing Z's gradient
Z, (pi, W_sr, L, b, a) = \
self._posterior_mode(K, return_temporaries=True)
if not eval_gradient:
return Z
# Compute gradient based on Algorithm 5.1 of GPML
d_Z = np.empty(theta.shape[0])
# XXX: Get rid of the np.diag() in the next line
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7
C = solve(L, W_sr[:, np.newaxis] * K) # Line 8
# Line 9: (use einsum to compute np.diag(C.T.dot(C))))
s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \
* (pi * (1 - pi) * (1 - 2 * pi)) # third derivative
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j] # Line 11
# Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi) # Line 13
s_3 = b - K.dot(R.dot(b)) # Line 14
d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15
return Z, d_Z
def _posterior_mode(self, K, return_temporaries=False):
"""Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
"""
# Based on Algorithm 3.1 of GPML
# If warm_start are enabled, we reuse the last solution for the
# posterior mode as initialization; otherwise, we initialize with 0
if self.warm_start and hasattr(self, "f_cached") \
and self.f_cached.shape == self.y_train_.shape:
f = self.f_cached
else:
f = np.zeros_like(self.y_train_, dtype=np.float64)
# Use Newton's iteration method to find mode of Laplace approximation
log_marginal_likelihood = -np.inf
for _ in range(self.max_iter_predict):
# Line 4
pi = 1 / (1 + np.exp(-f))
W = pi * (1 - pi)
# Line 5
W_sr = np.sqrt(W)
W_sr_K = W_sr[:, np.newaxis] * K
B = np.eye(W.shape[0]) + W_sr_K * W_sr
L = cholesky(B, lower=True)
# Line 6
b = W * f + (self.y_train_ - pi)
# Line 7
a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
# Line 8
f = K.dot(a)
# Line 10: Compute log marginal likelihood in loop and use as
# convergence criterion
lml = -0.5 * a.T.dot(f) \
- np.log(1 + np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \
- np.log(np.diag(L)).sum()
# Check if we have converged (log marginal likelihood does
# not decrease)
# XXX: more complex convergence criterion
if lml - log_marginal_likelihood < 1e-10:
break
log_marginal_likelihood = lml
self.f_cached = f # Remember solution for later warm-starts
if return_temporaries:
return log_marginal_likelihood, (pi, W_sr, L, b, a)
else:
return log_marginal_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
class GaussianProcessClassifier(BaseEstimator, ClassifierMixin):
"""Gaussian process classification (GPC) based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
Gaussian Processes for Machine Learning (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function. For multi-class classification, several binary one-versus rest
classifiers are fitted. Note that this class thus does not implement
a true multi-class Laplace approximation.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict : int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
multi_class : string, default : "one_vs_rest"
Specifies how multi-class classification problems are handled.
Supported are "one_vs_rest" and "one_vs_one". In "one_vs_rest",
one binary Gaussian process classifier is fitted for each class, which
is trained to separate this class from the rest. In "one_vs_one", one
binary Gaussian process classifier is fitted for each pair of classes,
which is trained to separate these two classes. The predictions of
these binary predictors are combined into multi-class predictions.
Note that "one_vs_one" does not support predicting probability
estimates.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
kernel_ : kernel object
The kernel used for prediction. In case of binary classification,
the structure of the kernel is the same as the one passed as parameter
but with optimized hyperparameters. In case of multi-class
classification, a CompoundKernel is returned which consists of the
different kernels used in the one-versus-rest classifiers.
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
classes_ : array-like, shape = (n_classes,)
Unique class labels.
n_classes_ : int
The number of classes in the training data
.. versionadded:: 0.18
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None,
multi_class="one_vs_rest", n_jobs=1):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
self.multi_class = multi_class
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=False)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
self.kernel, self.optimizer, self.n_restarts_optimizer,
self.max_iter_predict, self.warm_start, self.copy_X_train,
self.random_state)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.size
if self.n_classes_ == 1:
raise ValueError("GaussianProcessClassifier requires 2 or more "
"distinct classes. Only class %s present."
% self.classes_[0])
if self.n_classes_ > 2:
if self.multi_class == "one_vs_rest":
self.base_estimator_ = \
OneVsRestClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
elif self.multi_class == "one_vs_one":
self.base_estimator_ = \
OneVsOneClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
else:
raise ValueError("Unknown multi-class mode %s"
% self.multi_class)
self.base_estimator_.fit(X, y)
if self.n_classes_ > 2:
self.log_marginal_likelihood_value_ = np.mean(
[estimator.log_marginal_likelihood()
for estimator in self.base_estimator_.estimators_])
else:
self.log_marginal_likelihood_value_ = \
self.base_estimator_.log_marginal_likelihood()
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["classes_", "n_classes_"])
X = check_array(X)
return self.base_estimator_.predict(X)
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
raise ValueError("one_vs_one multi-class mode does not support "
"predicting probability estimates. Use "
"one_vs_rest mode instead.")
X = check_array(X)
return self.base_estimator_.predict_proba(X)
@property
def kernel_(self):
if self.n_classes_ == 2:
return self.base_estimator_.kernel_
else:
return CompoundKernel(
[estimator.kernel_
for estimator in self.base_estimator_.estimators_])
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
likelihood of the one-versus-rest classifiers are returned.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or none
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
kernel. In the latter case, all individual kernel get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if self.n_classes_ == 2:
return self.base_estimator_.log_marginal_likelihood(
theta, eval_gradient)
else:
if eval_gradient:
raise NotImplementedError(
"Gradient of log-marginal-likelihood not implemented for "
"multi-class GPC.")
estimators = self.base_estimator_.estimators_
n_dims = estimators[0].kernel_.n_dims
if theta.shape[0] == n_dims: # use same theta for all sub-kernels
return np.mean(
[estimator.log_marginal_likelihood(theta)
for i, estimator in enumerate(estimators)])
elif theta.shape[0] == n_dims * self.classes_.shape[0]:
# theta for compound kernel
return np.mean(
[estimator.log_marginal_likelihood(
theta[n_dims * i:n_dims * (i + 1)])
for i, estimator in enumerate(estimators)])
else:
raise ValueError("Shape of theta must be either %d or %d. "
"Obtained theta with shape %d."
% (n_dims, n_dims * self.classes_.shape[0],
theta.shape[0]))
| bsd-3-clause |
JazzeYoung/VeryDeepAutoEncoder | pylearn2/pylearn2/train_extensions/tests/test_wmape_channel.py | 32 | 2531 | """
Tests for WMAPE.
"""
from pylearn2.config import yaml_parse
from pylearn2.testing.skip import skip_if_no_sklearn
from theano.compile import function
import numpy as np
from numpy.testing import assert_allclose
def test_wmape():
"""Test WMapeChannel."""
skip_if_no_sklearn()
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
X = trainer.model.get_input_space().make_theano_batch()
Y = trainer.model.fprop(X)
f = function([X], Y, allow_input_downcast=True)
y_hat = f(trainer.dataset.X)
wmape_num_exp = abs(trainer.dataset.y - y_hat).sum()
wmape_den_exp = abs(trainer.dataset.y).sum()
exp_array = np.asarray([wmape_num_exp, wmape_den_exp])
wmape_num_real = trainer.model.monitor.channels['train_wmape_num'].\
val_record
wmape_den_real = trainer.model.monitor.channels['train_wmape_den'].\
val_record
real_array = np.asarray([wmape_num_real[-1], wmape_den_real[-1]])
assert_allclose(exp_array, real_array)
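# Note (not part of the original test): the two monitored channels combine into
# the usual weighted MAPE as wmape = wmape_num / wmape_den, i.e.
# sum(|y - y_hat|) / sum(|y|); the test checks the numerator and denominator
# channels separately rather than their ratio.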
test_yaml = """
!obj:pylearn2.train.Train {
dataset:
&train !obj:pylearn2.testing.datasets.\
random_dense_design_matrix_for_regression
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 10,
dim: 10,
reg_min: 1,
reg_max: 1000
},
model: !obj:pylearn2.models.mlp.MLP {
nvis: 10,
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: h0,
dim: 10,
irange: 0.05,
},
!obj:pylearn2.models.mlp.Linear {
layer_name: y,
dim: 1,
irange: 0.,
}
],
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
monitoring_dataset: {
'train': *train,
},
batches_per_iter: 1,
monitoring_batches: 1,
termination_criterion: !obj:pylearn2.termination_criteria.And {
criteria: [
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
!obj:pylearn2.termination_criteria.MonitorBased {
channel_name: train_wmape_num,
prop_decrease: 0.,
N: 1,
},
],
},
},
extensions: [
!obj:pylearn2.train_extensions.wmape_channel.WMapeNumeratorChannel {},
!obj:pylearn2.train_extensions.wmape_channel.\
WMapeDenominatorChannel {},
],
}
"""
| bsd-3-clause |
Ambrosys/climatelearn | climatelearn/learning/classification/weka_class_MP.py | 1 | 6925 | import subprocess
import shlex
import pandas as pd
import numpy as np
from copy import deepcopy
from climatelearn.learning.base_class import Classification
from .. import arf
from .. import errors
class MP_Weka_class(Classification):
def __init__(self,train_data, hyper):
"""
Constructor
------------
train_data: pandas DataFrame
Contains columns for features and for target variables. The names of the target variables ends
with the suffix "_tau"
hyper: dictionary
It contains the hyper parameters necessary to run all the functionality of the model.
They are the following:
"structure" is a list of integers determining the number of neurons in each hidden layer.
"epochs" an integer specifying the maximum number of epochs to run during every training session.
"learning_rate" a float giving the learning rate of the gradient descend.
"momentum" a float giving the value of the momentum for the algorithm.
Other parameters regarding cross validation are explained in the base class.
"""
Classification.__init__(self, train_data, hyper)
self.structure = self._build_structure(hyper['structure'])
self.epochs = hyper['epochs']
self.learning_rate = hyper['learning_rate']
self.momentum = hyper['momentum']
self.path_train = hyper['path_train']
self.path_test = hyper['path_test']
self.path_model = hyper['path_model']
self.path_output = hyper['path_output']
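    # Hedged illustration (not part of the original class): `hyper` is assumed
    # to look roughly like
    #     {'structure': [10, 5], 'epochs': 500, 'learning_rate': 0.3,
    #      'momentum': 0.2, 'path_train': 'train.arff', 'path_test': 'test.arff',
    #      'path_model': 'mlp.model', 'path_output': 'weka_out.txt'}
    # plus whatever cross-validation keys the Classification base class expects.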
def learn(self, train_data = None):
"""
Performs single run training.
----------
train_data: pandas Dataframe
It needs to contain datetime objects on index, and both features and target variables.
The target variables need to end with the suffix "_tau". If None the self.train_set
variable passed at the moment of instantiation will be used.
Returns: string
It returns the path to the model created.
"""
if train_data is not None:
self.train_set = train_data
self._write_arff(self.train_set, self.path_train)
command = "java -classpath /usr/share/java/weka.jar weka.classifiers.functions.MultilayerPerceptron -L " +\
str(self.learning_rate) + " -M " + str(self.momentum)
command += " -N " + str(self.epochs) + " -V 0 -S 0 -E 20 -H " + self.structure + " -t " + self.path_train
command += " -d " + self.path_model
args = shlex.split(command)
p = subprocess.Popen(args, stdout=subprocess.PIPE)
p.wait()
predicted, actual = self.test(self.train_set, train=True)
conf_matrix = errors.confusion_matrix(predicted, actual, key='NINO34_tau_class')
return self.path_model, conf_matrix, None
def xvalidate(self, train_data = None, folds = None):
return None
def test(self, data, train=False):
data_y = deepcopy(data)
for k in [x for x in data_y.keys() if '_tau' not in x]:
del data_y[k]
if train:
self._write_arff(data, self.path_train)
command = "java -classpath /usr/share/java/weka.jar weka.classifiers.functions.MultilayerPerceptron -l " \
+ self.path_model + " -T " + self.path_train + " -p 0"
else:
self._write_arff(data, self.path_test)
command = "java -classpath /usr/share/java/weka.jar weka.classifiers.functions.MultilayerPerceptron -l "\
+ self.path_model + " -T " + self.path_test + " -p 0"
args = shlex.split(command)
p = subprocess.Popen(args, stdout=subprocess.PIPE)
p.wait()
fil = open(self.path_output, "w")
fil.write(p.communicate()[0])
fil.close()
predicted = self._read_output(self.path_output)
predicted = pd.DataFrame(predicted,index=data.index,columns=self.targets)
return predicted, data_y
def predict(self, test_data):
#Todo: Here we add a dummy energy_tau column needed for weka
test_data["c"] = pd.Series(np.zeros(len(test_data.index)),index=test_data.index)
self._write_arff(test_data, self.path_test)
command = "java -classpath /usr/share/java/weka.jar weka.classifiers.functions.MultilayerPerceptron -l "\
+ self.path_model + " -T " + self.path_test + " -p 0"
args = shlex.split(command)
p = subprocess.Popen(args, stdout=subprocess.PIPE)
p.wait()
fil = open(self.path_output, "w")
fil.write(p.communicate()[0])
fil.close()
return self._read_output(self.path_output)
def _build_structure(self,structure):
l = " \""
for i in range(0,len(structure)-1):
l += str(structure[i]) + ", "
l += str(structure[-1]) + "\" "
return l
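    # Note (not in the original source): for structure=[10, 5] the string built
    # above is ' "10, 5" ', i.e. the hidden-layer sizes quoted for weka's -H flag.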
def _write_arff(self, data, path):
attributes = []
for k in data.keys():
if "_class" not in k:
attributes.append([k,'REAL'])
else:
attributes.append([k, ['yes','no']])
new_data = deepcopy(data)
for k in [c for c in data.keys() if '_tau' in c]:
new_data = self._exchange_attr(new_data, attributes, k)
data_write = {'data': new_data[1:], 'attributes': [tuple(l) for l in attributes], 'relation': unicode("__"),
'description': unicode("__")}
data_final = arf.dumps(data_write)
with open(path, "w") as f:
f.write(data_final)
return None
def _exchange_attr(self, data, attributes, y):
header = [k for k in data.keys() if k != y]
header.append(y)
new_list = [header]
for i in range(0, len(data[data.keys()[0]])):
lis_part = []
for k in header:
lis_part.append(data[k][data.index[i]])
new_list.append(lis_part)
attributes.remove([y, ['yes','no']])
attributes.append([y, ['yes','no']])
return new_list
def _read_output(self, path):
"""
        Method for parsing Weka classification output.
        :param path: path to the file containing Weka's prediction output
        :return: a numpy array of predicted class labels ('yes'/'no')
"""
res = []
with open(path,'r') as fin:
lines = fin.readlines()
for i in range(5,len(lines) -1):
linea = self._splitting(lines[i], ' ')
if 'yes' in str(linea[2]):
res.append('yes')
else:
res.append('no')
return np.array(res)
    def _splitting(self, s, spacing=' '):
        # Split on `spacing`, dropping empty tokens.
        return [part for part in s.split(spacing) if part != '']
| gpl-2.0 |
wathen/PhD | MHD/FEniCS/FieldSplit/test.py | 1 | 3495 | #!/usr/bin/python
wathen/PhD | MHD/FEniCS/FieldSplit/test.py | 1 | 3495 | ls#!/usr/bin/python
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
from dolfin import *
import numpy as np
import matplotlib.pylab as plt
import PETScIO as IO
import CavityDomain3d as CD
def remove_ij(x, i, j):
# Remove the ith row
idx = range(x.shape[0])
idx.remove(i)
x = x[idx,:]
# Remove the jth column
idx = range(x.shape[1])
idx.remove(j)
x = x[:,idx]
return x
# Create mesh and define function space
nn = 2**4
mesh, boundaries = CD.CavityMesh3d(nn)
parameters['reorder_dofs_serial'] = False
V = VectorFunctionSpace(mesh, "CG", 2)
P = FunctionSpace(mesh, "CG", 1)
M = FunctionSpace(mesh, "N1curl", 1)
L = FunctionSpace(mesh, "CG", 1)
parameters['reorder_dofs_serial'] = False
W = MixedFunctionSpace([V,P,M,L])
def boundary(x, on_boundary):
return on_boundary
u01 =Expression(("0","0","0"),cell=triangle)
u02 =Expression(("1","0","0"),cell=triangle)
b0 = Expression(("1","0","0"),cell=triangle)
r0 = Expression(("0"),cell=triangle)
bcu1 = DirichletBC(W.sub(0),u01, boundaries,1)
bcu2 = DirichletBC(W.sub(0),u02, boundaries,2)
bcb = DirichletBC(W.sub(2),b0, boundary)
bcr = DirichletBC(W.sub(3),r0, boundary)
bc = [bcu1,bcu2,bcb,bcr]
(u, p, b, r) = TrialFunctions(W)
(v, q, c,s ) = TestFunctions(W)
K = 1e5
Mu_m = 1e5
MU = 1e-2
fns = Expression(("0","0","0"),mu = MU, k = K)
fm = Expression(("0","0","0"),k = K,mu_m = Mu_m)
"'Maxwell Setup'"
a11 = K*Mu_m*inner(curl(c),curl(b))*dx
a12 = inner(c,grad(r))*dx
a21 = inner(b,grad(s))*dx
Lmaxwell = inner(c, fm)*dx
maxwell = a11+a12+a21
"'NS Setup'"
u_k = Function(V)  # velocity iterate, from the velocity space V defined above
u_k.vector()[:] = u_k.vector()[:]*0
n = FacetNormal(mesh)
a11 = MU*inner(grad(v), grad(u))*dx+inner((grad(u)*u_k),v)*dx+(1/2)*div(u_k)*inner(u,v)*dx- (1/2)*inner(u_k,n)*inner(u,v)*ds
a12 = -div(v)*p*dx
a21 = -div(u)*q*dx
Lns = inner(v, fns)*dx
ns = a11+a12+a21
"'Coupling term Setup'"
b_k = Function(M)  # magnetic-field iterate, from the N1curl space M defined above
b_k.vector()[:] = b_k.vector()[:]*0
eps = 1.0
tol = 1.0E-5
iter = 0
maxiter = 20
SolutionTime = 0
while eps > tol and iter < maxiter:
iter += 1
uu = Function(W)
tic()
AA, bb = assemble_system(maxwell+ns+CoupleTerm, Lmaxwell + Lns, bc)
As = AA.sparray()
StoreMatrix(As,"A")
VelPres = Velocitydim[xx-1][0] +Pressuredim[xx-1][0]
Adelete = remove_ij(As,VelPres-1,VelPres-1)
A = PETSc.Mat().createAIJ(size=Adelete.shape,csr=(Adelete.indptr, Adelete.indices, Adelete.data))
print toc()
b = np.delete(bb,VelPres-1,0)
zeros = 0*b
bb = IO.arrayToVec(b)
x = IO.arrayToVec(zeros)
ksp = PETSc.KSP().create()
pc = PETSc.PC().create()
ksp.setOperators(A)
ksp.setFromOptions()
print 'Solving with:', ksp.getType()
# Solve!
tic()
# start = time.time()
ksp.solve(bb, x)
# %timit ksp.solve(bb, x)
# print time.time() - start
time = toc()
print time
SolutionTime = SolutionTime +time
# print ksp.its
X = IO.vecToArray(x)
uu = X[0:Velocitydim[xx-1][0]]
bb1 = X[VelPres-1:VelPres+Electricdim[xx-1][0]-1]
u1 = Function(V)
u1.vector()[:] = u1.vector()[:] + uu
diff = u1.vector().array() - u_k.vector().array()
epsu = np.linalg.norm(diff, ord=np.Inf)
b1 = Function(M)
b1.vector()[:] = b1.vector()[:] + bb1
diff = b1.vector().array() - b_k.vector().array()
epsb = np.linalg.norm(diff, ord=np.Inf)
eps = epsu+epsb
print '\n\n\niter=%d: norm=%g' % (iter, eps)
u_k.assign(u1)
b_k.assign(b1)
| mit |
cwu2011/scikit-learn | sklearn/externals/joblib/__init__.py | 35 | 4382 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://packages.python.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over and
  over, for instance when prototyping computational-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
  good for resuming an application status or computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
__version__ = '0.8.4'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
jborlik/AdventOfCode2016 | day8.py | 1 | 1351 | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
# Example of np.roll
#x = np.linspace(1,20,num=20,dtype=int)
#x = np.reshape(x, (4,5))
#print(x)
#x[:,1] = np.roll(x[:,1], 2 )
#print(x)
def processInstruction(ainstruction, thearray):
if (ainstruction[0] == 'rect'):
dims = list(map(int,ainstruction[1].split('x')))
thearray[0:dims[1],0:dims[0]] = np.ones((dims[1],dims[0]),dtype=int)
elif (ainstruction[0] == 'rotate'):
if (ainstruction[1] == 'row'):
row = int(ainstruction[2].split('=')[1])
steps = int(ainstruction[4])
thearray[row,:] = np.roll(thearray[row,:],steps)
elif (ainstruction[1] == 'column'):
col = int(ainstruction[2].split('=')[1])
steps = int(ainstruction[4])
thearray[:,col] = np.roll(thearray[:,col],steps)
else:
print("UNKNOWN INSTRUCTION:", ainstruction)
else:
print("UNKNOWN INSTRUCTION:", ainstruction)
with open('day8.dat') as datafile:
instructions = [x.strip().split(' ') for x in datafile.readlines()]
display = np.zeros( (6,50), dtype=int)
for ins in instructions:
processInstruction(ins,display)
print(display)
print("Sum: ", np.sum(display))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(display)
plt.show()
| gpl-3.0 |
humancompatibleai/imitation | src/imitation/analysis/mountain_car_plots.py | 1 | 8385 | """Heatmaps and reward plotting code for debugging MountainCar."""
import pathlib
import pickle
from typing import Dict, List, Optional, Sequence, Union
import gym
import numpy as np
from matplotlib import pyplot as plt
from stable_baselines3.common import vec_env
from imitation.data import rollout, types
from imitation.policies import serialize as policies_serialize
from imitation.rewards import common
from imitation.rewards import serialize as rewards_serialize
MC_POS_MIN, MC_POS_MAX = -1.2, 0.6
MC_VEL_MIN, MC_VEL_MAX = -0.07, 0.07
MC_GOAL_POS = 0.5
MC_NUM_ACTS = 3
ACT_NAMES = ["left", "neutral", "right"]
def _make_next_mc_obs(obs, acts) -> np.ndarray:
"""Utility for calculating the MountainCar-v0 next observation s'.
Required for evaluating AIRL reward at arbitrary (s, a) points.
"""
env = gym.make("MountainCar-v0")
obs = np.array(obs)
acts = np.array(acts)
next_obs = []
for ob, act in zip(obs, acts):
assert ob.shape == (2,)
env.reset()
env.unwrapped.state = ob
next_ob = env.step(act)[0]
next_obs.append(next_ob)
return np.array(next_obs)
def make_heatmap(
act: int,
reward_fn: common.RewardFn,
n_pos_step: int = 18,
n_vel_step: int = 14,
mark_goal: bool = True,
gen_trajs: Optional[List[types.Trajectory]] = None,
exp_trajs: Optional[List[types.Trajectory]] = None,
legend_on: bool = True,
title: bool = None,
heatmap: bool = True,
filter_trans_by_act: bool = True,
) -> plt.Figure:
"""Make a MountainCar heatmap of rewards for a particular action.
X axis is position. Y axis is velocity.
Args:
act: The MountainCar action number whose reward we are evaluating.
Should be 0, 1, or 2.
reward_fn: Reward function. Should accept unnormalized inputs.
n_pos_step: The number of squares that the x axis of the heatmap is divided
into.
n_vel_step: The number of squares that the y axis of the heatmap is divided
into.
gen_trajs: A list of generator trajectories to
scatterplot on top of the heatmap.
exp_trajs: A list of exp trajectories to scatterplot on
top of the heatmap.
legend_on: Whether to plot the legend.
title: Custom title.
heatmap: Whether to plot the heatmap.
filter_trans_by_act: If True, then filter out transitions from
`gen_trajs` and `exp_trajs` that don't use action `act` before
scatterplotting.
"""
assert 0 <= act < MC_NUM_ACTS
def convert_traj_to_coords_filtered(trajs: Sequence[types.Trajectory]):
trans = rollout.flatten_trajectories(trajs)
obs = trans.obs
if filter_trans_by_act:
obs = obs[trans.acts == act]
return obs[:, 0], obs[:, 1]
fig, ax = plt.subplots()
if heatmap:
pos_space = np.linspace(MC_POS_MIN, MC_POS_MAX, n_pos_step, endpoint=True)
vel_space = np.linspace(MC_VEL_MIN, MC_VEL_MAX, n_vel_step, endpoint=True)
obs_vec = np.array([[p, v] for p in pos_space for v in vel_space])
acts_vec = np.array([act] * len(obs_vec))
next_obs_vec = _make_next_mc_obs(obs_vec, acts_vec)
dones = np.zeros(len(acts_vec), dtype=np.bool)
rew = reward_fn(obs_vec, acts_vec, next_obs_vec, dones)
# Transpose because `pcolor` (confusingly) expects its first two arguments
# to be XY, but its matrix argument to be in RC format.
rew_matrix = rew.reshape(n_pos_step, n_vel_step).T
c = ax.pcolor(pos_space, vel_space, rew_matrix)
fig.colorbar(c, ax=ax)
if mark_goal:
ax.axvline(
x=MC_GOAL_POS, linestyle="--", label=f"goal state (pos={MC_GOAL_POS})"
)
if exp_trajs is not None:
X, Y = convert_traj_to_coords_filtered(exp_trajs)
ax.scatter(X, Y, marker="o", label="expert samples", alpha=0.2)
if gen_trajs is not None:
X, Y = convert_traj_to_coords_filtered(gen_trajs)
ax.scatter(X, Y, marker="o", c="yellow", label="policy samples", alpha=0.2)
if title is None:
title = f"Action {ACT_NAMES[act]}"
ax.set_title(title)
if legend_on:
ax.legend(loc="center left", bbox_to_anchor=(0, 1.3))
return fig
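def _example_heatmap_with_constant_reward():
    """Illustrative sketch only -- not part of the original module. Shows the
    (obs, acts, next_obs, dones) -> rewards signature expected of `reward_fn`
    by rendering the "neutral" action heatmap under a trivial constant reward.
    """
    def constant_reward(obs, acts, next_obs, dones):
        # One reward per transition, ignoring the inputs.
        return np.ones(len(obs))
    return make_heatmap(act=1, reward_fn=constant_reward)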
def batch_reward_heatmaps(
checkpoints_dir: Union[str, pathlib.Path],
n_gen_trajs: int = 50,
exp_trajs: Optional[List[types.Trajectory]] = None,
) -> Dict[pathlib.Path, plt.Figure]:
"""Build multiple mountain car reward heatmaps from a checkpoint directory.
One plot is generated for every combination of action and checkpoint timestep.
Args:
checkpoints_dir: Path to `checkpoint/` directory from AIRL or GAIL output
directory.
n_gen_trajs: The number of trajectories to rollout using each generator
checkpoint. The transitions in the trajectory are scatterplotted on top of
the heatmap from the same checkpoint timestamp. Nonpositive indicates that
no trajectories should be plotted.
exp_trajs: Expert trajectories for scatterplotting. Generator trajectories
are dynamically generated from generator checkpoints.
Returns:
A dictionary mapping relative paths to `plt.Figure`. Every key is of the
form "{action_name}/{checkpoint_step}" where action_name is "left",
"neutral", or "right".
"""
result = {}
venv = vec_env.DummyVecEnv([lambda: gym.make("MountainCar-v0")])
checkpoints_dir = pathlib.Path(checkpoints_dir)
for checkpoint_dir in sorted(checkpoints_dir.iterdir()):
vec_normalize_path = checkpoint_dir / "gen_policy" / "vec_normalize.pkl"
discrim_path = checkpoint_dir / "discrim.pt"
policy_path = checkpoint_dir / "gen_policy"
if n_gen_trajs > 0:
# `load_policy` automatically loads VecNormalize for policy evaluation.
gen_policy = policies_serialize.load_policy("ppo", str(policy_path), venv)
gen_trajs = rollout.generate_trajectories(
gen_policy, venv, sample_until=rollout.min_episodes(n_gen_trajs)
)
else:
gen_trajs = None
# `gen_trajs` contains unnormalized observations.
# Load VecNormalize for use in RewardFn, which doesn't automatically
# normalize input observations.
with open(vec_normalize_path, "rb") as f:
vec_normalize = pickle.load(f) # type: vec_env.VecNormalize
vec_normalize.training = False
reward_fn = rewards_serialize.load_reward("DiscrimNet", discrim_path, venv)
norm_rew_fn = common.build_norm_reward_fn(
reward_fn=reward_fn, vec_normalize=vec_normalize
)
for act in range(MC_NUM_ACTS):
fig = make_heatmap(
act=act,
reward_fn=norm_rew_fn,
gen_trajs=gen_trajs,
exp_trajs=exp_trajs,
)
path = pathlib.Path(ACT_NAMES[act], checkpoint_dir.name)
result[path] = fig
return result
def plot_reward_vs_time(
trajs_dict: Dict[str, List[types.Trajectory]],
reward_fn: common.RewardFn,
preferred_colors: Optional[Dict[str, str]] = None,
) -> plt.Figure:
"""Plots a reward versus timestep line for each Trajectory.
Args:
trajs_dict: A dictionary mapping rollout labels (e.g. "expert" or
"gen policy") to rollouts associated with those labels.
reward_fn: Reward function for evaluating rollout rewards.
preferred_colors: An optional dictionary mapping rollout labels to
preferred line colors.
Returns:
The figure.
"""
if preferred_colors is None:
preferred_colors = {}
fig, ax = plt.subplots()
for i, (trajs_label, trajs_list) in enumerate(trajs_dict.items()):
X = []
Y = []
for traj in trajs_list:
T = len(traj.rews)
X.extend(range(T))
dones = np.zeros(T, dtype=np.bool)
dones[-1] = True
rews = reward_fn(traj.obs[:-1], traj.acts, traj.obs[1:], dones)
Y.extend(rews)
color = preferred_colors.get(trajs_label, None)
ax.plot(X, Y, label=trajs_label, c=color)
ax.set_xlabel("timestep")
ax.set_ylabel("test reward")
ax.legend()
return fig
| mit |
bjackman/workload-automation | wlauto/utils/misc.py | 2 | 29311 | # Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Miscellaneous functions that don't fit anywhere else.
"""
from __future__ import division
import os
import sys
import re
import math
import imp
import string
import threading
import signal
import pkgutil
import traceback
import logging
import random
import hashlib
import subprocess
from subprocess import CalledProcessError
from datetime import datetime, timedelta
from operator import mul, itemgetter
from StringIO import StringIO
from itertools import cycle, groupby
from functools import partial
from distutils.spawn import find_executable
import yaml
from dateutil import tz
# ABI --> architectures list
ABI_MAP = {
'armeabi': ['armeabi', 'armv7', 'armv7l', 'armv7el', 'armv7lh', 'armeabi-v7a'],
'arm64': ['arm64', 'armv8', 'arm64-v8a', 'aarch64'],
}
def preexec_function():
# Ignore the SIGINT signal by setting the handler to the standard
# signal handler SIG_IGN.
signal.signal(signal.SIGINT, signal.SIG_IGN)
# Change process group in case we have to kill the subprocess and all of
# its children later.
# TODO: this is Unix-specific; would be good to find an OS-agnostic way
# to do this in case we wanna port WA to Windows.
os.setpgrp()
check_output_logger = logging.getLogger('check_output')
# Defined here rather than in wlauto.exceptions due to module load dependencies
class TimeoutError(Exception):
"""Raised when a subprocess command times out. This is basically a ``WAError``-derived version
of ``subprocess.CalledProcessError``, the thinking being that while a timeout could be due to
programming error (e.g. not setting long enough timers), it is often due to some failure in the
    environment, and therefore should be classed as a "user error"."""
def __init__(self, command, output):
super(TimeoutError, self).__init__('Timed out: {}'.format(command))
self.command = command
self.output = output
def __str__(self):
return '\n'.join([self.message, 'OUTPUT:', self.output or ''])
class CalledProcessErrorWithStderr(CalledProcessError):
def __init__(self, *args, **kwargs):
self.output = kwargs.pop("output")
self.error = kwargs.pop("error")
super(CalledProcessErrorWithStderr, self).__init__(*args, **kwargs)
def __str__(self):
return '{}\nSTDOUT: {}\nSTDERR:{}'.format(CalledProcessError.__str__(self),
self.output, self.error)
__repr__ = __str__
def check_output(command, timeout=None, ignore=None, **kwargs):
"""This is a version of subprocess.check_output that adds a timeout parameter to kill
the subprocess if it does not return within the specified time."""
# pylint: disable=too-many-branches
if ignore is None:
ignore = []
elif isinstance(ignore, int):
ignore = [ignore]
elif not isinstance(ignore, list) and ignore != 'all':
message = 'Invalid value for ignore parameter: "{}"; must be an int or a list'
raise ValueError(message.format(ignore))
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
def callback(pid):
try:
check_output_logger.debug('{} timed out; sending SIGKILL'.format(pid))
os.killpg(pid, signal.SIGKILL)
except OSError:
pass # process may have already terminated.
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
preexec_fn=preexec_function, **kwargs)
if timeout:
timer = threading.Timer(timeout, callback, [process.pid, ])
timer.start()
try:
output, error = process.communicate()
finally:
if timeout:
timer.cancel()
retcode = process.poll()
if retcode:
if retcode == -9: # killed, assume due to timeout callback
raise TimeoutError(command, output='\n'.join([output, error]))
elif ignore != 'all' and retcode not in ignore:
raise CalledProcessErrorWithStderr(retcode, command, output=output, error=error)
return output, error
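# Hedged usage sketch (not part of the original module):
#   out, err = check_output(['echo', 'hello'])        # captured stdout and stderr
#   out, err = check_output(['false'], ignore='all')  # non-zero exit codes tolerated
#   check_output(['sleep', '10'], timeout=1)          # raises TimeoutError after 1s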
def walk_modules(path):
"""
Given package name, return a list of all modules (including submodules, etc)
in that package.
"""
root_mod = __import__(path, {}, {}, [''])
mods = [root_mod]
for _, name, ispkg in pkgutil.iter_modules(root_mod.__path__):
submod_path = '.'.join([path, name])
if ispkg:
mods.extend(walk_modules(submod_path))
else:
submod = __import__(submod_path, {}, {}, [''])
mods.append(submod)
return mods
def ensure_directory_exists(dirpath):
"""A filter for directory paths to ensure they exist."""
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
return dirpath
def ensure_file_directory_exists(filepath):
"""
A filter for file paths to ensure the directory of the
file exists and the file can be created there. The file
itself is *not* going to be created if it doesn't already
exist.
"""
ensure_directory_exists(os.path.dirname(filepath))
return filepath
def diff_tokens(before_token, after_token):
"""
Creates a diff of two tokens.
    If the two tokens are the same it just returns the token
(whitespace tokens are considered the same irrespective of type/number
of whitespace characters in the token).
If the tokens are numeric, the difference between the two values
is returned.
Otherwise, a string in the form [before -> after] is returned.
"""
if before_token.isspace() and after_token.isspace():
return after_token
elif before_token.isdigit() and after_token.isdigit():
try:
diff = int(after_token) - int(before_token)
return str(diff)
except ValueError:
return "[%s -> %s]" % (before_token, after_token)
elif before_token == after_token:
return after_token
else:
return "[%s -> %s]" % (before_token, after_token)
def prepare_table_rows(rows):
"""Given a list of lists, make sure they are prepared to be formatted into a table
by making sure each row has the same number of columns and stringifying all values."""
rows = [map(str, r) for r in rows]
max_cols = max(map(len, rows))
for row in rows:
pad = max_cols - len(row)
for _ in xrange(pad):
row.append('')
return rows
def write_table(rows, wfh, align='>', headers=None): # pylint: disable=R0914
"""Write a column-aligned table to the specified file object."""
if not rows:
return
rows = prepare_table_rows(rows)
num_cols = len(rows[0])
# cycle specified alignments until we have max_cols of them. This is
    # consistent with how such cases are handled in R, pandas, etc.
it = cycle(align)
align = [it.next() for _ in xrange(num_cols)]
cols = zip(*rows)
col_widths = [max(map(len, c)) for c in cols]
row_format = ' '.join(['{:%s%s}' % (align[i], w) for i, w in enumerate(col_widths)])
row_format += '\n'
if headers:
wfh.write(row_format.format(*headers))
underlines = ['-' * len(h) for h in headers]
wfh.write(row_format.format(*underlines))
for row in rows:
wfh.write(row_format.format(*row))
def get_null():
"""Returns the correct null sink based on the OS."""
return 'NUL' if os.name == 'nt' else '/dev/null'
def get_traceback(exc=None):
"""
    Returns the string with the traceback for the specified exc
    object, or for the current exception if exc is not specified.
"""
if exc is None:
exc = sys.exc_info()
if not exc:
return None
tb = exc[2]
sio = StringIO()
traceback.print_tb(tb, file=sio)
del tb # needs to be done explicitly see: http://docs.python.org/2/library/sys.html#sys.exc_info
return sio.getvalue()
def merge_dicts(*args, **kwargs):
if len(args) < 2:
raise ValueError('Must specify at least two dicts to merge.')
func = partial(_merge_two_dicts, **kwargs)
return reduce(func, args)
def _merge_two_dicts(base, other, list_duplicates='all', match_types=False, # pylint: disable=R0912,R0914
dict_type=dict, should_normalize=True, should_merge_lists=True):
"""Merge dicts normalizing their keys."""
merged = dict_type()
base_keys = base.keys()
other_keys = other.keys()
norm = normalize if should_normalize else lambda x, y: x
base_only = []
other_only = []
both = []
union = []
for k in base_keys:
if k in other_keys:
both.append(k)
else:
base_only.append(k)
union.append(k)
    for k in other_keys:
        if k not in base_keys:
            union.append(k)
            other_only.append(k)
for k in union:
if k in base_only:
merged[k] = norm(base[k], dict_type)
elif k in other_only:
merged[k] = norm(other[k], dict_type)
elif k in both:
base_value = base[k]
other_value = other[k]
base_type = type(base_value)
other_type = type(other_value)
if (match_types and (base_type != other_type) and
(base_value is not None) and (other_value is not None)):
raise ValueError('Type mismatch for {} got {} ({}) and {} ({})'.format(k, base_value, base_type,
other_value, other_type))
if isinstance(base_value, dict):
merged[k] = _merge_two_dicts(base_value, other_value, list_duplicates, match_types, dict_type)
elif isinstance(base_value, list):
if should_merge_lists:
merged[k] = _merge_two_lists(base_value, other_value, list_duplicates, dict_type)
else:
merged[k] = _merge_two_lists([], other_value, list_duplicates, dict_type)
elif isinstance(base_value, set):
merged[k] = norm(base_value.union(other_value), dict_type)
else:
merged[k] = norm(other_value, dict_type)
else: # Should never get here
raise AssertionError('Unexpected merge key: {}'.format(k))
return merged
def merge_lists(*args, **kwargs):
if len(args) < 2:
raise ValueError('Must specify at least two lists to merge.')
func = partial(_merge_two_lists, **kwargs)
return reduce(func, args)
def _merge_two_lists(base, other, duplicates='all', dict_type=dict): # pylint: disable=R0912
"""
Merge lists, normalizing their entries.
parameters:
:base, other: the two lists to be merged. ``other`` will be merged on
top of base.
:duplicates: Indicates the strategy of handling entries that appear
in both lists. ``all`` will keep occurrences from both
lists; ``first`` will only keep occurrences from
``base``; ``last`` will only keep occurrences from
``other``;
.. note:: duplicate entries that appear in the *same* list
will never be removed.
"""
if not isiterable(base):
base = [base]
if not isiterable(other):
other = [other]
if duplicates == 'all':
merged_list = []
for v in normalize(base, dict_type) + normalize(other, dict_type):
if not _check_remove_item(merged_list, v):
merged_list.append(v)
return merged_list
elif duplicates == 'first':
base_norm = normalize(base, dict_type)
merged_list = normalize(base, dict_type)
for v in base_norm:
_check_remove_item(merged_list, v)
for v in normalize(other, dict_type):
if not _check_remove_item(merged_list, v):
if v not in base_norm:
merged_list.append(v) # pylint: disable=no-member
return merged_list
elif duplicates == 'last':
other_norm = normalize(other, dict_type)
merged_list = []
for v in normalize(base, dict_type):
if not _check_remove_item(merged_list, v):
if v not in other_norm:
merged_list.append(v)
for v in other_norm:
if not _check_remove_item(merged_list, v):
merged_list.append(v)
return merged_list
else:
raise ValueError('Unexpected value for list duplicates argument: {}. '.format(duplicates) +
'Must be in {"all", "first", "last"}.')
def _check_remove_item(the_list, item):
"""Helper function for merge_lists that implements checking wether an items
should be removed from the list and doing so if needed. Returns ``True`` if
the item has been removed and ``False`` otherwise."""
if not isinstance(item, basestring):
return False
if not item.startswith('~'):
return False
actual_item = item[1:]
    if actual_item in the_list:
        del the_list[the_list.index(actual_item)]
    return True
def normalize(value, dict_type=dict):
"""Normalize values. Recursively normalizes dict keys to be lower case,
no surrounding whitespace, underscore-delimited strings."""
if isinstance(value, dict):
normalized = dict_type()
for k, v in value.iteritems():
if isinstance(k, basestring):
k = k.strip().lower().replace(' ', '_')
normalized[k] = normalize(v, dict_type)
return normalized
elif isinstance(value, list):
return [normalize(v, dict_type) for v in value]
elif isinstance(value, tuple):
return tuple([normalize(v, dict_type) for v in value])
else:
return value
VALUE_REGEX = re.compile(r'(\d+(?:\.\d+)?)\s*(\w*)')
UNITS_MAP = {
's': 'seconds',
'ms': 'milliseconds',
'us': 'microseconds',
'ns': 'nanoseconds',
'V': 'volts',
'A': 'amps',
'mA': 'milliamps',
'J': 'joules',
}
def parse_value(value_string):
"""parses a string representing a numerical value and returns
a tuple (value, units), where value will be either int or float,
and units will be a string representing the units or None."""
match = VALUE_REGEX.search(value_string)
if match:
vs = match.group(1)
value = float(vs) if '.' in vs else int(vs)
us = match.group(2)
units = UNITS_MAP.get(us, us)
return (value, units)
else:
return (value_string, None)
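# Example (illustrative): parse_value splits the magnitude from its unit and
# expands known abbreviations via UNITS_MAP; unrecognised units are returned as-is.
def _example_parse_value():
    assert parse_value('10 ms') == (10, 'milliseconds')
    assert parse_value('1.5V') == (1.5, 'volts')
    assert parse_value('42 bogounits') == (42, 'bogounits')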
def get_meansd(values):
"""Returns mean and standard deviation of the specified values."""
if not values:
return float('nan'), float('nan')
mean = sum(values) / len(values)
sd = math.sqrt(sum([(v - mean) ** 2 for v in values]) / len(values))
return mean, sd
def geomean(values):
"""Returns the geometric mean of the values."""
return reduce(mul, values) ** (1.0 / len(values))
def capitalize(text):
"""Capitalises the specified text: first letter upper case,
all subsequent letters lower case."""
if not text:
return ''
return text[0].upper() + text[1:].lower()
def convert_new_lines(text):
""" Convert new lines to a common format. """
return text.replace('\r\n', '\n').replace('\r', '\n')
def escape_quotes(text):
"""Escape quotes, and escaped quotes, in the specified text."""
return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\'', '\\\'').replace('\"', '\\\"')
def escape_single_quotes(text):
"""Escape single quotes, and escaped single quotes, in the specified text."""
return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\'', '\'\\\'\'')
def escape_double_quotes(text):
"""Escape double quotes, and escaped double quotes, in the specified text."""
return re.sub(r'\\("|\')', r'\\\\\1', text).replace('\"', '\\\"')
def getch(count=1):
"""Read ``count`` characters from standard input."""
if os.name == 'nt':
import msvcrt # pylint: disable=F0401
return ''.join([msvcrt.getch() for _ in xrange(count)])
else: # assume Unix
import tty # NOQA
import termios # NOQA
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(count)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def isiterable(obj):
"""Returns ``True`` if the specified object is iterable and
*is not a string type*, ``False`` otherwise."""
return hasattr(obj, '__iter__') and not isinstance(obj, basestring)
def utc_to_local(dt):
"""Convert naive datetime to local time zone, assuming UTC."""
return dt.replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal())
def local_to_utc(dt):
"""Convert naive datetime to UTC, assuming local time zone."""
return dt.replace(tzinfo=tz.tzlocal()).astimezone(tz.tzutc())
def as_relative(path):
"""Convert path to relative by stripping away the leading '/' on UNIX or
    the equivalent on other platforms."""
path = os.path.splitdrive(path)[1]
return path.lstrip(os.sep)
def get_cpu_mask(cores):
"""Return a string with the hex for the cpu mask for the specified core numbers."""
mask = 0
for i in cores:
mask |= 1 << i
return '0x{0:x}'.format(mask)
def load_class(classpath):
"""Loads the specified Python class. ``classpath`` must be a fully-qualified
    class name (i.e. namespaced under module/package)."""
modname, clsname = classpath.rsplit('.', 1)
    return getattr(__import__(modname, {}, {}, [clsname]), clsname)
def get_pager():
"""Returns the name of the system pager program."""
pager = os.getenv('PAGER')
if pager is None:
pager = find_executable('less')
if pager is None:
pager = find_executable('more')
return pager
def enum_metaclass(enum_param, return_name=False, start=0):
"""
Returns a ``type`` subclass that may be used as a metaclass for
an enum.
    Parameters:
:enum_param: the name of class attribute that defines enum values.
The metaclass will add a class attribute for each value in
``enum_param``. The value of the attribute depends on the type
of ``enum_param`` and on the values of ``return_name``. If
``return_name`` is ``True``, then the value of the new attribute is
the name of that attribute; otherwise, if ``enum_param`` is a ``list``
or a ``tuple``, the value will be the index of that param in
``enum_param``, optionally offset by ``start``, otherwise, it will
        be assumed that ``enum_param`` implements a dict-like interface and
the value will be ``enum_param[attr_name]``.
    :return_name: If ``True``, the enum values will be the names of enum attributes. If
``False``, the default, the values will depend on the type of
``enum_param`` (see above).
:start: If ``enum_param`` is a list or a tuple, and ``return_name`` is ``False``,
this specifies an "offset" that will be added to the index of the attribute
within ``enum_param`` to form the value.
"""
class __EnumMeta(type):
def __new__(mcs, clsname, bases, attrs):
cls = type.__new__(mcs, clsname, bases, attrs)
values = getattr(cls, enum_param, [])
if return_name:
for name in values:
setattr(cls, name, name)
else:
if isinstance(values, list) or isinstance(values, tuple):
for i, name in enumerate(values):
setattr(cls, name, i + start)
else: # assume dict-like
for name in values:
setattr(cls, name, values[name])
return cls
return __EnumMeta
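# Example (illustrative): a minimal enum built with the metaclass above. ``values``
# plays the role of ``enum_param``; with ``return_name=False`` and a list, each
# name becomes a class attribute holding its index offset by ``start``.
class _ExampleStatus(object):
    __metaclass__ = enum_metaclass('values', return_name=False, start=1)
    values = ['queued', 'running', 'done']
# _ExampleStatus.queued == 1, _ExampleStatus.running == 2, _ExampleStatus.done == 3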
def which(name):
"""Platform-independent version of UNIX which utility."""
if os.name == 'nt':
paths = os.getenv('PATH').split(os.pathsep)
exts = os.getenv('PATHEXT').split(os.pathsep)
for path in paths:
testpath = os.path.join(path, name)
if os.path.isfile(testpath):
return testpath
for ext in exts:
testpathext = testpath + ext
if os.path.isfile(testpathext):
return testpathext
return None
else: # assume UNIX-like
try:
return check_output(['which', name])[0].strip()
except subprocess.CalledProcessError:
return None
_bash_color_regex = re.compile('\x1b\[[0-9;]+m')
def strip_bash_colors(text):
return _bash_color_regex.sub('', text)
def format_duration(seconds, sep=' ', order=['day', 'hour', 'minute', 'second']): # pylint: disable=dangerous-default-value
"""
Formats the specified number of seconds into human-readable duration.
"""
if isinstance(seconds, timedelta):
td = seconds
else:
td = timedelta(seconds=seconds)
dt = datetime(1, 1, 1) + td
result = []
for item in order:
value = getattr(dt, item, None)
        if item == 'day':
value -= 1
if not value:
continue
suffix = '' if value == 1 else 's'
result.append('{} {}{}'.format(value, item, suffix))
return sep.join(result)
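# Example (illustrative): durations are decomposed into the units listed in
# ``order``; zero-valued units are skipped and singular/plural forms are handled.
def _example_format_duration():
    assert format_duration(3661) == '1 hour 1 minute 1 second'
    assert format_duration(90, sep=', ') == '1 minute, 30 seconds'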
def get_article(word):
"""
Returns the appropriate indefinite article for the word (ish).
.. note:: Indefinite article assignment in English is based on
sound rather than spelling, so this will not work correctly
              in all cases; e.g. this will return ``"a hour"``.
"""
    return 'an' if word[0] in 'aeiou' else 'a'
def get_random_string(length):
"""Returns a random ASCII string of the specified length)."""
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in xrange(length))
class LoadSyntaxError(Exception):
def __init__(self, message, filepath, lineno):
super(LoadSyntaxError, self).__init__(message)
self.filepath = filepath
self.lineno = lineno
def __str__(self):
message = 'Syntax Error in {}, line {}:\n\t{}'
return message.format(self.filepath, self.lineno, self.message)
RAND_MOD_NAME_LEN = 30
BAD_CHARS = string.punctuation + string.whitespace
TRANS_TABLE = string.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
def to_identifier(text):
"""Converts text to a valid Python identifier by replacing all
whitespace and punctuation."""
return re.sub('_+', '_', text.translate(TRANS_TABLE))
def load_struct_from_python(filepath=None, text=None):
"""Parses a config structure from a .py file. The structure should be composed
of basic Python types (strings, ints, lists, dicts, etc.)."""
if not (filepath or text) or (filepath and text):
raise ValueError('Exactly one of filepath or text must be specified.')
try:
if filepath:
modname = to_identifier(filepath)
mod = imp.load_source(modname, filepath)
else:
modname = get_random_string(RAND_MOD_NAME_LEN)
while modname in sys.modules: # highly unlikely, but...
modname = get_random_string(RAND_MOD_NAME_LEN)
mod = imp.new_module(modname)
exec text in mod.__dict__ # pylint: disable=exec-used
return dict((k, v)
for k, v in mod.__dict__.iteritems()
if not k.startswith('_'))
except SyntaxError as e:
raise LoadSyntaxError(e.message, filepath, e.lineno)
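# Example (illustrative): loading a structure from a literal snippet of Python
# source text rather than from a file; only public (non-underscore) names are kept.
def _example_load_struct_from_python():
    struct = load_struct_from_python(text='WIDGETS = [1, 2, 3]\nNAME = "demo"\n')
    assert struct == {'WIDGETS': [1, 2, 3], 'NAME': 'demo'}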
def load_struct_from_yaml(filepath=None, text=None):
"""Parses a config structure from a .yaml file. The structure should be composed
of basic Python types (strings, ints, lists, dicts, etc.)."""
if not (filepath or text) or (filepath and text):
raise ValueError('Exactly one of filepath or text must be specified.')
try:
if filepath:
with open(filepath) as fh:
return yaml.load(fh)
else:
return yaml.load(text)
except yaml.YAMLError as e:
lineno = None
if hasattr(e, 'problem_mark'):
lineno = e.problem_mark.line # pylint: disable=no-member
raise LoadSyntaxError(e.message, filepath=filepath, lineno=lineno)
def load_struct_from_file(filepath):
"""
Attempts to parse a Python structure consisting of basic types from the specified file.
    Raises a ``ValueError`` if the specified file is of unknown format; ``LoadSyntaxError`` if
there is an issue parsing the file.
"""
extn = os.path.splitext(filepath)[1].lower()
if (extn == '.py') or (extn == '.pyc') or (extn == '.pyo'):
return load_struct_from_python(filepath)
elif extn == '.yaml':
return load_struct_from_yaml(filepath)
else:
raise ValueError('Unknown format "{}": {}'.format(extn, filepath))
def unique(alist):
"""
Returns a list containing only unique elements from the input list (but preserves
order, unlike sets).
"""
result = []
for item in alist:
if item not in result:
result.append(item)
return result
def open_file(filepath):
"""
Open the specified file path with the associated launcher in an OS-agnostic way.
"""
if os.name == 'nt': # Windows
return os.startfile(filepath) # pylint: disable=no-member
elif sys.platform == 'darwin': # Mac OSX
return subprocess.call(['open', filepath])
else: # assume Linux or similar running a freedesktop-compliant GUI
return subprocess.call(['xdg-open', filepath])
def ranges_to_list(ranges_string):
"""Converts a sysfs-style ranges string, e.g. ``"0,2-4"``, into a list ,e.g ``[0,2,3,4]``"""
values = []
for rg in ranges_string.split(','):
if '-' in rg:
first, last = map(int, rg.split('-'))
values.extend(xrange(first, last + 1))
else:
values.append(int(rg))
return values
def list_to_ranges(values):
"""Converts a list, e.g ``[0,2,3,4]``, into a sysfs-style ranges string, e.g. ``"0,2-4"``"""
range_groups = []
for _, g in groupby(enumerate(values), lambda (i, x): i - x):
range_groups.append(map(itemgetter(1), g))
range_strings = []
for group in range_groups:
if len(group) == 1:
range_strings.append(str(group[0]))
else:
range_strings.append('{}-{}'.format(group[0], group[-1]))
return ','.join(range_strings)
def list_to_mask(values, base=0x0):
"""Converts the specified list of integer values into
    a bit mask for those values. Optionally, the list can be
applied to an existing mask."""
for v in values:
base |= (1 << v)
return base
def mask_to_list(mask):
"""Converts the specfied integer bitmask into a list of
indexes of bits that are set in the mask."""
size = len(bin(mask)) - 2 # because of "0b"
return [size - i - 1 for i in xrange(size)
if mask & (1 << size - i - 1)]
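# Example (illustrative): list_to_mask and mask_to_list are inverses of one another
# (up to ordering), and get_cpu_mask renders the same mask as a hex string.
def _example_cpu_masks():
    assert list_to_mask([0, 2, 3]) == 0b1101
    assert sorted(mask_to_list(0b1101)) == [0, 2, 3]
    assert get_cpu_mask([0, 2, 3]) == '0xd'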
def sha256(path, chunk=2048):
"""Calculates SHA256 hexdigest of the file at the specified path."""
h = hashlib.sha256()
with open(path, 'rb') as fh:
buf = fh.read(chunk)
while buf:
h.update(buf)
buf = fh.read(chunk)
return h.hexdigest()
def urljoin(*parts):
return '/'.join(p.rstrip('/') for p in parts)
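# Example (illustrative): trailing slashes are stripped from every part before
# joining, so repeated slashes do not accumulate in the result.
def _example_urljoin():
    assert urljoin('http://example.com/', 'api/', 'v1') == 'http://example.com/api/v1'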
__memo_cache = {}
def memoized(func):
"""A decorator for memoizing functions and methods."""
func_id = repr(func)
def memoize_wrapper(*args, **kwargs):
id_string = func_id + ','.join([str(id(a)) for a in args])
id_string += ','.join('{}={}'.format(k, v)
for k, v in kwargs.iteritems())
if id_string not in __memo_cache:
__memo_cache[id_string] = func(*args, **kwargs)
return __memo_cache[id_string]
return memoize_wrapper
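# Example (illustrative): results are cached per function and per argument identity,
# so a repeated call with the same arguments is served from the cache instead of
# re-invoking the wrapped function.
def _example_memoized():
    calls = []
    @memoized
    def _double(x):
        calls.append(x)
        return x * 2
    assert _double(21) == 42
    assert _double(21) == 42
    assert len(calls) == 1  # the second call hit the cache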
def commonprefix(file_list, sep=os.sep):
"""
Find the lowest common base folder of a passed list of files.
"""
common_path = os.path.commonprefix(file_list)
cp_split = common_path.split(sep)
other_split = file_list[0].split(sep)
last = len(cp_split) - 1
if cp_split[last] != other_split[last]:
cp_split = cp_split[:-1]
return sep.join(cp_split)
| apache-2.0 |
vberthiaume/digitalFilters | ch3/p144fig3.2.py | 1 | 2179 | import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
def filterslow(B, A, x):
# FILTERSLOW: Filter x to produce y = (B/A) x . Equivalent to 'y = filter(B,A,x)' using a slow (but tutorial) method.
NB = len(B)
NA = len(A)
Nx = len(x)
#first index of B contains gain for whole input x
v = B[0] * x
#other indices in B contain gain for delayed input, ie, feedforward gains
if NB > 1:
# we loop until we run out of either feedforward gains or samples
for k in np.arange(1, min(NB, Nx)):
            # to delay the input by k samples, insert k zeros in front of x and drop the last k samples
            padding = np.zeros(k)
            xdelayed = np.hstack((padding, x[0 : Nx-k]))
            # the zeros at the front of xdelayed mean that B[k] only contributes to outputs with
            # index >= k, matching the k-sample delay of the feedforward term B[k]*x[n-k]
            v += B[k] * xdelayed
# The feedback part is intrinsically scalar, so this loop is where we spend a lot of time.
y = np.zeros(len(x))
    ac = A[1:NA]  # A[0] is assumed to be 1 (the coefficient of y[n]), so only the feedback gains A[1:] are needed
# loop over input samples
for i in range (Nx):
t = v[i] # initialize accumulator
#if we have feedback gains
if NA > 1:
            for k in range(NA-1):
                if i > k:
                    t -= ac[k]*y[i-k-1]
                # else y[i-k-1] = 0, so the term contributes nothing
y[i] = t
return y
N = 10000
x = np.random.random(N) # random input signal
B = np.random.random(101) # random coefficients
A = np.hstack((1, 0.001*np.random.random(100))) # random but probably stable
#tic
yf = scipy.signal.lfilter(B, A, x)
#ft = toc
#tic
yfs = filterslow(B, A, x)
#fst=toc
# my plottings
fig, axarr = plt.subplots(3, sharex=False)
axarr[0].plot(np.arange(N), x)
axarr[0].set_title('random numbers')
axarr[1].plot(np.arange(N), yf)
axarr[1].set_title('lfiltered random numbers')
axarr[2].plot(np.arange(N), yfs)
axarr[2].set_title('filtered slow random numbers')
plt.show()
| gpl-3.0 |
Eric89GXL/scikit-learn | sklearn/svm/tests/test_bounds.py | 42 | 2112 | import nose
from nose.tools import assert_true
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['l2', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'l2': LinearSVC(loss='l2', penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
murali-munna/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are less examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
| bsd-3-clause |
waterponey/scikit-learn | benchmarks/bench_lasso.py | 111 | 3364 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import matplotlib.pyplot as plt
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
plt.figure('scikit-learn LASSO benchmark results')
plt.subplot(211)
plt.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
plt.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
plt.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features,
alpha))
plt.legend(loc='upper left')
plt.xlabel('number of samples')
plt.ylabel('Time (s)')
plt.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
plt.subplot(212)
plt.plot(list_n_features, lasso_results, 'b-', label='Lasso')
plt.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
plt.title('%d samples, alpha=%s' % (n_samples, alpha))
plt.legend(loc='upper left')
plt.xlabel('number of features')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
| bsd-3-clause |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/linear_model/randomized_l1.py | 25 | 24850 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import _preprocess_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
from ..exceptions import ConvergenceWarning
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.randint(
0, 2, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlman:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_preprocess_data = staticmethod(_preprocess_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
ensure_min_samples=2, estimator=self)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_offset, y_offset, X_scale = \
self._preprocess_data(X, y, self.fit_intercept, self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fit the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float64))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by subsampling the training data and
computing a Lasso estimate where the penalty of a random subset of
coefficients has been scaled. By performing this double
randomization several times, the method assigns high scores to
features that are repeatedly selected across randomizations. This
is known as stability selection. In short, features selected more
often are considered good features.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The s parameter used to randomly scale the penalty of different
        features (See :ref:`User Guide <randomized_l1>` for details).
Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
    selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learned more robust and almost independent of
the number of samples. The same property is not valid for
standardized data. However, if you wish to standardize, please
use `preprocessing.StandardScaler` before calling `fit` on an
estimator with `normalize=False`.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, Lasso, ElasticNet
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if isinstance(alpha, six.string_types) and alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float64))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by subsampling the training
data and fitting a L1-penalized LogisticRegression model where the
penalty of a random subset of coefficients has been scaled. By
performing this double randomization several times, the method
assigns high scores to features that are repeatedly selected across
randomizations. This is known as stability selection. In short,
features selected more often are considered good features.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The s parameter used to randomly scale the penalty of different
        features (See :ref:`User Guide <randomized_l1>` for details).
Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, LogisticRegression
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _preprocess_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize=normalize)
return X, y, X_offset, y, X_scale
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stability path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.randint(0, 2, size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
| mit |
benoitsteiner/tensorflow-opencl | tensorflow/contrib/learn/python/learn/estimators/_sklearn.py | 153 | 6723 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import six
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
Adopted from sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If `True`, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
if isinstance(value, collections.Callable):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Args:
**params: Parameters.
Returns:
self
Raises:
ValueError: If params contain invalid names.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
# pylint: disable=old-style-class
class _ClassifierMixin():
"""Mixin class for all classifiers."""
pass
class _RegressorMixin():
"""Mixin class for all regression estimators."""
pass
class _TransformerMixin():
"""Mixin class for all transformer estimators."""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples:
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
Copied from
https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
"""
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
train_size = int(train_size * args[0].shape[0])
np.random.seed(random_state)
indices = np.random.permutation(args[0].shape[0])
train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.cross_validation import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
pass
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
| apache-2.0 |
michalsa/md_hershberg | refGenomes.py | 1 | 60845 | from Bio import SeqIO, SeqRecord, SeqFeature
import csv
import re
import itertools
from scipy import stats
import subprocess, os, sys, threading
import pandas
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict
IDENTITYPCNT = 90
ALMINLENGTH = 0.9
GBKDIR = '/home/michalsa/Documents/research/codon_bias/ncbi_ftp_genomes/all_gbk'
FFNDIR = '/home/michalsa/Documents/research/codon_bias/ncbi_ftp_genomes/all_ffn'
OUTPUTDIR = '/home/michalsa/Documents/research/codon_bias/ncbi_ftp_genomes/output'
SEEDDIR = '/home/michalsa/Documents/research/codon_bias/ncbi_ftp_genomes/seed'
PHISPYDIR = '/home/michalsa/Documents/research/codon_bias/ncbi_ftp_genomes/phispy'
ENCPATH = '/home/michalsa/Downloads/ENCprime-master/bin'
AADICT = {'ala': ['gct', 'gcc', 'gca', 'gcg'],
'arg': ['cgt', 'cgc', 'cga', 'cgg', 'aga', 'agg'],
'asn': ['aat', 'aac'],
'asp': ['gat', 'gac'],
'cys': ['tgt', 'tgc'],
'gln': ['caa', 'cag'],
'glu': ['gaa', 'gag'],
'gly': ['ggt', 'ggc', 'gga', 'ggg'],
'his': ['cat', 'cac'],
'ile': ['att', 'atc', 'ata'],
'leu': ['tta', 'ttg', 'ctt', 'ctc', 'cta', 'ctg'],
'lys': ['aaa', 'aag'],
'phe': ['ttt', 'ttc'],
'pro': ['cct', 'ccc', 'cca', 'ccg'],
'ser': ['tct', 'tcc', 'tca', 'tcg', 'agt', 'agc'],
'thr': ['act', 'acc', 'aca', 'acg'],
'tyr': ['tat', 'tac'],
'val': ['gtt', 'gtc', 'gta', 'gtg']}
OPTDICT = {'Escherichia_coli': {'ala': ['gcg'],
'arg': ['cgt'],
'asn': ['aac'],
'asp': ['gac'],
'cys': ['tgc'],
'gln': ['cag'],
'glu': ['gaa'],
'gly': ['ggc'],
'his': ['cac'],
'ile': ['atc'],
'leu': ['ctg'],
'lys': ['aaa'],
'phe': ['ttc'],
'pro': ['ccg'],
'ser': ['tcc', 'agc'],
'thr': ['acc'],
'tyr': ['tac'],
'val': ['gtg']}}
chunk_size = 1024
timeout_sec = 3000
csv.field_size_limit(sys.maxsize)
class SubprocessTimeoutError(RuntimeError):
pass
def hit2dict(hit):
'''result indices:
0-query id, 1-subject id, 2-% identity, 3-alignment length, 4-mismatches, 5-gap opens,
6-q. start, 7-q. end, 8-s. start, 9-s. end, 10-evalue, 11-bit score'''
hit_arr = hit.strip().split('\t')
hit_dict = {'query_id': hit_arr[0],
'subject_id': hit_arr[1],
'identity%': float(hit_arr[2]),
'al_length': int(hit_arr[3]),
'q_length': abs(int(hit_arr[7])-int(hit_arr[6]))+1,
's_length': abs(int(hit_arr[9])-int(hit_arr[8]))+1}
return hit_dict
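# Example (illustrative): hit2dict applied to one hypothetical tab-separated
# FASTA/BLAST tabular (-m 8) hit line; only the fields used downstream are kept.
def _example_hit2dict():
    hit_line = 'geneA\tgeneB\t97.5\t300\t5\t1\t1\t300\t1\t300\t1e-50\t550\n'
    parsed = hit2dict(hit_line)
    assert parsed['identity%'] == 97.5 and parsed['al_length'] == 300
    assert parsed['q_length'] == 300 and parsed['s_length'] == 300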
def run_fasta(q_path, lib_path, output_path, use_stdin=False):
params = 'fasta36 -b =1 -d 1 -m 8C -m 10'
io = [q_path, lib_path, '>', output_path]
if use_stdin==True:
params = 'cat %s | ' % q_path + params + '-n'
io[0] = '@'
run_command_with_timeout(['/bin/bash', '-i', '-c', '%s %s' % (params, ' '.join(io))], 300, output_path)
'''
sp = subprocess.Popen(['/bin/bash', '-i', '-c', '%s %s' % (params, ' '.join(io))])
sp.communicate()
'''
def run_command_with_timeout(cmd, timeout_sec, output_path):
"""Execute `cmd` in a subprocess and enforce timeout `timeout_sec` seconds.
Return subprocess exit code on natural completion of the subprocess.
Raise an exception if timeout expires before subprocess completes."""
proc = subprocess.Popen(cmd)
proc_thread = threading.Thread(target=proc.communicate)
proc_thread.start()
proc_thread.join(timeout_sec)
'''if no progress has been made by timeout expiration, kill process'''
if proc_thread.is_alive() and os.stat(output_path).st_size==0:
# Process still running - kill it and raise timeout error
try:
proc.kill()
except OSError, e:
# The process finished between the `is_alive()` and `kill()`
return proc.returncode
'''# OK, the process was definitely killed
raise SubprocessTimeoutError('Process #%d killed after %f seconds' % (proc.pid, timeout_sec))'''
# Process completed naturally - return exit code
return proc.returncode
def gb2seed(org_name, strains_fname):
count = 0
if not os.path.exists(os.path.join(SEEDDIR, org_name)):
os.mkdir(os.path.join(SEEDDIR, org_name))
strains_df = pandas.DataFrame.from_csv(strains_fname, index_col=None)
for st_ix, strain_row in strains_df.iterrows():
strain_name = strain_row['strain']
strain_uid = strain_row['uid']
dir_name = '_'.join([org_name, strain_name, 'uid%s' % strain_uid])
print dir_name
if not os.path.exists(os.path.join(SEEDDIR, org_name, dir_name)):
os.mkdir(os.path.join(SEEDDIR, org_name, dir_name))
for filename in os.listdir(os.path.join(GBKDIR, dir_name)):
print filename
if strain_row['keys'] == re.search('^(.*).gbk$', filename).group(1):
if not os.path.exists(os.path.join(SEEDDIR, org_name, dir_name, re.search('^(.*).gbk$', filename).group(1))) \
or len(os.listdir(os.path.join(SEEDDIR, org_name, dir_name, re.search('^(.*).gbk$', filename).group(1)))) == 0:
os.chdir('/home/michalsa/Downloads/phiSpyNov11_v2.3/')
filename_abs = os.path.join(GBKDIR, dir_name, filename)
org_dir = os.path.join(SEEDDIR, org_name, dir_name, re.search('^(.*).gbk$', filename).group(1))
subprocess.call(['python', 'genbank_to_seed.py', filename_abs, org_dir])
else:
print "\tseed conversion already exists"
count+=1
print "%d seed conversions for %s" % (count, org_name)
def runPhiSpy(org_name, strains_fname):
numOrg = 0
if org_name == "Escherichia_coli":
numOrg = 9
if not os.path.exists(os.path.join(PHISPYDIR, org_name)):
os.mkdir(os.path.join(PHISPYDIR, org_name))
strains_df = pandas.DataFrame.from_csv(strains_fname, index_col=None)
for st_ix, strain_row in strains_df.iterrows():
strain_name = strain_row['strain']
strain_uid = strain_row['uid']
strain_key = strain_row['keys']
dir_name = '_'.join([org_name, strain_name, 'uid%s' % strain_uid])
print dir_name
if not os.path.exists(os.path.join(PHISPYDIR, org_name, dir_name)):
os.mkdir(os.path.join(PHISPYDIR, org_name, dir_name))
os.chdir('/home/michalsa/Downloads/phiSpyNov11_v2.3/')
dirname_abs = os.path.join(SEEDDIR, org_name, dir_name, strain_key)
output_dir = os.path.join(PHISPYDIR, org_name, dir_name, strain_key)
if not os.path.exists(output_dir):
subprocess.call(['python', 'phiSpy.py', '-i', dirname_abs, '-o', output_dir, '-t', '%s' % numOrg])
else:
print '\tviral data file already exists'
def createPanGenome(org_name, strains_fname, unifiable_fname):
pairwise_output_path = os.path.join(OUTPUTDIR, org_name, 'fasta_out_fluidity')
fasta_output_path = os.path.join(OUTPUTDIR, org_name, 'fasta_out_pangenome')
if not os.path.isdir(fasta_output_path): os.mkdir(fasta_output_path)
strains_df = pandas.DataFrame.from_csv(strains_fname, index_col=None)
unifiable_df = pandas.DataFrame.from_csv(unifiable_fname)
    fluidity_thresholds = list(unifiable_df.columns.values)
    for threshold in fluidity_thresholds:
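        '''convert the fluidity threshold to a short label, e.g. 0.05 -> "5e-2",
        0.1 -> "1e-1"'''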
threshold_str = '%se-%d' % (re.search('([1-9])', str(threshold)).group(1), str(threshold).count('0'))
'''create directory for threshold, if it doesn't exist'''
if not os.path.exists(os.path.join(fasta_output_path, threshold_str)):
os.mkdir(os.path.join(fasta_output_path, threshold_str))
'''create dict for pangenes'''
pangenes = defaultdict(lambda: defaultdict (list))
'''fasta filepath for pangenes'''
pangenome_filepath = os.path.join(OUTPUTDIR, org_name, 'pangenome_identity%s_fluidity%s.ffn' % (IDENTITYPCNT, threshold_str))
'''create csv summary file, if exists - add its contents to pangenes dict'''
summary_path = os.path.join(OUTPUTDIR, org_name, 'pangenome_summary_identity%s_fluidity%s.tsv' % (IDENTITYPCNT, threshold_str))
if os.path.isfile(summary_path) and os.stat(summary_path).st_size!=0:
summary_fh = open(summary_path, 'r')
#summary_reader = csv.reader(summary_fh, delimiter='\t')
for line in summary_fh:
row = line.split('\t')
if len(row)==3:
pangenes.update({row[0]: {'paralogs': row[1].split(';'),
'orthologs': row[2].split(';')}})
print '%d records' % len(pangenes.keys())
'''create directory for unified genomes and pangenome output, if they don't exist'''
al_types = ['unified', 'pangenome']
for al_type in al_types:
if not os.path.exists(os.path.join(fasta_output_path, threshold_str, al_type)):
os.mkdir(os.path.join(fasta_output_path, threshold_str, al_type))
print threshold
for st_ix, strain_row in strains_df.iterrows():
strain_name = strain_row['strain']
uid = strain_row['uid']
q_name = strain_row['keys']
strain_idx = '_'.join([strain_name, 'uid%s' % uid])
dir_name = '_'.join([org_name, strain_idx])
if type(q_name)!=str:
raise ValueError('Incorrect key format for %s: %s. Should be str' % (dir_name, type(q_name)))
q_path = os.path.join(FFNDIR, dir_name, '%s.ffn' % q_name)
#print q_path
'''check whether strain is already accounted for in existing unified genomes - default is True, i.e., unified genome should be created for strain'''
unifiable_flag = True
for filename in os.listdir(os.path.join(fasta_output_path, threshold_str, 'unified')):
if re.search(strain_idx, filename):
unifiable_flag = False
break
if unifiable_flag == True:
strain_unifiables = []
if type(unifiable_df[threshold][strain_idx])==str: strain_unifiables = unifiable_df[threshold][strain_idx].split(';')
unifiables_dir = os.path.join(fasta_output_path, threshold_str, 'unified')
if len(strain_unifiables) > 0:
unified_filename = os.path.join(unifiables_dir, '%s_%s.ffn' % (strain_idx, '_'.join(strain_unifiables)))
else:
unified_filename = os.path.join(unifiables_dir, '%s.ffn' % strain_idx)
'''paste data from first entry in unified genome'''
unified_fh = open(unified_filename, 'a')
q_records = SeqIO.parse(open(q_path, 'r'), 'fasta')
for q_record in q_records:
SeqIO.write(q_record, unified_fh, 'fasta')
print strain_idx, '%d records' % sum([1 for record in SeqIO.parse(open(unified_filename, 'r'), 'fasta')])
for unifiable_idx in strain_unifiables:
unique_count = 0
unifiable_name = None
unifiable_st, unifiable_uid = unifiable_idx.split('_uid')
for ix, rw in strains_df.iterrows():
if str(rw['strain']) == str(unifiable_st) and int(rw['uid']) == int(unifiable_uid):
unifiable_name = rw['keys']
#print rw['strain'], unifiable_st, rw['uid'], unifiable_uid
unifiable_fh = open(os.path.join(FFNDIR, '_'.join([org_name, unifiable_idx]), '%s.ffn' % unifiable_name), 'r')
print unifiable_idx, '%d records' % sum([1 for record in SeqIO.parse(unifiable_fh, 'fasta')])
'''check whether pairwise alignment exists'''
pairwise_res = os.path.join(pairwise_output_path, '%s_vs_%s.txt' % (unifiable_idx, strain_idx))
if pairwise_res.split('/')[-1] not in os.listdir(pairwise_output_path) or os.stat(pairwise_res).st_size==0:
raise NameError('No such file: %s' % pairwise_res)
res_fh = open(pairwise_res, 'r')
for line in res_fh:
if not re.search('^#', line):
al_dict = hit2dict(line)
unique_flag = False
'''if gene is unique, add it to unified genome'''
if al_dict['identity%'] < IDENTITYPCNT or al_dict['al_length'] < ALMINLENGTH*al_dict['q_length']:
unique_flag = True
'''parse all pairwise comparisons between unifiable and previous unifiables'''
if strain_unifiables.index(unifiable_idx)>0:
for unifiable_comp in strain_unifiables[:strain_unifiables.index(unifiable_idx)]:
unifiables_res = os.path.join(pairwise_output_path, '%s_vs_%s.txt' % (unifiable_idx, unifiable_comp))
unifiables_fh = open(unifiables_res, 'r')
for line_comp in unifiables_fh:
if not re.search('^#', line_comp):
'''unifiable is query'''
al_comp_dict = hit2dict(line_comp)
if al_comp_dict['query_id']==al_dict['query_id'] and \
not (al_comp_dict['identity%'] < IDENTITYPCNT or al_comp_dict['al_length'] < ALMINLENGTH*al_comp_dict['q_length']):
unique_flag = False
break
if unique_flag==False:
break
'''add record to unified genome only if it didn't exist in any of the previous unifiables'''
if unique_flag==True:
unique_count+=1
unifiable_fh.seek(0)
unifiable_records = SeqIO.parse(unifiable_fh, 'fasta')
for record in unifiable_records:
if record.id == al_dict['query_id']:
SeqIO.write(record, unified_fh, 'fasta')
break
print '%d unique' % unique_count
print unified_filename.split('/')[-1], '%d records' % sum([1 for record in SeqIO.parse(open(unified_filename, 'r'), 'fasta')])
'''iterate over unified genomes'''
for unified_genome in sorted(os.listdir(os.path.join(fasta_output_path, threshold_str, 'unified'))):
print unified_genome
genes_count, unique_genes_count = 0, 0
genome_filepath = os.path.join(fasta_output_path, threshold_str, 'unified', unified_genome)
pangenome_alignment = os.path.join(fasta_output_path, threshold_str, 'pangenome', '%s.out' % re.search('(.*)\.ffn', unified_genome).group(1))
if not os.path.isfile(pangenome_filepath) or os.stat(pangenome_filepath).st_size==0:
SeqIO.write(SeqIO.parse(open(genome_filepath, 'r'), 'fasta'), open(pangenome_filepath, 'w'), 'fasta')
pangenome_fh = open(pangenome_filepath, 'r')
pangenome_fasta = SeqIO.parse(pangenome_fh, 'fasta')
for record in pangenome_fasta:
genes_count+=1
pangenes.update({record.id: {'paralogs': [],
'orthologs': []}})
else:
if not os.path.isfile(pangenome_alignment):
run_fasta(genome_filepath, pangenome_filepath, pangenome_alignment)
if os.stat(pangenome_alignment).st_size==0:
run_fasta(genome_filepath, pangenome_filepath, pangenome_alignment, use_stdin=True)
if os.path.isfile(pangenome_alignment) and os.stat(pangenome_alignment).st_size>0:
al_outfh = open(pangenome_alignment, 'r')
genome_fh = open(genome_filepath, 'r')
pangenome_fh = open(pangenome_filepath, 'a')
ids = []
for line in al_outfh:
genome_fh.seek(0)
genome_fasta = SeqIO.parse(genome_fh, 'fasta')
if not re.search('^#', line):
al_dict = hit2dict(line)
'''unified genome is query, pangenome is subject'''
if not al_dict['query_id'] in ids:
ids.append(al_dict['query_id'])
genes_count+=1
else: continue
'''if gene is unique, add it to pangenome; else, append it to the corresponding list'''
if al_dict['identity%'] < IDENTITYPCNT or al_dict['al_length']<ALMINLENGTH*al_dict['q_length']:
unique_genes_count+=1
pangenes.update({al_dict['query_id']: {'paralogs': [],
'orthologs': []}})
#print 'query id: %s' % al_dict['query_id']
for record in genome_fasta:
if record.id == al_dict['query_id']:
#print 'record id: %s' % record.id
SeqIO.write(record, pangenome_fh, 'fasta')
break
else:
for pangene_key in pangenes.keys():
if al_dict['subject_id']==pangene_key:
if al_dict['query_id'].split('|')[3]==pangene_key.split('|')[3]:
pangenes[pangene_key]['paralogs'].append(al_dict['query_id'])
else:
pangenes[pangene_key]['orthologs'].append(al_dict['query_id'])
break
print sum([1 for key in pangenes.keys()])
pangenes_count = sum([1 for record in SeqIO.parse(open(pangenome_filepath, 'r'), 'fasta')])
print '%d genes, %d unique; pangenome contains %d genes' % (genes_count, unique_genes_count, pangenes_count)
'''write all information stored in pangenes dict to an output file'''
summary_fh = open(summary_path, 'w')
#genes_summary = csv.writer(summary_fh, delimiter='\t')
for pangene in pangenes.keys():
row = [pangene, ';'.join(pangenes[pangene]['paralogs']), ';'.join(pangenes[pangene]['orthologs'])]
summary_fh.write('%s\n' % '\t'.join(row))
return
def pangenome_self_compare(org_name):
dir_path = os.path.join(OUTPUTDIR, org_name)
list_dir = os.listdir(dir_path)
for filename in list_dir:
if re.search('^pangenome.*\.ffn$', filename) and not re.search('selfcmp', filename):
identity, fluidity = re.search('identity(\d+)', filename).group(1), re.search('fluidity(\d+)', filename).group(1)
pangenome_fname = os.path.join(dir_path, filename)
self_cmp_fname = os.path.join(dir_path, 'fasta_out_pangenome', fluidity, 'pangenome_self_cmp.out')
print filename, '%d genes' % sum([1 for record in SeqIO.parse(open(pangenome_fname, 'r'), 'fasta')])
'''get pangenome df'''
pangenes_fname = os.path.join(dir_path, 'pangenome_summary_identity%s_fluidity%s.csv' % (identity, fluidity))
pangenes_fh = open(pangenes_fname, 'r')
#pangenes_csv = csv.reader(open(pangenes_fname, 'r'), delimiter='\t')
pangenes_dict = defaultdict(list)
for line in pangenes_fh:
row = line.split('\t')
pangenes_dict.update({row[0]: row[1:]})
'''perform self comparison on pangenome'''
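            '''-b =2 / -d 2 keep the two best hits per query here, since in a
            self-comparison the top hit is expected to be the query itself'''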
if not os.path.isfile(self_cmp_fname):
sp = subprocess.Popen(['/bin/bash', '-i', '-c', 'fasta36 -b =2 -d 2 -m 8C -m 10 %s %s > %s' %
(pangenome_fname, pangenome_fname, self_cmp_fname)])
sp.communicate()
if os.stat(self_cmp_fname).st_size==0:
sp = subprocess.Popen(['/bin/bash', '-i', '-c', 'cat %s | fasta36 -b =2 -d 2 -m 8C -m 10 -n @ %s > %s' %
(pangenome_fname, pangenome_fname, self_cmp_fname)])
sp.communicate()
'''find all matchable genes'''
matches_count = 0
self_cmp_fh = open(self_cmp_fname, 'r')
for line in self_cmp_fh:
if not re.search('^#', line):
al_dict = hit2dict(line)
if al_dict['query_id']!=al_dict['subject_id']:
if not (al_dict['identity%']<int(identity) or al_dict['al_length']<ALMINLENGTH*al_dict['q_length']):
query, subject = None, None
for key in pangenes_dict.keys():
if re.search(re.escape(al_dict['query_id']), key):
query = key
if re.search(re.escape(al_dict['subject_id']), key):
subject = key
if query!=None and subject!=None:
matches_count+=1
append_vals = [query, ] + pangenes_dict.pop(query)
pangenes_dict[subject]+=append_vals
print '%d matches' % matches_count
'''copy unique entries to new ffn and write updated dict to new csv'''
updated_pangenome_fname = os.path.join(dir_path, 'pangenome_identity%s_fluidity%s_selfcmp.ffn' % (identity, fluidity))
updated_pangenome_fh = open(updated_pangenome_fname, 'w')
updated_pangenes_fname = os.path.join(dir_path, 'pangenome_summary_identity%s_fluidity%s_selfcmp.csv' % (identity, fluidity))
#updated_pangenes_csv = csv.writer(open(updated_pangenes_fname, 'w'), delimiter='\t')
updated_pangenes_fh = open(updated_pangenes_fname, 'w')
for record in SeqIO.parse(open(pangenome_fname, 'r'), 'fasta'):
if record.id in pangenes_dict.keys():
print record
SeqIO.write(record, updated_pangenome_fh, 'fasta')
updated_row = [record.id, ] + pangenes_dict[record.id]
updated_pangenes_fh.write('%s\n' % '\t'.join(updated_row))
print '%d records after self compare' % (len(pangenes_dict.keys()))
return
def create_hist(org_name, strains_fname, unifiable_fname):
strains_fh = open(os.path.join(OUTPUTDIR, org_name, 'strains_plasmids_ncbiFTP.csv'))
unifiables_fh = open(os.path.join(OUTPUTDIR, org_name, 'unifiable_strains.csv'))
paralogs_fh = open(os.path.join(OUTPUTDIR, org_name, 'paralogs.tsv'))
strains = [row for row in csv.DictReader(strains_fh)]
unifiables = [row for row in csv.DictReader(unifiables_fh)]
unifiables_fh.seek(0)
unifiables_headers = csv.DictReader(unifiables_fh).fieldnames
thresholds = [header for header in unifiables_headers if re.search('0\.\d+', header)]
paralogs = list(set([el for row in csv.reader(paralogs_fh, delimiter='\t') for el in row]))
results_fh = open(os.path.join(OUTPUTDIR, org_name, 'hist_results.csv'), 'w')
for threshold in thresholds:
threshold_str = '%se-%d' % (re.search('([1-9])', threshold).group(1), threshold.count('0'))
print threshold_str
bins = []
unified_genomes_dir = os.path.join(OUTPUTDIR, org_name, 'fasta_out_pangenome', threshold_str, 'unified')
for unified_genome_fname in os.listdir(unified_genomes_dir):
bins.append(re.search('(.*)\.ffn', unified_genome_fname).group(1))
n = len(os.listdir(unified_genomes_dir))
print n
fname = os.path.join(OUTPUTDIR, org_name, 'pangenome_summary_identity%d_fluidity%s_corrected_afterselfcmp.tsv' % (IDENTITYPCNT, threshold_str))
fh = open(fname, 'r')
total = 0
count_pangenes = [0]*n
count_viral = [0]*n
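        '''for every pangene, l_ids below collects the distinct genomes it occurs
        in; count_pangenes and count_viral then histogram pangenes (and those
        flagged as prophage-borne by PhiSpy) by that genome count'''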
for line in fh:
row = line.strip().split('\t')
if len(row)>0:
if row[0] not in paralogs and re.search('(NC_\d+)\.\d', row[0].split('|')[3]):
#if re.search('(NC_\d+)\.\d', row[0].split('|')[3]):
viral_flag = False
g_refs = [row[0]]
g_id = re.search('(NC_\d+)\.\d', row[0].split('|')[3]).group(1)
g_ids = [g_id]
if len(row)>1:
g_refs+=[g for g in row[1].strip().split(';') if g!='']
g_ids+=[re.search('(NC_\d+)\.\d', g.split('|')[3]).group(1) for g in g_refs[1:] if re.search('(NC_\d+)\.\d', g.split('|')[3])]
if len(g_refs)!=len(g_ids):
print g_refs
print g_ids
return
l_ids = []
for g_id in g_ids:
l_bins = ''
for b in bins:
if re.search(g_id, b):
l_bins = re.findall('NC_\d+', b)
st_name = ''
for strain in strains:
if strain['keys']==g_id:
st_name = '%s_%s_uid%s' % (org_name, strain['strain'], strain['uid'])
phispy_output_fname = os.path.join(PHISPYDIR, org_name, st_name, g_id, 'prophage_tbl.txt')
if l_bins=='' or not (g_id in l_bins and True in [b in l_ids for b in l_bins]):
if g_id not in l_ids:
l_ids.append(g_id)
'''add search for gene in prophage table generated by phispy'''
if os.path.isfile(phispy_output_fname):
phispy_output_fh = open(phispy_output_fname, 'r')
for line in phispy_output_fh:
'''3 - start, 4 - stop, 9 - final_status'''
res = line.strip().split('\t')
g_arr = g_refs[g_ids.index(g_id)].split('|')
pos = re.findall('\d+', g_arr[-1])
if res[3] in pos and res[4] in pos:
if res[9]=='1':
viral_flag = True
break
if len(row)==1 or False not in [l not in paralogs for l in l_ids]:
total+=1
for i in range(1, n+1):
if len(l_ids) in range(i, i+1):
count_pangenes[i-1]+=1
if viral_flag==True:
count_viral[i-1]+=1
'''
if len(l_ids)==n:
count_pangenes[-1]+=1
if viral_flag==True:
count_viral[-1]+=1
'''
output_line = '%s\n' % '\t'.join([','.join([str(itm) for itm in count_pangenes]),
','.join([str(itm) for itm in count_viral]),
','. join([str(float(count_viral[i])/count_pangenes[i]) for i in range(n)])])
print output_line
results_fh.write(output_line)
return results_fh.name
def plot_hist(y_arr_pangenes=[], y_arr_pangenes_viral=[], y_arr_percentages=[]):
width = 0.4
labels = ['1e-1', '8e-2', '5e-2', '3e-2', '2e-2']
colors = ['r', 'g', 'b', 'y', 'c', 'm']
fig, axarr = plt.subplots(2, 5, sharex='none')
rects_pangenes = []
rects_percentages = []
for i in range(5):
x_arr = range(len(y_arr_pangenes[i]))
if y_arr_pangenes!=[] and y_arr_pangenes_viral!=[]:
rects_pangenes.append([axarr[0][i].bar([width/2+j for j in x_arr], y_arr_pangenes[i], width, color=colors[i]),
axarr[0][i].bar([width/2+j for j in x_arr], y_arr_pangenes_viral[i], width, color=colors[-1])])
if y_arr_percentages!=[]:
rects_percentages.append(axarr[1][i].bar([width/2+j for j in x_arr], y_arr_percentages[i], width, color=colors[i]))
for k in range(2):
for i in range(5):
x_arr = range(len(y_arr_pangenes[i]))
axarr[k][i].set_xlim(-0.5*width,len(x_arr)+0.5*width)
for i in range(5):
axarr[0][i].set_ylim(0,max(y_arr_pangenes[i])*1.05)
#axarr[1][i].set_ylim(0,np.round(max(y_arr_percentages[i])+0.05, 2))
axarr[0][i].set_yticks(np.arange(0, max(y_arr_pangenes[i]), 200))
#axarr[1][i].set_yticks(np.arange(0, max(y_arr_pangenes[i]), 0.05))
for k in range(2):
axarr[k][i].set_title('%s\n(n=%d)' % (labels[i], len(y_arr_pangenes[i])))
axarr[k][i].grid(True)
axarr[0][i].legend((rects_pangenes[i][0], rects_pangenes[i][1]), ('total', 'viral'))
axarr[0][0].set_ylabel('# of pangenes')
axarr[1][0].set_ylabel('viral genes ratio')
for k in range(2):
for j in range(5):
x_arr = range(len(y_arr_pangenes[j]))
axarr[k][j].set_xticks([i+width for i in x_arr])
axarr[k][j].set_xticklabels(tuple([str(i+1) if (i==0 or (i+1)%5==0 or (i==len(x_arr)-1 and (i+1)%5>1)) else '' for i in x_arr]))
def autolabel(rects, i, k):
# attach some text labels
for rect in rects:
height = rect.get_height()
if height>1: axarr[k][i].text(rect.get_x()+rect.get_width()/2., 1.02*height, height, ha='center', va='bottom')
elif height>0 and height<1: axarr[k][i].text(rect.get_x()+rect.get_width()/2., 1.02*height, '%.2f'%height, ha='center', va='bottom')
'''
for i in range(5):
for j in range(2):
autolabel(rects_pangenes[i][j], i, 0)
autolabel(rects_percentages[i], i, 1)
'''
plt.show()
return
def calculate_enc(org_name, prog_name):
strains_fh = open(os.path.join(OUTPUTDIR, org_name, 'strains_plasmids_ncbiFTP.csv'))
unifiables_fh = open(os.path.join(OUTPUTDIR, org_name, 'unifiable_strains.csv'))
strains = [row for row in csv.DictReader(strains_fh)]
#unifiables = [row for row in csv.DictReader(unifiables_fh)]
for strain_row in strains:
print '_'.join([org_name, strain_row['strain'], 'uid%s' % strain_row['uid']])
strain_fname = os.path.join(FFNDIR,
'_'.join([org_name, strain_row['strain'], 'uid%s' % strain_row['uid']]),
'%s.ffn' % strain_row['keys'])
n_genes = sum([1 for record in SeqIO.parse(open(strain_fname, 'r'), 'fasta')])
encprime_output_dir = os.path.join(OUTPUTDIR, org_name, 'encprime', '_'.join([org_name, strain_row['strain'], 'uid%s' % strain_row['uid']]))
if not os.path.isdir(encprime_output_dir):
os.mkdir(encprime_output_dir)
os.chdir(encprime_output_dir)
if prog_name=='SeqCount_mod':
for flag in ['-c', '-n']:
opts = '%s %s %s %d' % (os.path.join(ENCPATH, prog_name), flag, strain_fname, n_genes)
subprocess.call(['/bin/bash', '-i', '-c', opts])
elif prog_name=='ENCprime':
opts = '%s %s %s 11 %s 1 -q' % (os.path.join(ENCPATH, prog_name),
'%s.ffn.codcnt' % strain_row['keys'],
'%s.ffn.acgtfreq' % strain_row['keys'],
'%s.ffn.results' % strain_row['keys'])
subprocess.call(['/bin/bash', '-i', '-c', opts])
return
def encprime_per_pangene(org_name):
strains_fh = open(os.path.join(OUTPUTDIR, org_name, 'strains_plasmids_ncbiFTP.csv'))
unifiables_fh = open(os.path.join(OUTPUTDIR, org_name, 'unifiable_strains.csv'))
paralogs_fh = open(os.path.join(OUTPUTDIR, org_name, 'paralogs.tsv'))
strains = [row for row in csv.DictReader(strains_fh)]
unifiables = [row for row in csv.DictReader(unifiables_fh)]
unifiables_fh.seek(0)
unifiables_headers = csv.DictReader(unifiables_fh).fieldnames
thresholds = [header for header in unifiables_headers if re.search('0\.\d+', header)]
paralogs = list(set([el for row in csv.reader(paralogs_fh, delimiter='\t') for el in row]))
for threshold in thresholds:
threshold_str = '%se-%d' % (re.search('([1-9])', threshold).group(1), threshold.count('0'))
print threshold_str
output_fname = os.path.join(OUTPUTDIR, org_name, 'pangenome_summary_identity%d_fluidity%s_corrected_stats.tsv' % (IDENTITYPCNT, threshold_str))
output_fh = open(output_fname, 'a')
bins = []
unified_genomes_dir = os.path.join(OUTPUTDIR, org_name, 'fasta_out_pangenome', threshold_str, 'unified')
for unified_genome_fname in os.listdir(unified_genomes_dir):
bins.append(re.search('(.*)\.ffn', unified_genome_fname).group(1))
n = len(os.listdir(unified_genomes_dir))
print n
        enc = [[] for _ in range(n)]
        enc_viral = [[] for _ in range(n)]
        encprime = [[] for _ in range(n)]
        encprime_viral = [[] for _ in range(n)]
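        '''for each pangene, ENC and ENCprime are read from the per-strain
        ENCprime results files and averaged over all copies of the gene (g_refs)'''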
pangenome_fname = os.path.join(OUTPUTDIR, org_name, 'pangenome_summary_identity%d_fluidity%s_corrected_afterselfcmp.tsv' % (IDENTITYPCNT, threshold_str))
pangenome_fh = open(pangenome_fname, 'r')
for line in pangenome_fh:
row = line.strip().split('\t')
if len(row)>0:
if row[0] not in paralogs and re.search('(NC_\d+)\.\d', row[0].split('|')[3]):
viral_flag = False
enc_i, encprime_i = 0, 0
g_refs = [row[0]]
g_id = re.search('(NC_\d+)\.\d', row[0].split('|')[3]).group(1)
g_ids = [g_id]
if len(row)>1:
g_refs+=[g for g in row[1].strip().split(';') if g!='']
g_ids+=[re.search('(NC_\d+)\.\d', g.split('|')[3]).group(1) for g in g_refs[1:] if re.search('(NC_\d+)\.\d', g.split('|')[3])]
l_ids = []
for g_id in g_ids:
l_bins = ''
for b in bins:
if re.search(g_id, b):
l_bins = re.findall('NC_\d+', b)
st_name = ''
for strain in strains:
if strain['keys']==g_id:
st_name = '%s_%s_uid%s' % (org_name, strain['strain'], strain['uid'])
phispy_output_fname = os.path.join(PHISPYDIR, org_name, st_name, g_id, 'prophage_tbl.txt')
if l_bins=='' or not (g_id in l_bins and True in [b in l_ids for b in l_bins]):
if g_id not in l_ids:
l_ids.append(g_id)
'''add search for gene in prophage table generated by phispy'''
if os.path.isfile(phispy_output_fname):
phispy_output_fh = open(phispy_output_fname, 'r')
for line in phispy_output_fh:
'''3 - start, 4 - stop, 9 - final_status'''
res = line.strip().split('\t')
g_arr = g_refs[g_ids.index(g_id)].split('|')
pos = re.findall('\d+', g_arr[-1])
if res[3] in pos and res[4] in pos:
if res[9]=='1':
viral_flag = True
break
'''calculate average ENC, ENCprime'''
encprime_output_fname = os.path.join(OUTPUTDIR, org_name, 'encprime', st_name,
'%s.ffn.results' % re.search('\|(NC_\d+)\.\d\|', g_refs[g_ids.index(g_id)]).group(1))
for ln in open(encprime_output_fname, 'r'):
rw = ln.strip()
if re.search(g_refs[g_ids.index(g_id)].split('|')[-1], rw):
nums = re.findall('\d+\.\d+', rw)
enc_i+=float(nums[1])
encprime_i+=float(nums[2])
enc_v = enc_i/len(g_refs)
encprime_v = encprime_i/len(g_refs)
output_fh.write('%s\n' % '\t'.join([g_refs[0], str(len(g_refs)), str(len(l_ids)), str(viral_flag), str(enc_v), str(encprime_v)]))
return
def create_enc_hist(org_name):
unifiables_fh = open(os.path.join(OUTPUTDIR, org_name, 'unifiable_strains.csv'))
unifiables_headers = csv.DictReader(unifiables_fh).fieldnames
thresholds = [header for header in unifiables_headers if re.search('0\.\d+', header)]
fig, axarr = plt.subplots(2, len(thresholds), sharex='none')
width = 0.4
vals_enc_encp = [[] for i in range(len(thresholds))]
bins_enc_encp = [[] for i in range(len(thresholds))]
bins_viral_enc_encp = [[] for i in range(len(thresholds))]
for threshold in thresholds:
threshold_str = '%se-%d' % (re.search('([1-9])', threshold).group(1), threshold.count('0'))
print threshold_str
unified_genomes_dir = os.path.join(OUTPUTDIR, org_name, 'fasta_out_pangenome', threshold_str, 'unified')
n = len(os.listdir(unified_genomes_dir))
print n
bins_enc_encp[thresholds.index(threshold)] = [[[],[]] for i in range(n)]
bins_viral_enc_encp[thresholds.index(threshold)] = [[[],[]] for i in range(n)]
vals_enc_encp[thresholds.index(threshold)] = [[] for i in range(4)]
output_fname = os.path.join(OUTPUTDIR, org_name, 'pangenome_summary_identity%d_fluidity%s_corrected_stats.tsv' % (IDENTITYPCNT, threshold_str))
output_fh = open(output_fname, 'r')
for line in output_fh:
'''0- pangene name; 1- n_orthologs; 2-n_genomes; 3-viral_flag; 4-enc; 5-encprime'''
row = line.strip().split('\t')
if row[3]=='True':
bins_viral_enc_encp[thresholds.index(threshold)][int(row[2])-1][0].append(float(row[4]))
bins_viral_enc_encp[thresholds.index(threshold)][int(row[2])-1][1].append(float(row[5]))
else:
bins_enc_encp[thresholds.index(threshold)][int(row[2])-1][0].append(float(row[4]))
bins_enc_encp[thresholds.index(threshold)][int(row[2])-1][1].append(float(row[5]))
for i in range(n):
vals_enc_encp[thresholds.index(threshold)][0].append(np.average(bins_enc_encp[thresholds.index(threshold)][i][0]))
vals_enc_encp[thresholds.index(threshold)][1].append(np.average(bins_viral_enc_encp[thresholds.index(threshold)][i][0]))
vals_enc_encp[thresholds.index(threshold)][2].append(np.average(bins_enc_encp[thresholds.index(threshold)][i][1]))
vals_enc_encp[thresholds.index(threshold)][3].append(np.average(bins_viral_enc_encp[thresholds.index(threshold)][i][1]))
'''
print np.average(bins_enc_encp[i][0]), np.average(bins_viral_enc_encp[i][0]), len(bins_enc_encp[i][0])*len(bins_viral_enc_encp[i][0]), \
stats.mannwhitneyu(bins_enc_encp[i][0], bins_viral_enc_encp[i][0]), \
'\t', np.average(bins_enc_encp[i][1]), np.average(bins_viral_enc_encp[i][1]), len(bins_enc_encp[i][1])*len(bins_viral_enc_encp[i][1]), \
stats.mannwhitneyu(bins_enc_encp[i][1], bins_viral_enc_encp[i][1])
'''
def autolabel(rects, t, k, u):
# label significant values
for rect in rects[0]:
height = max(rect.get_height(), rects[1][rects[0].index(rect)].get_height())
if u[rects[0].index(rect)][1]<=0.05:
print t, k, rect.get_height(), rects[1][rects[0].index(rect)].get_height()
axarr[k][t].text(rect.get_x()+rect.get_width(), 1.005*height, '*', ha='center', va='bottom')
colors = ('r', 'g', 'b', 'y', 'c', 'm')
series = ('non-viral', 'viral')
for i in range(len(thresholds)):
rects = [[], []]
x_arr = range(len(vals_enc_encp[i][0]))
axarr[0][i].set_title('%s\n(n=%d)' % (thresholds[i], len(vals_enc_encp[i][0])))
for j in range(4):
ind = np.arange(len(vals_enc_encp[i][j]))
w = width*(j%2)
c = colors[-1]
if j%2==0:
c = colors[i]
rects[j//2].append(axarr[j//2][i].bar(ind+w, tuple([val for val in vals_enc_encp[i][j]]), width, color=c))
for k in range(len(series)):
axarr[k][i].set_xlim(-0.5*width,len(x_arr)+0.5*width)
axarr[k][i].set_ylim(min([rect.get_height() for rect in rects[k][0]+rects[k][1]])*0.98,
max([rect.get_height() for rect in rects[k][0]+rects[k][1]])*1.02)
axarr[k][i].grid(True)
axarr[k][i].legend((rects[k][0], rects[k][1]), series, loc=3)
u = [stats.mannwhitneyu(bins_enc_encp[i][m][k], bins_viral_enc_encp[i][m][k]) for m in range(len(x_arr))]
autolabel(rects[k], i, k, u)
axarr[k][i].set_xticks([x+width for x in x_arr])
axarr[k][i].set_xticklabels(tuple([str(x+1) if (x==0 or (x+1)%5==0 or (x==len(x_arr)-1 and (x+1)%5>1)) else '' for x in x_arr]))
axarr[0][0].set_ylabel('ENC')
axarr[1][0].set_ylabel('ENCprime')
plt.show()
return
def calculate_fop(org_name):
strains_fh = open(os.path.join(OUTPUTDIR, org_name, 'strains_plasmids_ncbiFTP.csv'))
strains = [row for row in csv.DictReader(strains_fh)]
for strain_row in strains:
print '_'.join([org_name, strain_row['strain'], 'uid%s' % strain_row['uid']])
strain_fname = os.path.join(FFNDIR,
'_'.join([org_name, strain_row['strain'], 'uid%s' % strain_row['uid']]),
'%s.ffn' % strain_row['keys'])
fop_output_dir = os.path.join(OUTPUTDIR, org_name, 'fop', '_'.join([org_name, strain_row['strain'], 'uid%s' % strain_row['uid']]))
output_fname = os.path.join(fop_output_dir, '%s.fop.out' % strain_row['keys'])
if not os.path.isdir(fop_output_dir):
os.mkdir(fop_output_dir)
output_fh = open(output_fname, 'a')
for gene in SeqIO.parse(open(strain_fname, 'r'), 'fasta'):
print gene.name, len(gene.seq)
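            '''Fop = optimal codons / codons of amino acids listed in AADICT
            (AADICT omits Met, Trp and stop codons, so those are not counted)'''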
count_opt, count_tot = 0, 0
for i in range(0, len(gene.seq), 3):
#print gene.seq[i:i+3], [key for key in AADICT.keys() if (str(gene.seq[i:i+3]) in AADICT[key] or str(gene.seq[i:i+3]).swapcase() in AADICT[key])]
if len([key for key in AADICT.keys() if (str(gene.seq[i:i+3]) in AADICT[key] or str(gene.seq[i:i+3]).swapcase() in AADICT[key])])>0:
count_tot+=1
'''print [key_opt for key_opt in OPTDICT[org_name].keys() if (str(gene.seq[i:i+3]) in OPTDICT[org_name][key_opt] or \
str(gene.seq[i:i+3]).swapcase() in OPTDICT[org_name][key_opt])]'''
if len([key_opt for key_opt in OPTDICT[org_name].keys() if (str(gene.seq[i:i+3]) in OPTDICT[org_name][key_opt] or \
str(gene.seq[i:i+3]).swapcase() in OPTDICT[org_name][key_opt])])>0:
count_opt+=1
'''0- gene_name; 1-gene_length; 2-n_optimal_codons; 3- n_optimal_nonoptimal_codons; 5-fop'''
output_fh.write('%s\n' % '\t'.join([str(itm) for itm in [gene.name, len(gene.seq), count_opt, count_tot, float(count_opt)/count_tot]]))
#print count_opt, count_tot, float(count_opt)/count_tot
return
def plot_fop(org_name):
strains_fh = open(os.path.join(OUTPUTDIR, org_name, 'strains_plasmids_ncbiFTP.csv'))
unifiables_fh = open(os.path.join(OUTPUTDIR, org_name, 'unifiable_strains.csv'))
paralogs_fh = open(os.path.join(OUTPUTDIR, org_name, 'paralogs.tsv'))
strains = [row for row in csv.DictReader(strains_fh)]
unifiables = [row for row in csv.DictReader(unifiables_fh)]
unifiables_fh.seek(0)
unifiables_headers = csv.DictReader(unifiables_fh).fieldnames
thresholds = [header for header in unifiables_headers if re.search('0\.\d+', header)]
paralogs = list(set([el for row in csv.reader(paralogs_fh, delimiter='\t') for el in row]))
for threshold in thresholds:
threshold_str = '%se-%d' % (re.search('([1-9])', threshold).group(1), threshold.count('0'))
print threshold_str
bins = []
unified_genomes_dir = os.path.join(OUTPUTDIR, org_name, 'fasta_out_pangenome', threshold_str, 'unified')
for unified_genome_fname in os.listdir(unified_genomes_dir):
bins.append(re.search('(.*)\.ffn', unified_genome_fname).group(1))
n = len(os.listdir(unified_genomes_dir))
print n
pangenome_fname = os.path.join(OUTPUTDIR, org_name, 'pangenome_summary_identity%d_fluidity%s_afterselfcmp.tsv' % (IDENTITYPCNT, threshold_str))
pangenome_fh = open(pangenome_fname, 'r')
stats_fname = os.path.join(OUTPUTDIR, org_name, 'pangenome_summary_identity%d_fluidity%s_stats.tsv' % (IDENTITYPCNT, threshold_str))
stats_fh = open(stats_fname, 'r')
for line_p in pangenome_fh:
row_p = line_p.strip().split('\t')
if len(row_p)>0:
if row_p[0] not in paralogs and re.search('(NC_\d+)\.\d', row_p[0].split('|')[3]):
line_s = next(stats_fh)
row_s = line_s.strip().split('\t')
g_refs = [row_p[0]]
g_id = re.search('(NC_\d+)\.\d', row_p[0].split('|')[3]).group(1)
g_ids = [g_id]
if len(row_p)>1:
g_refs+=[g for g in row_p[1].strip().split(';') if g!='']
g_ids+=[re.search('(NC_\d+)\.\d', g.split('|')[3]).group(1) for g in g_refs[1:] if re.search('(NC_\d+)\.\d', g.split('|')[3])]
fop_arr = []
#print len(g_ids)
for g_ix, g_id in enumerate(g_ids):
g_ref = g_refs[g_ix]
st_name = ''
for strain in strains:
if strain['keys']==g_id:
st_name = '_'.join([org_name, strain['strain'], 'uid%s' % strain['uid']])
fop_output_dir = os.path.join(OUTPUTDIR, org_name, 'fop', st_name)
fop_fname = os.path.join(fop_output_dir, '%s.fop.out' % g_id)
fop_fh = open(fop_fname, 'r')
for line in fop_fh:
fop_row = line.strip().split('\t')
if fop_row[0]==g_ref:
fop_arr.append(float(fop_row[4]))
print int(row_s[2]), np.average(fop_arr), np.std(fop_arr), row_s[0]==row_p[0]
if int(row_s[2])==1 and np.std(fop_arr)>0:
print row_p
return
if row_s[0]!=row_p[0]:
print row_s[0], row_p[0]
return
return
def get_highly_expressed_genes(org_name):
strains_fh = open(os.path.join(OUTPUTDIR, org_name, 'strains_plasmids_ncbiFTP.csv'))
unifiables_fh = open(os.path.join(OUTPUTDIR, org_name, 'unifiable_strains.csv'))
strains = [row for row in csv.DictReader(strains_fh)]
unifiables = [row for row in csv.DictReader(unifiables_fh)]
unifiables_fh.seek(0)
unifiables_headers = csv.DictReader(unifiables_fh).fieldnames
thresholds = [header for header in unifiables_headers if re.search('0\.\d+', header)]
for threshold in thresholds:
threshold_str = '%se-%d' % (re.search('([1-9])', threshold).group(1), threshold.count('0'))
print threshold_str
pangenome_fname = os.path.join(OUTPUTDIR, org_name, 'pangenome_summary_identity%d_fluidity%s_afterselfcmp.tsv' % (IDENTITYPCNT, threshold_str))
pangenome_fh = open(pangenome_fname, 'r')
pangenome_heg_fname = os.path.join(OUTPUTDIR, org_name, 'pangenome_identity%d_fluidity%s_heg.ffn' % (IDENTITYPCNT, threshold_str))
if not os.path.isfile(pangenome_heg_fname):
pangenome_heg_fh = open(pangenome_heg_fname, 'a')
heg_records = []
found_rows = []
for strain in strains:
print strain['strain']
gbk_fname = os.path.join(GBKDIR, '%s_%s_uid%s' % (org_name, strain['strain'], strain['uid']), '%s.gbk' % strain['keys'])
gbk_fh = SeqIO.parse(open(gbk_fname, 'r'), 'genbank')
for record in gbk_fh:
for feature in record.features:
for k in feature.qualifiers.keys():
for term in ['ribosomal', 'elongation factor']:
if [re.search(term, feature.qualifiers[k][j]) for j in range(len(feature.qualifiers[k]))]!=[None]*len(feature.qualifiers[k]):
pangenome_fh.seek(0)
found_flag = False
for line in pangenome_fh:
row = line.strip().split('\t')
if len(row)>0 and row not in found_rows and len(row[0].split('|'))==4:
g_refs = [row[0]]
#print row[0].split('|')
g_id = re.search('(NC_\d+)\.\d', row[0].split('|')[3]).group(1)
g_ids = [g_id]
if len(row)>1:
g_refs+=[g for g in row[1].strip().split(';') if g!='']
g_ids+=[re.search('(NC_\d+)\.\d', g.split('|')[3]).group(1) for g in g_refs[1:] if re.search('(NC_\d+)\.\d', g.split('|')[3])]
for g_id in g_ids:
g_arr = g_refs[g_ids.index(g_id)].split('|')
pos = [int(p) for p in re.findall('\d+', g_arr[-1])]
#print pos
if min(pos) in range(feature.location.start-1, feature.location.start+2) \
and max(pos) in range(feature.location.end-1, feature.location.end+2):
heg_record = SeqRecord.SeqRecord(feature.extract(record.seq),
id=g_refs[g_ids.index(g_id)],
name=g_refs[g_ids.index(g_id)],
description=g_refs[g_ids.index(g_id)])
if heg_record.id not in [rec.id for rec in heg_records]:
print feature.location, '\n', feature.qualifiers
print pos, '\n', feature.extract(record.seq)
heg_records.append(heg_record)
found_rows.append(row)
SeqIO.write(heg_record, pangenome_heg_fh, 'fasta')
found_flag = True
break
if found_flag==True:
break
print sum([1 for record in heg_records])
return
def get_annotations(directory):
org_name = directory.split('/')[-1]
csv.field_size_limit(sys.maxsize)
#input_csv = csv.reader(open(os.path.join(directory, 'temp', 'genes_summary_withViralFlag.txt'), 'r'))
#summary_df = pandas.DataFrame.from_csv(os.path.join(directory, 'temp', 'genes_summary_withViralFlag.txt'), index_col=None)
viral_df = pandas.DataFrame.from_csv(os.path.join(directory, 'temp', 'genes_summary_hist_cutoff90_noplasmids_nophages.txt'), index_col=None)
core_output_csv = csv.writer(open(os.path.join(directory, 'temp', 'core_genes_annotation_cutoff90_noplasmids_nophages.txt'), 'w'))
acquired_output_csv = csv.writer(open(os.path.join(directory, 'temp', 'acquired_genes_annotation_cutoff90_noplasmids_nophages.txt'), 'w'))
for idx, pcnt in enumerate(viral_df['%unique_genomes']):
output_flag = False
if viral_df['viral_flag'][idx]==1:
if pcnt < 0.11:
print viral_df['id'][idx], viral_df['name'][idx], pcnt
output_csv = acquired_output_csv
output_flag = True
elif pcnt > 0.89:
print viral_df['id'][idx], viral_df['name'][idx], pcnt
output_csv = core_output_csv
output_flag = True
if output_flag==True:
output_csv.writerow([viral_df['id'][idx], viral_df['name'][idx], '{:.2f}'.format(pcnt)])
pangene_id = viral_df['id'][idx]
pangene_name = viral_df['name'][idx]
filename = None
filepath = os.path.join(directory, '%s_%s_refGenome.gb' % (org_name, pangene_id))
if os.path.isfile(filepath):
filename = filepath
else:
if re.search('[A-Z]{1,2}\w+\.\d', pangene_id):
filepath = os.path.join(directory, '%s_%s_%s_refGenome.gb' % (org_name, re.search('([A-Z]{1,2})\w+\.\d', pangene_id).group(1),
re.search('[A-Z]{1,2}(\w+\.\d)', pangene_id).group(1)))
if os.path.isfile(filepath):
filename = filepath
else:
print 'no such file: %s' % filepath
return
pangene_gbfile = SeqIO.parse(open(filename, 'r'), 'genbank')
for record in pangene_gbfile:
#print record.features[3064]
#return
counter = 0
for feature in record.features:
if feature.type=='CDS':
counter+=1
if ('gene' in feature.qualifiers.keys() and feature.qualifiers['gene'][0]==re.search('(\w+\d?)_\d+', pangene_name).group(1)) \
or counter==int(re.search('\w+\d?_(\d+)', pangene_name).group(1)):
#print ';'.join(feature.qualifiers['gene']), '\t', ';'.join(feature.qualifiers['product']), '\t', ';'.join(feature.qualifiers['note'])
output = [';'.join(feature.qualifiers[key]) for key in ['gene', 'product', 'note'] if key in feature.qualifiers.keys()]
if output:
print '\t'.join([';'.join(feature.qualifiers[key]) for key in ['gene', 'product'] if key in feature.qualifiers.keys()])
output_csv.writerow(output)
else:
output_csv.writerow(feature)
return
if __name__=="__main__":
org_dirs = {'Escherichia_coli': []}
org_folders = sorted([name for name in os.listdir(GBKDIR) if os.path.isdir(os.path.join(GBKDIR, name))])
for query_org in org_dirs.keys():
print query_org
strains_csv = os.path.join(OUTPUTDIR, query_org, 'strains_plasmids_ncbiFTP.csv')
unifiable_csv = os.path.join(OUTPUTDIR, query_org, 'unifiable_strains.csv')
for org_folder in sorted(org_folders):
if re.search(query_org, org_folder):
org_dirs[query_org].append(org_folder)
'''
gb2seed(query_org, strains_csv)
runPhiSpy(query_org, strains_csv)
'''
#createPanGenome(query_org, strains_csv, unifiable_csv)
#pangenome_self_compare(query_org)
'''
hist_specs = os.path.join(OUTPUTDIR, query_org, 'hist_results.csv')
if not os.path.isfile(hist_specs):
hist_specs = create_hist(query_org, strains_csv, unifiable_csv)
pangenes, pangenes_viral, percentages = [], [], []
for line in open(hist_specs, 'r'):
row = line.strip().split('\t')
pangenes.append([int(el) for el in row[0].split(',')])
pangenes_viral.append([int(el) for el in row[1].split(',')])
percentages.append([float(el) for el in row[2].split(',')])
print len(pangenes), pangenes
print len(pangenes_viral), pangenes_viral
print len(percentages), percentages
'''
#plot_hist(pangenes, pangenes_viral, percentages)
#calculate_enc(query_org, 'SeqCount_mod')
#calculate_enc(query_org, 'ENCprime')
#encprime_per_pangene(query_org)
create_enc_hist(query_org)
#calculate_fop(query_org)
#plot_fop(query_org)
| mit |
PublicHealthEngland/pygom | doc/source/conf.py | 1 | 9311 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import warnings
#slight hack for graphviz on windows to ensure the conda path is correct
if sys.platform == 'win32':
os.environ['PATH'] += os.pathsep + os.environ['CONDA_PREFIX'] + r'\Library\bin\graphviz'
import sphinx
if sphinx.__version__ < '1.4.1':
raise RuntimeError("Sphinx 1.4.1 or newer required")
import pygom
needs_sphinx = '1.4.1'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('sphinxext'))
#sys.path.append(os.path.abspath('../pygom'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.mathjax', # 'sphinx.ext.imgmath',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.graphviz',# 'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
'numpydoc',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive'
]
# the mapping for code in other packages
intersphinx_mapping = {'matplotlib': ('http://matplotlib.org/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'python': ('https://docs.python.org/2', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'sympy': ('http://docs.sympy.org/latest/', None)}
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PyGOM Documentation'
copyright = '2015-2019, Public Health England'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pygom.__version__
# The full version, including alpha/beta/rc tags.
release = pygom.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
#Set the directory to save figures in
ipython_savefig_dir = 'savefig'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#['alabaster',sphinx_rtd_theme','classic','sphinxdoc','scrolls','agogo',
# 'traditional','nature','haiku','pyramid','bizstyle']
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
# RTD will time out if we try to build the whole of the documentation so
# ignore some of the longer bits and perhaps add them later
exclude_patterns = ['common_models/*.rst',
# 'bvpSimple.rst',
'epi.rst',
# 'estimate1.rst',
'estimate2.rst',
'gradient.rst',
'epijson.rst',
'fh.rst',
# 'getting_started.rst',
'initialGuess.rst',
'profile.rst',
'sir.rst',
# 'stochastic.rst',
# 'transition.rst'
]
# html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyGOM Documentation'
html_add_permalinks = ''
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PyGOM.tex', 'PyGOM Documentation',
'Edwin Tye', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = '\\usepackage{amsmath,amssymb}'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# latex_encodings = 'utf-8'
warnings.filterwarnings("ignore", category=UserWarning,
message='Matplotlib is currently using agg, which is a'
' non-GUI backend, so cannot show the figure.')
| gpl-2.0 |
hitszxp/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
kevin-intel/scikit-learn | sklearn/utils/tests/test_deprecation.py | 19 | 1384 | # Authors: Raghav RV <[email protected]>
# License: BSD 3 clause
import pickle
from sklearn.utils.deprecation import _is_deprecated
from sklearn.utils.deprecation import deprecated
from sklearn.utils._testing import assert_warns_message
@deprecated('qwerty')
class MockClass1:
pass
class MockClass2:
@deprecated('mockclass2_method')
def method(self):
pass
class MockClass3:
@deprecated()
def __init__(self):
pass
class MockClass4:
pass
@deprecated()
def mock_function():
return 10
def test_deprecated():
assert_warns_message(FutureWarning, 'qwerty', MockClass1)
assert_warns_message(FutureWarning, 'mockclass2_method',
MockClass2().method)
assert_warns_message(FutureWarning, 'deprecated', MockClass3)
val = assert_warns_message(FutureWarning, 'deprecated',
mock_function)
assert val == 10
def test_is_deprecated():
# Test if _is_deprecated helper identifies wrapping via deprecated
# NOTE it works only for class methods and functions
assert _is_deprecated(MockClass1.__init__)
assert _is_deprecated(MockClass2().method)
assert _is_deprecated(MockClass3.__init__)
assert not _is_deprecated(MockClass4.__init__)
assert _is_deprecated(mock_function)
def test_pickle():
pickle.loads(pickle.dumps(mock_function))
| bsd-3-clause |
maminian/skewtools | scripts/animate_ellipse_flow_mc_projs.py | 1 | 6516 | #!/usr/bin/python
import numpy as np
import pylab
from pylab import *
import matplotlib.pyplot as pyplot
import matplotlib.animation as anim
import h5py
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import gridspec
# ------------------------------------------------
def update(i,axXY,axXZ,axYZ,axSk,x,y,z,Sk,t,aratio):
# x-bounds for the axes.
maxx = max(abs(x[i,:]))
# Change the current axis, clear it, and update it with the new
# scatter info.
pyplot.sca(axXY)
cla()
axXY.scatter(x[i,:], y[i,:], s=5, marker='.', alpha=0.2)
pylab.xlim([-maxx,maxx]) # X axis
pylab.ylim([-1.,1.]) # Y axis
pyplot.title('XY projection')
# Repeat for the other two projections.
pyplot.sca(axXZ)
cla()
axXZ.scatter(x[i,:], z[i,:], s=5, marker='.', alpha=0.2)
pylab.xlim([-maxx,maxx]) # X axis
pylab.ylim([-1./aratio,1./aratio]) # Z axis
pyplot.title('XZ projection')
# For the YZ projection, draw the unit circle as well.
pyplot.sca(axYZ)
cla()
axYZ.scatter(y[i,:], z[i,:], s=5, marker='.', alpha=0.2)
hold(True)
th = linspace(0,2*pi,361)
axYZ.plot(cos(th),1./aratio*sin(th),color='red')
hold(False)
pylab.xlim([-1.,1.]) # Y axis
pylab.ylim([-1./aratio,1./aratio]) # Z axis
pyplot.title('YZ projection')
# For the skewness, the plot stays the same, but we need to
# re-draw the plot with a new position for the red line.
pyplot.sca(axSk)
cla()
axSk.plot(t,Sk)
axSk.set_xscale('log')
axSk.grid(True)
hold(True)
axSk.plot([t[i],t[i]],[min(Sk),max(Sk)],color='red')
hold(False)
pyplot.title('Skewness over time')
return axXY,axXZ,axYZ,axSk
# end update
def update_slow(i,axXY,axXZ,axYZ,axSk,walks,Sk,t,aratio):
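    # unlike update() above, this variant reads one time step of positions at a
    # time from the open HDF5 file and draws 2-D histograms instead of scatters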
print i
x = walks['X'][:,i]
y = walks['Y'][:,i]
z = walks['Z'][:,i]
# x-bounds for the axes.
# maxx = max(abs(x).max(),maxx)
# Change the current axis, clear it, and update it with the new
# scatter info.
pyplot.sca(axXY)
cla()
axXY.hist2d(x, y,bins=201)
# pylab.xlim([-maxx,maxx]) # X axis
pylab.ylim([-1.,1.]) # Y axis
pyplot.title('XY projection')
# Repeat for the other two projections.
pyplot.sca(axXZ)
cla()
axXZ.hist2d(x, z,bins=201)
# pylab.xlim([-maxx,maxx]) # X axis
pylab.ylim([-1./aratio,1./aratio]) # Z axis
pyplot.title('XZ projection')
# For the YZ projection, draw the unit circle as well.
pyplot.sca(axYZ)
cla()
axYZ.hist2d(y, z,bins=201)
hold(True)
th = linspace(0,2*pi,361)
axYZ.plot(cos(th),1./aratio*sin(th),color='red')
hold(False)
pylab.xlim([-1.,1.]) # Y axis
pylab.ylim([-1./aratio,1./aratio]) # Z axis
pyplot.title('YZ projection')
# For the skewness, the plot stays the same, but we need to
# re-draw the plot with a new position for the red line.
pyplot.sca(axSk)
cla()
axSk.plot(t,Sk)
axSk.set_xscale('log')
axSk.grid(True)
hold(True)
axSk.plot([t[i],t[i]],[min(Sk),max(Sk)],color='red')
hold(False)
pyplot.title('Skewness over time')
return axXY,axXZ,axYZ,axSk
# end update_slow
def animate_mc(x,y,z,Sk,t,aratio):
fig = pyplot.figure()
# Generate a subplot with the particle animation on top
# and an animated tracker of the skewness over time
# on the bottom.
gs = gridspec.GridSpec(2,2, height_ratios=[1,1], width_ratios=[1,1])
axXY = fig.add_subplot(gs[0])
axXZ = fig.add_subplot(gs[1])
axYZ = fig.add_subplot(gs[2])
axSk = fig.add_subplot(gs[3])
axXY.scatter(x[0,:], y[0,:], s=5, marker='.',alpha=0.2)
axXZ.scatter(x[0,:], z[0,:], s=5, marker='.',alpha=0.2)
axYZ.scatter(y[0,:], z[0,:], s=5, marker='.',alpha=0.2)
th = linspace(0,2*pi,361)
axYZ.plot(cos(th),1./aratio*sin(th),color='red')
axSk.plot(t,Sk)
# delay = 1000./(fps)
ani = anim.FuncAnimation( fig, update, frames=size(t),
fargs=(axXY,axXZ,axYZ,axSk,x,y,z,Sk,t,aratio),repeat=False)
return ani
# --------------------------------------------
def animate_mc_slow(walks):
fig = pyplot.figure()
# Generate a subplot with the particle animation on top
# and an animated tracker of the skewness over time
# on the bottom.
t = walks['Time'].value
Sk = walks['Avgd_Skewness'].value
aratio = walks['aratio'].value
gs = gridspec.GridSpec(2,2, height_ratios=[1,1], width_ratios=[1,1])
axXY = fig.add_subplot(gs[0])
axXZ = fig.add_subplot(gs[1])
axYZ = fig.add_subplot(gs[2])
axSk = fig.add_subplot(gs[3])
axXY.scatter(walks['X'][:,0], walks['Y'][:,0], s=5, marker='.',alpha=0.2)
axXZ.scatter(walks['X'][:,0], walks['Z'][:,0], s=5, marker='.',alpha=0.2)
axYZ.scatter(walks['Y'][:,0], walks['Z'][:,0], s=5, marker='.',alpha=0.2)
th = linspace(0,2*pi,361)
axYZ.plot(cos(th),1./aratio*sin(th),color='red')
axSk.plot(t,Sk)
# delay = 1000./(fps)
# ani = anim.FuncAnimation( fig, update, frames=size(t),
# fargs=(axXY,axXZ,axYZ,axSk,x,y,z,Sk,t,aratio),repeat=False)
ani = anim.FuncAnimation( fig, update_slow, frames=size(t),
fargs=(axXY,axXZ,axYZ,axSk,walks,Sk,t,aratio),repeat=False)
return ani
# --------------------------------------------
walks = h5py.File(sys.argv[1])
print "Constructing animation..."
# Need to split this into two versions, depending on whether I have the memory capacity
# to store the entire position histories.
if False:
X = transpose(walks['X'].value)
Y = transpose(walks['Y'].value)
Z = transpose(walks['Z'].value)
Sk = transpose(walks['Avgd_Skewness'].value)
t = transpose(walks['Time'].value)
aratio = walks['aratio'].value
ani = animate_mc(X,Y,Z,Sk,t,aratio)
else:
ani = animate_mc_slow(walks)
# end if
# Show the animation, or write it to a movie file.
if True:
print "Saving animation to disk (may take quite a while)..."
#dpi=200 gives decent quality for the filesize. Takes about 5-10 minutes to make.
ani.save(sys.argv[2],dpi=120,writer=anim.FFMpegWriter(fps=24))
else:
pyplot.show(block=False)
#
#walks.close()
print "Done."
| gpl-3.0 |
joristork/milovision | calibration/calibrate.py | 1 | 4188 | #!/usr/bin/env python
#
# Milovision: A camera pose estimation programme
#
# Copyright (C) 2013 Joris Stork
# See LICENSE.txt
#
# calibrate.py
"""
:synopsis: Estimates the camera's intrinsic parameters and writes these to
file. NB: currently requires a 9x6 chessboard (nr. inner points).
This module is partly inspired by example code in:
“Learning OpenCV: Computer Vision with the OpenCV Library”, by Gary Bradski
& Adrian Kaehler, O'Reilly (2008).
.. moduleauthor:: Joris Stork <[email protected]>
"""
__author__ = "Joris Stork"
# standard library and third party packages
import sys
import signal
import logging
import time
from pydc1394 import DC1394Library, Camera
from pydc1394.cmdline import add_common_options, handle_common_options
import cv2
import matplotlib.pyplot as plt
import numpy as np
# milovision modules
import loginit
import argparse
cam = None
def signal_handler(signal, frame):
""" ensures a clean exit on receiving a ctrl-c """
print '\n\nctrl-c received. Shutting down camera and exiting.'
global cam
cam.stop()
sys.exit(0)
def main():
"""
See module synopsis. Passes image stream from camera driver to OpenCV's
chessboard corner finder until a threshold number of point correspondence
sets is reached. Passes these sets to OpenCV's calibrateCamera, and writes
the resulting estimate of the camera intrinsics to file using Numpy's save
function.
"""
#p = optparse.OptionParser(usage="Usage: %prog [ options ]\n"
# "This program lets the camera run in free running mode.")
options, args = argparse.run()
loginit.run(options.verbosity)
logger = logging.getLogger('main')
#add_common_options(p)
l = DC1394Library()
global cam
cam = handle_common_options(options, l)
try:
cam.start(interactive = True)
except IOError:
print 'error: cannot open stream'
exit(1)
dims = (9,6)
pts_per_board = dims[0] * dims[1]
nr_samples = 20
pt_counts = np.zeros((nr_samples, 1), dtype = int) #pts per image
frame = np.asarray(cam.current_image)
model_pts = np.zeros((nr_samples * pts_per_board, 3), dtype = float)
model_pts = model_pts.astype('float32')
image_pts = np.zeros((nr_samples * pts_per_board, 2), dtype = float)
image_pts = image_pts.astype('float32')
i = 0
while i < nr_samples:
frame = np.asarray(cam.current_image)
found, points = cv2.findChessboardCorners(frame, dims, flags=cv2.CALIB_CB_FAST_CHECK)
if found and ((points.shape)[0] == pts_per_board):
cv2.drawChessboardCorners(frame, (6,9), points, found)
cv2.imshow("win2", frame)
cv2.waitKey(2)
step = i * pts_per_board
j = 0
while j < pts_per_board:
image_pts[step, 0] = points[j, 0, 0]
image_pts[step, 1] = points[j, 0, 1]
model_pts[step, 0] = float(j) / float(dims[0])
model_pts[step, 1] = float(j) % float(dims[0])
model_pts[step, 2] = 0.0
step += 1
j += 1
pt_counts[i, 0] = pts_per_board
cv2.waitKey(2)
i += 1
time.sleep(1)
else:
cv2.imshow("win2", frame)
cv2.waitKey(2)
camera_matrix = np.array([
[2.23802515e+03, 0.0, 5.89782959e+02],
[0.0, 2.07124146e+03, 4.55921570e+02],
[0.0, 0.0, 1.]
])
dist_coeffs = np.zeros(4)
np.save("image_pts.npy", image_pts)
np.save("model_pts.npy", model_pts)
success, intrinsic, distortion_coeffs, rot_est_vecs, transl_est_vecs = cv2.calibrateCamera(model_pts, image_pts, frame.shape, camera_matrix, dist_coeffs, flags=cv2.CALIB_USE_INTRINSIC_GUESS)
np.save("intrinsic.npy", intrinsic)
np.save("distortion_coeffs.npy", distortion_coeffs)
np.save("calibration_rotation_vectors.npy", rot_est_vecs)
np.save("calibration_translation_vectors.npy", transl_est_vecs)
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
main()
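# A possible follow-up sketch (not part of the original script; it assumes the
# .npy files written by main() exist on disk and that `frame` is an image
# captured from the same camera): the saved calibration can be reloaded and
# applied with OpenCV's undistort.
#
#   intrinsic = np.load("intrinsic.npy")
#   distortion_coeffs = np.load("distortion_coeffs.npy")
#   undistorted = cv2.undistort(frame, intrinsic, distortion_coeffs)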
| mit |
erikness/AlephOne | zipline/protocol.py | 1 | 14519 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import iteritems, iterkeys
import pandas as pd
from . utils.protocol_utils import Enum
from zipline.finance.trading import with_environment
from zipline.utils.algo_instance import get_algo_instance
# Datasource type should completely determine the other fields of a
# message with its type.
DATASOURCE_TYPE = Enum(
'AS_TRADED_EQUITY',
'MERGER',
'SPLIT',
'DIVIDEND',
'TRADE',
'TRANSACTION',
'ORDER',
'EMPTY',
'DONE',
'CUSTOM',
'BENCHMARK',
'COMMISSION'
)
# Expected fields/index values for a dividend Series.
DIVIDEND_FIELDS = [
'declared_date',
'ex_date',
'gross_amount',
'net_amount',
'pay_date',
'payment_sid',
'ratio',
'sid',
]
# Expected fields/index values for a dividend payment Series.
DIVIDEND_PAYMENT_FIELDS = ['id', 'payment_sid', 'cash_amount', 'share_count']
def dividend_payment(data=None):
"""
Take a dictionary whose values are in DIVIDEND_PAYMENT_FIELDS and return a
series representing the payment of a dividend.
Ids are assigned to each historical dividend in
PerformanceTracker.update_dividends. They are guaranteed to be unique
integers within the context of a single simulation. If @data is non-empty, an
id is required to identify the historical dividend associated with this
payment.
Additionally, if @data is non-empty, either data['cash_amount'] should be
nonzero or data['payment_sid'] should be a security identifier and
data['share_count'] should be nonzero.
The returned Series is given its id value as a name so that concatenating
payments results in a DataFrame indexed by id. (Note, however, that the
name value is not used to construct an index when this series is returned
by function passed to `DataFrame.apply`. In such a case, pandas preserves
the index of the DataFrame on which `apply` is being called.)
"""
return pd.Series(
data=data,
name=data['id'] if data is not None else None,
index=DIVIDEND_PAYMENT_FIELDS,
dtype=object,
)
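# A minimal usage sketch (the values below are hypothetical, not taken from the
# module): constructing a single payment; concatenating several such Series
# yields a DataFrame indexed by id, as the docstring describes.
#
#   payment = dividend_payment({
#       'id': 1,
#       'payment_sid': 24,
#       'cash_amount': 10.0,
#       'share_count': 0,
#   })
#   # payment is a pd.Series named 1 and indexed by DIVIDEND_PAYMENT_FIELDS.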
class Event(object):
def __init__(self, initial_values=None):
if initial_values:
self.__dict__ = initial_values
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def __delitem__(self, name):
delattr(self, name)
def keys(self):
return self.__dict__.keys()
def __eq__(self, other):
return hasattr(other, '__dict__') and self.__dict__ == other.__dict__
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "Event({0})".format(self.__dict__)
def to_series(self, index=None):
return pd.Series(self.__dict__, index=index)
class Order(Event):
pass
class Portfolio(object):
def __init__(self):
self.capital_used = 0.0
self.starting_cash = 0.0
self.portfolio_value = 0.0
self.pnl = 0.0
self.returns = 0.0
self.cash = 0.0
self.positions = Positions()
self.start_date = None
self.positions_value = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Portfolio({0})".format(self.__dict__)
class Account(object):
'''
The account object tracks information about the trading account. The
values are updated as the algorithm runs and its keys remain unchanged.
If connected to a broker, one can update these values with the trading
account values as reported by the broker.
'''
def __init__(self):
self.settled_cash = 0.0
self.accrued_interest = 0.0
self.buying_power = float('inf')
self.equity_with_loan = 0.0
self.total_positions_value = 0.0
self.regt_equity = 0.0
self.regt_margin = float('inf')
self.initial_margin_requirement = 0.0
self.maintenance_margin_requirement = 0.0
self.available_funds = 0.0
self.excess_liquidity = 0.0
self.cushion = 0.0
self.day_trades_remaining = float('inf')
self.leverage = 0.0
self.net_leverage = 0.0
self.net_liquidation = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Account({0})".format(self.__dict__)
def _get_state(self):
return 'Account', self.__dict__
def _set_state(self, saved_state):
self.__dict__.update(saved_state)
class Position(object):
def __init__(self, sid):
self.sid = sid
self.amount = 0
self.cost_basis = 0.0 # per share
self.last_sale_price = 0.0
def __getitem__(self, key):
return self.__dict__[key]
def __repr__(self):
return "Position({0})".format(self.__dict__)
class Positions(dict):
def __missing__(self, key):
pos = Position(key)
self[key] = pos
return pos
class SIDData(object):
# Cache some data on the class so that this is shared for all instances of
# siddata.
# The dt where we cached the history.
_history_cache_dt = None
# _history_cache is a dict mapping fields to pd.DataFrames. This is the
# most data we have for a given field for the _history_cache_dt.
_history_cache = {}
# This is the cache that is used for returns. This will have a different
# structure than the other history cache as this is always daily.
_returns_cache_dt = None
_returns_cache = None
# The last dt that we needed to cache the number of minutes.
_minute_bar_cache_dt = None
# If we are in minute mode, there is some cost associated with computing
# the number of minutes that we need to pass to the bar count of history.
# This will remain constant for a given bar and day count.
# This maps days to number of minutes.
_minute_bar_cache = {}
def __init__(self, sid, initial_values=None):
self._sid = sid
self._freqstr = None
# To check if we have data, we use the __len__ which depends on the
# __dict__. Because we are forward-defining the attributes needed, we
# need to account for their entries in the __dict__.
# We will add 1 because we need to account for the _initial_len entry
# itself.
self._initial_len = len(self.__dict__) + 1
if initial_values:
self.__dict__.update(initial_values)
@property
def datetime(self):
"""
Provides an alias from data['foo'].datetime -> data['foo'].dt
`datetime` was previously provided by adding a separate `datetime`
member of the SIDData object via a generator that wrapped the incoming
data feed and added the field to each equity event.
This alias is intended to be temporary, to provide backwards
compatibility with existing algorithms, but should be considered
deprecated, and may be removed in the future.
"""
return self.dt
def get(self, name, default=None):
return self.__dict__.get(name, default)
def __getitem__(self, name):
return self.__dict__[name]
def __setitem__(self, name, value):
self.__dict__[name] = value
def __len__(self):
return len(self.__dict__) - self._initial_len
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "SIDData({0})".format(self.__dict__)
def _get_buffer(self, bars, field='price'):
"""
Gets the result of history for the given number of bars and field.
This will cache the results internally.
"""
cls = self.__class__
algo = get_algo_instance()
now = algo.datetime
if now != cls._history_cache_dt:
# For a given dt, the history call for this field will not change.
# We have a new dt, so we should reset the cache.
cls._history_cache_dt = now
cls._history_cache = {}
if field not in self._history_cache \
or bars > len(cls._history_cache[field].index):
# If we have never cached this field OR the amount of bars that we
# need for this field is greater than the amount we have cached,
# then we need to get more history.
hst = algo.history(
bars, self._freqstr, field, ffill=True,
)
# Assert that the column holds ints, not security objects.
if not isinstance(self._sid, str):
hst.columns = hst.columns.astype(int)
self._history_cache[field] = hst
# Slice off only the bars needed. This is because we store the LARGEST
# amount of history for the field, and we might request less than the
# largest from the cache.
return cls._history_cache[field][self._sid][-bars:]
def _get_bars(self, days):
"""
Gets the number of bars needed for the current number of days.
Figures this out based on the algo datafrequency and caches the result.
This caches the result by replacing this function on the object.
This means that after the first call to _get_bars, this method will
point to a new function object.
"""
def daily_get_bars(days):
return days
@with_environment()
def minute_get_bars(days, env=None):
cls = self.__class__
now = get_algo_instance().datetime
if now != cls._minute_bar_cache_dt:
cls._minute_bar_cache_dt = now
cls._minute_bar_cache = {}
if days not in cls._minute_bar_cache:
# Cache this calculation to happen once per bar, even if we
# use another transform with the same number of days.
prev = env.previous_trading_day(now)
ds = env.days_in_range(
env.add_trading_days(-days + 2, prev),
prev,
)
# compute the number of minutes in the (days - 1) days before
# today.
# 210 minutes in an early close and 390 in a full day.
ms = sum(210 if d in env.early_closes else 390 for d in ds)
# Add the number of minutes for today.
ms += int(
(now - env.get_open_and_close(now)[0]).total_seconds() / 60
)
cls._minute_bar_cache[days] = ms + 1 # Account for this minute
return cls._minute_bar_cache[days]
if get_algo_instance().sim_params.data_frequency == 'daily':
self._freqstr = '1d'
# update this method to point to the daily variant.
self._get_bars = daily_get_bars
else:
self._freqstr = '1m'
# update this method to point to the minute variant.
self._get_bars = minute_get_bars
# Not actually recursive because we have already cached the new method.
return self._get_bars(days)
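# The dispatch above memoizes itself by rebinding the method on the instance
# the first time it runs. A standalone sketch of the same pattern (hypothetical
# class, not part of zipline; the 390-minute full trading day matches the
# constant used above):
#
#   class BarCounter(object):
#       def __init__(self, data_frequency):
#           self.data_frequency = data_frequency
#       def bars_for(self, days):
#           # Decide the conversion once, then shadow the method so later
#           # calls skip the branch entirely.
#           if self.data_frequency == 'daily':
#               self.bars_for = lambda d: d
#           else:
#               self.bars_for = lambda d: d * 390
#           return self.bars_for(days)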
def mavg(self, days):
return self._get_buffer(self._get_bars(days)).mean()
def stddev(self, days):
return self._get_buffer(self._get_bars(days)).std(ddof=1)
def vwap(self, days):
bars = self._get_bars(days)
prices = self._get_buffer(bars)
vols = self._get_buffer(bars, field='volume')
return (prices * vols).sum() / vols.sum()
def returns(self):
algo = get_algo_instance()
now = algo.datetime
if now != self._returns_cache_dt:
self._returns_cache_dt = now
self._returns_cache = algo.history(2, '1d', 'price', ffill=True)
hst = self._returns_cache[self._sid]
return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]
class BarData(object):
"""
Holds the event data for all sids for a given dt.
This is what is passed as `data` to the `handle_data` function.
Note: Many methods are analogues of dictionary because of historical
usage of what this replaced as a dictionary subclass.
"""
def __init__(self, data=None):
self._data = data or {}
self._contains_override = None
def __contains__(self, name):
if self._contains_override:
if self._contains_override(name):
return name in self._data
else:
return False
else:
return name in self._data
def has_key(self, name):
"""
DEPRECATED: __contains__ is preferred, but this method is for
compatibility with existing algorithms.
"""
return name in self
def __setitem__(self, name, value):
self._data[name] = value
def __getitem__(self, name):
return self._data[name]
def __delitem__(self, name):
del self._data[name]
def __iter__(self):
for sid, data in iteritems(self._data):
# Allow contains override to filter out sids.
if sid in self:
if len(data):
yield sid
def iterkeys(self):
# Allow contains override to filter out sids.
return (sid for sid in iterkeys(self._data) if sid in self)
def keys(self):
# Allow contains override to filter out sids.
return list(self.iterkeys())
def itervalues(self):
return (value for _sid, value in self.iteritems())
def values(self):
return list(self.itervalues())
def iteritems(self):
return ((sid, value) for sid, value
in iteritems(self._data)
if sid in self)
def items(self):
return list(self.iteritems())
def __len__(self):
return len(self.keys())
def __repr__(self):
return '{0}({1})'.format(self.__class__.__name__, self._data)
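# Hedged usage sketch (the sid key and the 'price' field are illustrative
# examples, not guaranteed by this module): inside an algorithm's
# handle_data(context, data), `data` behaves like a dict of SIDData objects.
#
#   def handle_data(context, data):
#       for sid in data:
#           event = data[sid]
#           last_price = event.price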
| apache-2.0 |
ryfeus/lambda-packs | Pandas_numpy/source/pandas/plotting/_core.py | 1 | 99305 | # being a bit too dynamic
# pylint: disable=E1101
from __future__ import division
import warnings
import re
from collections import namedtuple
from distutils.version import LooseVersion
import numpy as np
from pandas.util._decorators import cache_readonly
from pandas.core.base import PandasObject
from pandas.core.config import get_option
from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike
from pandas.core.dtypes.common import (
is_list_like,
is_integer,
is_number,
is_hashable,
is_iterator)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.common import AbstractMethodError, _try_sort, _any_not_none
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.compat import range, lrange, map, zip, string_types
import pandas.compat as compat
from pandas.io.formats.printing import pprint_thing
from pandas.util._decorators import Appender
from pandas.plotting._compat import (_mpl_ge_1_3_1,
_mpl_ge_1_5_0,
_mpl_ge_2_0_0)
from pandas.plotting._style import (plot_params,
_get_standard_colors)
from pandas.plotting._tools import (_subplots, _flatten, table,
_handle_shared_axes, _get_all_lines,
_get_xlim, _set_ticks_props,
format_date_labels)
try:
from pandas.plotting import _converter
except ImportError:
pass
else:
if get_option('plotting.matplotlib.register_converters'):
_converter.register(explicit=True)
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
def _gca(rc=None):
import matplotlib.pyplot as plt
with plt.rc_context(rc):
return plt.gca()
def _gcf():
import matplotlib.pyplot as plt
return plt.gcf()
class MPLPlot(object):
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
data : DataFrame or Series
"""
@property
def _kind(self):
"""Specify kind str. Must be overridden in child class"""
raise NotImplementedError
_layout_type = 'vertical'
_default_rot = 0
orientation = None
_pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
'mark_right', 'stacked']
_attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
'mark_right': True, 'stacked': False}
def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
sharey=False, use_index=True,
figsize=None, grid=None, legend=True, rot=None,
ax=None, fig=None, title=None, xlim=None, ylim=None,
xticks=None, yticks=None,
sort_columns=False, fontsize=None,
secondary_y=False, colormap=None,
table=False, layout=None, **kwds):
_converter._WARN = False
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
# if we get an axis, the users should do the visibility
# setting...
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else self.plt.rcParams['axes.grid']
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop('xerr', None)
yerr = kwds.pop('yerr', None)
self.errors = {}
for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):
self.errors[kw] = self._parse_errorbars(kw, err)
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmap` name.
# Probably better to accept either.
if 'cmap' in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif 'cmap' in kwds:
self.colormap = kwds.pop('cmap')
else:
self.colormap = colormap
self.table = table
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
if 'color' not in self.kwds and 'colors' in self.kwds:
warnings.warn(("'colors' is being deprecated. Please use 'color' "
"instead of 'colors'"))
colors = self.kwds.pop('colors')
self.kwds['color'] = colors
if ('color' in self.kwds and self.nseries == 1 and
not is_list_like(self.kwds['color'])):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds and isinstance(self.kwds['color'], tuple) and
self.nseries == 1 and len(self.kwds['color']) in (3, 4)):
# support RGB and RGBA tuples in series plot
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds or 'colors' in self.kwds) and \
self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
if 'color' in self.kwds and self.style is not None:
if is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
if re.match('^[a-z]+?', s) is not None:
raise ValueError(
"Cannot pass 'style' string with a color "
"symbol and 'color' keyword argument. Please"
" use one or the other or pass 'style' "
"without a color symbol")
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
# TODO: unused?
# if self.sort_columns:
# columns = _try_sort(data.columns)
# else:
# columns = data.columns
for col, values in data.iteritems():
if keep_index is True:
yield col, values
else:
yield col, values.values
@property
def nseries(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._adorn_subplots()
for ax in self.axes:
self._post_plot_logic_common(ax, self.data)
self._post_plot_logic(ax, self.data)
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
len(ax.containers) != 0)
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._get_ax_layer(ax)
if hasattr(ax, 'right_ax'):
# if it has a right_ax property, ``ax`` must be the left axes
return ax.right_ax
elif hasattr(ax, 'left_ax'):
# if it has a left_ax property, ``ax`` must be the right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
# TODO: use Matplotlib public API when available
new_ax._get_lines = orig_ax._get_lines
new_ax._get_patches_for_fill = orig_ax._get_patches_for_fill
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(naxes=self.nseries,
sharex=self.sharex, sharey=self.sharey,
figsize=self.figsize, ax=self.ax,
layout=self.layout,
layout_type=self._layout_type)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
if self.logx or self.loglog:
[a.set_xscale('log') for a in axes]
if self.logy or self.loglog:
[a.set_yscale('log') for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
if (sec_true or all_sec):
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if isinstance(data, ABCSeries):
label = self.label
if label is None and data.name is None:
label = 'None'
data = data.to_frame(name=label)
# GH16953, _convert is needed as fallback, for ``Series``
# with ``dtype == object``
data = data._convert(datetime=True, timedelta=True)
numeric_data = data.select_dtypes(include=[np.number,
"datetime",
"datetimetz",
"timedelta"])
try:
is_empty = numeric_data.empty
except AttributeError:
is_empty = not len(numeric_data)
# no empty frames or series allowed
if is_empty:
raise TypeError('Empty {0!r}: no numeric data to '
'plot'.format(numeric_data.__class__.__name__))
self.data = numeric_data
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._get_ax(0)
table(ax, data)
def _post_plot_logic_common(self, ax, data):
"""Common post process for each axes"""
def get_label(i):
try:
return pprint_thing(data.index[i])
except Exception:
return ''
if self.orientation == 'vertical' or self.orientation is None:
if self._need_to_set_index:
xticklabels = [get_label(x) for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels)
self._apply_axis_properties(ax.xaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
if hasattr(ax, 'right_ax'):
self._apply_axis_properties(ax.right_ax.yaxis,
fontsize=self.fontsize)
elif self.orientation == 'horizontal':
if self._need_to_set_index:
yticklabels = [get_label(y) for y in ax.get_yticks()]
ax.set_yticklabels(yticklabels)
self._apply_axis_properties(ax.yaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
if hasattr(ax, 'right_ax'):
self._apply_axis_properties(ax.right_ax.yaxis,
fontsize=self.fontsize)
else: # pragma no cover
raise ValueError
def _post_plot_logic(self, ax, data):
"""Post process for each axes. Overridden in child classes"""
pass
def _adorn_subplots(self):
"""Common post process unrelated to data"""
if len(self.axes) > 0:
all_axes = self._get_subplots()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
naxes=nrows * ncols, nrows=nrows,
ncols=ncols, sharex=self.sharex,
sharey=self.sharey)
for ax in self.axes:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
if is_list_like(self.title):
if len(self.title) != self.nseries:
msg = ('The length of `title` must equal the number '
'of columns if using `title` of type `list` '
'and `subplots=True`.\n'
'length of title = {}\n'
'number of columns = {}').format(
len(self.title), self.nseries)
raise ValueError(msg)
for (ax, title) in zip(self.axes, self.title):
ax.set_title(title)
else:
self.fig.suptitle(self.title)
else:
if is_list_like(self.title):
msg = ('Using `title` of type `list` is not supported '
'unless `subplots=True` is passed')
raise ValueError(msg)
self.axes[0].set_title(self.title)
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self):
if not isinstance(self.data.columns, MultiIndex):
name = self.data.columns.name
if name is not None:
name = pprint_thing(name)
return name
else:
stringified = map(pprint_thing,
self.data.columns.names)
return ','.join(stringified)
def _add_legend_handle(self, handle, label, index=None):
if label is not None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + ' (right)'
self.legend_handles.append(handle)
self.legend_labels.append(label)
def _make_legend(self):
ax, leg = self._get_ax_legend(self.axes[0])
handles = []
labels = []
title = ''
if not self.subplots:
if leg is not None:
title = leg.get_title().get_text()
handles = leg.legendHandles
labels = [x.get_text() for x in leg.get_texts()]
if self.legend:
if self.legend == 'reverse':
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
if self.legend_title is not None:
title = self.legend_title
if len(handles) > 0:
ax.legend(handles, labels, loc='best', title=title)
elif self.subplots and self.legend:
for ax in self.axes:
if ax.get_visible():
ax.legend(loc='best')
def _get_ax_legend(self, ax):
leg = ax.get_legend()
other_ax = (getattr(ax, 'left_ax', None) or
getattr(ax, 'right_ax', None))
other_leg = None
if other_ax is not None:
other_leg = other_ax.get_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
@staticmethod
def mpl_ge_1_3_1():
return _mpl_ge_1_3_1()
@staticmethod
def mpl_ge_1_5_0():
return _mpl_ge_1_5_0()
_need_to_set_index = False
def _get_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ('datetime', 'date',
'datetime64', 'time')
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
self.data = self.data.reindex(index=index.sort_values())
x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking a LBYL approach here: by the time
matplotlib raises an exception for non-numeric/datetime
xaxis values, several actions have already been taken by plt.
"""
x = index._mpl_repr()
elif is_datetype:
self.data = self.data[notna(self.data.index)]
self.data = self.data.sort_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = lrange(len(index))
else:
x = lrange(len(index))
return x
@classmethod
def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):
mask = isna(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if isinstance(x, Index):
x = x._mpl_repr()
if is_errorbar:
if 'xerr' in kwds:
kwds['xerr'] = np.array(kwds.get('xerr'))
if 'yerr' in kwds:
kwds['yerr'] = np.array(kwds.get('yerr'))
return ax.errorbar(x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is
# unsupported
if style is not None:
args = (x, y, style)
else:
args = (x, y)
return ax.plot(*args, **kwds)
def _get_index_name(self):
if isinstance(self.data.index, MultiIndex):
name = self.data.index.names
if _any_not_none(*name):
name = ','.join([pprint_thing(x) for x in name])
else:
name = None
else:
name = self.data.index.name
if name is not None:
name = pprint_thing(name)
return name
@classmethod
def _get_ax_layer(cls, ax, primary=True):
"""get left (primary) or right (secondary) axes"""
if primary:
return getattr(ax, 'left_ax', ax)
else:
return getattr(ax, 'right_ax', ax)
def _get_ax(self, i):
# get the twinx ax if appropriate
if self.subplots:
ax = self.axes[i]
ax = self._maybe_right_yaxis(ax, i)
self.axes[i] = ax
else:
ax = self.axes[0]
ax = self._maybe_right_yaxis(ax, i)
ax.get_yaxis().set_visible(True)
return ax
def on_right(self, i):
if isinstance(self.secondary_y, bool):
return self.secondary_y
if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)):
return self.data.columns[i] in self.secondary_y
def _apply_style_colors(self, colors, kwds, col_num, label):
"""
Manage style and color based on column number and its label.
Returns tuple of appropriate style and kwds to which "color" may be added.
"""
style = None
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[col_num]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(label, style)
else:
style = self.style
has_color = 'color' in kwds or self.colormap is not None
nocolor_style = style is None or re.match('[a-z]+', style) is None
if (has_color or self.subplots) and nocolor_style:
kwds['color'] = colors[col_num % len(colors)]
return style, kwds
def _get_colors(self, num_colors=None, color_kwds='color'):
if num_colors is None:
num_colors = self.nseries
return _get_standard_colors(num_colors=num_colors,
colormap=self.colormap,
color=self.kwds.get(color_kwds))
def _parse_errorbars(self, label, err):
"""
Look for error keyword arguments and return the actual errorbar data
or return the error DataFrame/dict
Error bars can be specified in several ways:
Series: the user provides a pandas.Series object of the same
length as the data
ndarray: provides a np.ndarray of the same length as the data
DataFrame/dict: error values are paired with keys matching the
key in the plotted DataFrame
str: the name of the column within the plotted DataFrame
"""
if err is None:
return None
from pandas import DataFrame, Series
def match_labels(data, e):
e = e.reindex(data.index)
return e
# key-matched DataFrame
if isinstance(err, DataFrame):
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
pass
# Series of error values
elif isinstance(err, Series):
# broadcast error series across data
err = match_labels(self.data, err)
err = np.atleast_2d(err)
err = np.tile(err, (self.nseries, 1))
# errors are a column in the dataframe
elif isinstance(err, string_types):
evalues = self.data[err].values
self.data = self.data[self.data.columns.drop(err)]
err = np.atleast_2d(evalues)
err = np.tile(err, (self.nseries, 1))
elif is_list_like(err):
if is_iterator(err):
err = np.atleast_2d(list(err))
else:
# raw error values
err = np.atleast_2d(err)
err_shape = err.shape
# asymmetrical error bars
if err.ndim == 3:
if (err_shape[0] != self.nseries) or \
(err_shape[1] != 2) or \
(err_shape[2] != len(self.data)):
msg = "Asymmetrical error bars should be provided " + \
"with the shape (%u, 2, %u)" % \
(self.nseries, len(self.data))
raise ValueError(msg)
# broadcast errors to each data series
if len(err) == 1:
err = np.tile(err, (self.nseries, 1))
elif is_number(err):
err = np.tile([err], (self.nseries, len(self.data)))
else:
msg = "No valid %s detected" % label
raise ValueError(msg)
return err
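# Hedged usage sketch of the accepted error-bar inputs described above
# (`df`, `err_frame`, `err_series` and the 'errors' column are hypothetical):
#
#   df.plot(yerr=err_frame)              # DataFrame keyed like the plotted columns
#   df.plot(yerr={'A': [0.1] * len(df)})  # dict with matching column keys
#   df.plot(yerr=err_series)             # Series broadcast across all columns
#   df.plot(yerr='errors')               # name of a column in df, dropped from the plot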
def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
from pandas import DataFrame
errors = {}
for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):
if flag:
err = self.errors[kw]
# user provided label-matched dataframe of errors
if isinstance(err, (DataFrame, dict)):
if label is not None and label in err.keys():
err = err[label]
else:
err = None
elif index is not None and err is not None:
err = err[index]
if err is not None:
errors[kw] = err
return errors
def _get_subplots(self):
from matplotlib.axes import Subplot
return [ax for ax in self.axes[0].get_figure().get_axes()
if isinstance(ax, Subplot)]
def _get_axes_layout(self):
axes = self._get_subplots()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
class PlanePlot(MPLPlot):
"""
Abstract class for plotting on plane, currently scatter and hexbin.
"""
_layout_type = 'single'
def __init__(self, data, x, y, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError(self._kind + ' requires an x and y column')
if is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if len(self.data[x]._get_numeric_data()) == 0:
raise ValueError(self._kind + ' requires x column to be numeric')
if len(self.data[y]._get_numeric_data()) == 0:
raise ValueError(self._kind + ' requires y column to be numeric')
self.x = x
self.y = y
@property
def nseries(self):
return 1
def _post_plot_logic(self, ax, data):
x, y = self.x, self.y
ax.set_ylabel(pprint_thing(y))
ax.set_xlabel(pprint_thing(x))
class ScatterPlot(PlanePlot):
_kind = 'scatter'
def __init__(self, data, x, y, s=None, c=None, **kwargs):
if s is None:
# hide the matplotlib default for size, in case we want to change
# the handling of this argument later
s = 20
super(ScatterPlot, self).__init__(data, x, y, s=s, **kwargs)
if is_integer(c) and not self.data.columns.holds_integer():
c = self.data.columns[c]
self.c = c
def _make_plot(self):
x, y, c, data = self.x, self.y, self.c, self.data
ax = self.axes[0]
c_is_column = is_hashable(c) and c in self.data.columns
# plot a colorbar only if a colormap is provided or necessary
cb = self.kwds.pop('colorbar', self.colormap or c_is_column)
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'Greys'
cmap = self.plt.cm.get_cmap(cmap)
color = self.kwds.pop("color", None)
if c is not None and color is not None:
raise TypeError('Specify exactly one of `c` and `color`')
elif c is None and color is None:
c_values = self.plt.rcParams['patch.facecolor']
elif color is not None:
c_values = color
elif c_is_column:
c_values = self.data[c].values
else:
c_values = c
if self.legend and hasattr(self, 'label'):
label = self.label
else:
label = None
scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
label=label, cmap=cmap, **self.kwds)
if cb:
img = ax.collections[0]
kws = dict(ax=ax)
if self.mpl_ge_1_3_1():
kws['label'] = c if c_is_column else ''
self.fig.colorbar(img, **kws)
if label is not None:
self._add_legend_handle(scatter, label)
else:
self.legend = False
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
err_kwds['ecolor'] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values,
linestyle='none', **err_kwds)
class HexBinPlot(PlanePlot):
_kind = 'hexbin'
def __init__(self, data, x, y, C=None, **kwargs):
super(HexBinPlot, self).__init__(data, x, y, **kwargs)
if is_integer(C) and not self.data.columns.holds_integer():
C = self.data.columns[C]
self.C = C
def _make_plot(self):
x, y, data, C = self.x, self.y, self.data, self.C
ax = self.axes[0]
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'BuGn'
cmap = self.plt.cm.get_cmap(cmap)
cb = self.kwds.pop('colorbar', True)
if C is None:
c_values = None
else:
c_values = data[C].values
ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap,
**self.kwds)
if cb:
img = ax.collections[0]
self.fig.colorbar(img, ax=ax)
def _make_legend(self):
pass
class LinePlot(MPLPlot):
_kind = 'line'
_default_rot = 0
orientation = 'vertical'
def __init__(self, data, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if self.stacked:
self.data = self.data.fillna(value=0)
self.x_compat = plot_params['x_compat']
if 'x_compat' in self.kwds:
self.x_compat = bool(self.kwds.pop('x_compat'))
def _is_ts_plot(self):
# this is slightly deceptive
return not self.x_compat and self.use_index and self._use_dynamic_x()
def _use_dynamic_x(self):
from pandas.plotting._timeseries import _use_dynamic_x
return _use_dynamic_x(self._get_ax(0), self.data)
def _make_plot(self):
if self._is_ts_plot():
from pandas.plotting._timeseries import _maybe_convert_index
data = _maybe_convert_index(self._get_ax(0), self.data)
x = data.index # dummy, not used
plotf = self._ts_plot
it = self._iter_data(data=data, keep_index=True)
else:
x = self._get_xticks(convert_period=True)
plotf = self._plot
it = self._iter_data()
stacking_id = self._get_stacking_id()
is_errorbar = _any_not_none(*self.errors.values())
colors = self._get_colors()
for i, (label, y) in enumerate(it):
ax = self._get_ax(i)
kwds = self.kwds.copy()
style, kwds = self._apply_style_colors(colors, kwds, i, label)
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = pprint_thing(label) # .encode('utf-8')
kwds['label'] = label
newlines = plotf(ax, x, y, style=style, column_num=i,
stacking_id=stacking_id,
is_errorbar=is_errorbar,
**kwds)
self._add_legend_handle(newlines[0], label, index=i)
if not _mpl_ge_2_0_0():
lines = _get_all_lines(ax)
left, right = _get_xlim(lines)
ax.set_xlim(left, right)
@classmethod
def _plot(cls, ax, x, y, style=None, column_num=None,
stacking_id=None, **kwds):
# column_num is used to get the target column from plotf in line and
# area plots
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(y))
y_values = cls._get_stacked_values(ax, stacking_id, y, kwds['label'])
lines = MPLPlot._plot(ax, x, y_values, style=style, **kwds)
cls._update_stacker(ax, stacking_id, y)
return lines
@classmethod
def _ts_plot(cls, ax, x, data, style=None, **kwds):
from pandas.plotting._timeseries import (_maybe_resample,
_decorate_axes,
format_dateaxis)
# accept x to be consistent with normal plot func,
# x is not passed to tsplot as it uses data.index as x coordinate
# column_num must be in kwds for stacking purpose
freq, data = _maybe_resample(data, ax, kwds)
# Set ax with freq info
_decorate_axes(ax, freq, kwds)
# digging deeper
if hasattr(ax, 'left_ax'):
_decorate_axes(ax.left_ax, freq, kwds)
if hasattr(ax, 'right_ax'):
_decorate_axes(ax.right_ax, freq, kwds)
ax._plot_data.append((data, cls._kind, kwds))
lines = cls._plot(ax, data.index, data.values, style=style, **kwds)
# set date formatter, locators and rescale limits
format_dateaxis(ax, ax.freq, data.index)
return lines
def _get_stacking_id(self):
if self.stacked:
return id(self.data)
else:
return None
@classmethod
def _initialize_stacker(cls, ax, stacking_id, n):
if stacking_id is None:
return
if not hasattr(ax, '_stacker_pos_prior'):
ax._stacker_pos_prior = {}
if not hasattr(ax, '_stacker_neg_prior'):
ax._stacker_neg_prior = {}
ax._stacker_pos_prior[stacking_id] = np.zeros(n)
ax._stacker_neg_prior[stacking_id] = np.zeros(n)
@classmethod
def _get_stacked_values(cls, ax, stacking_id, values, label):
if stacking_id is None:
return values
if not hasattr(ax, '_stacker_pos_prior'):
# stacker may not be initialized for subplots
cls._initialize_stacker(ax, stacking_id, len(values))
if (values >= 0).all():
return ax._stacker_pos_prior[stacking_id] + values
elif (values <= 0).all():
return ax._stacker_neg_prior[stacking_id] + values
raise ValueError('When stacked is True, each column must be either '
'all positive or negative. '
'{0} contains both positive and negative values'
.format(label))
@classmethod
def _update_stacker(cls, ax, stacking_id, values):
if stacking_id is None:
return
if (values >= 0).all():
ax._stacker_pos_prior[stacking_id] += values
elif (values <= 0).all():
ax._stacker_neg_prior[stacking_id] += values
def _post_plot_logic(self, ax, data):
condition = (not self._use_dynamic_x() and
data.index.is_all_dates and
not self.subplots or
(self.subplots and self.sharex))
index_name = self._get_index_name()
if condition:
# irregular TS rotated 30 deg. by default
# probably a better place to check / set this.
if not self._rot_set:
self.rot = 30
format_date_labels(ax, rot=self.rot)
if index_name is not None and self.use_index:
ax.set_xlabel(index_name)
class AreaPlot(LinePlot):
_kind = 'area'
def __init__(self, data, **kwargs):
kwargs.setdefault('stacked', True)
data = data.fillna(value=0)
LinePlot.__init__(self, data, **kwargs)
if not self.stacked:
# use smaller alpha to distinguish overlap
self.kwds.setdefault('alpha', 0.5)
if self.logy or self.loglog:
raise ValueError("Log-y scales are not supported in area plot")
@classmethod
def _plot(cls, ax, x, y, style=None, column_num=None,
stacking_id=None, is_errorbar=False, **kwds):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(y))
y_values = cls._get_stacked_values(ax, stacking_id, y, kwds['label'])
# need to remove label, because subplots uses mpl legend as it is
line_kwds = kwds.copy()
if cls.mpl_ge_1_5_0():
line_kwds.pop('label')
lines = MPLPlot._plot(ax, x, y_values, style=style, **line_kwds)
# get data from the line to get coordinates for fill_between
xdata, y_values = lines[0].get_data(orig=False)
# unable to use ``_get_stacked_values`` here to get starting point
if stacking_id is None:
start = np.zeros(len(y))
elif (y >= 0).all():
start = ax._stacker_pos_prior[stacking_id]
elif (y <= 0).all():
start = ax._stacker_neg_prior[stacking_id]
else:
start = np.zeros(len(y))
if 'color' not in kwds:
kwds['color'] = lines[0].get_color()
rect = ax.fill_between(xdata, start, y_values, **kwds)
cls._update_stacker(ax, stacking_id, y)
# LinePlot expects list of artists
res = [rect] if cls.mpl_ge_1_5_0() else lines
return res
def _add_legend_handle(self, handle, label, index=None):
if not self.mpl_ge_1_5_0():
from matplotlib.patches import Rectangle
# Because fill_between isn't supported in legend,
# specifically add Rectangle handle here
alpha = self.kwds.get('alpha', None)
handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(),
alpha=alpha)
LinePlot._add_legend_handle(self, handle, label, index=index)
def _post_plot_logic(self, ax, data):
LinePlot._post_plot_logic(self, ax, data)
if self.ylim is None:
if (data >= 0).all().all():
ax.set_ylim(0, None)
elif (data <= 0).all().all():
ax.set_ylim(None, 0)
class BarPlot(MPLPlot):
_kind = 'bar'
_default_rot = 90
orientation = 'vertical'
def __init__(self, data, **kwargs):
# we have to treat a series differently than a
# 1-column DataFrame w.r.t. color handling
self._is_series = isinstance(data, ABCSeries)
self.bar_width = kwargs.pop('width', 0.5)
pos = kwargs.pop('position', 0.5)
kwargs.setdefault('align', 'center')
self.tick_pos = np.arange(len(data))
self.bottom = kwargs.pop('bottom', 0)
self.left = kwargs.pop('left', 0)
self.log = kwargs.pop('log', False)
MPLPlot.__init__(self, data, **kwargs)
if self.stacked or self.subplots:
self.tickoffset = self.bar_width * pos
if kwargs['align'] == 'edge':
self.lim_offset = self.bar_width / 2
else:
self.lim_offset = 0
else:
if kwargs['align'] == 'edge':
w = self.bar_width / self.nseries
self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
self.lim_offset = w * 0.5
else:
self.tickoffset = self.bar_width * pos
self.lim_offset = 0
self.ax_pos = self.tick_pos - self.tickoffset
def _args_adjust(self):
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
if is_list_like(self.left):
self.left = np.array(self.left)
@classmethod
def _plot(cls, ax, x, y, w, start=0, log=False, **kwds):
return ax.bar(x, y, w, bottom=start, log=log, **kwds)
@property
def _start_base(self):
return self.bottom
def _make_plot(self):
import matplotlib as mpl
colors = self._get_colors()
ncolors = len(colors)
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data(fillna=0)):
ax = self._get_ax(i)
kwds = self.kwds.copy()
if self._is_series:
kwds['color'] = colors
else:
kwds['color'] = colors[i % ncolors]
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = pprint_thing(label)
if (('yerr' in kwds) or ('xerr' in kwds)) \
and (kwds.get('ecolor') is None):
kwds['ecolor'] = mpl.rcParams['xtick.color']
start = 0
if self.log and (y >= 1).all():
start = 1
start = start + self._start_base
if self.subplots:
w = self.bar_width / 2
rect = self._plot(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label,
log=self.log, **kwds)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior) + self._start_base
w = self.bar_width / 2
rect = self._plot(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label,
log=self.log, **kwds)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
w = self.bar_width / K
rect = self._plot(ax, self.ax_pos + (i + 0.5) * w, y, w,
start=start, label=label,
log=self.log, **kwds)
self._add_legend_handle(rect, label, index=i)
def _post_plot_logic(self, ax, data):
if self.use_index:
str_index = [pprint_thing(key) for key in data.index]
else:
str_index = [pprint_thing(key) for key in range(data.shape[0])]
name = self._get_index_name()
s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
self._decorate_ticks(ax, name, str_index, s_edge, e_edge)
def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):
ax.set_xlim((start_edge, end_edge))
ax.set_xticks(self.tick_pos)
ax.set_xticklabels(ticklabels)
if name is not None and self.use_index:
ax.set_xlabel(name)
class BarhPlot(BarPlot):
_kind = 'barh'
_default_rot = 0
orientation = 'horizontal'
@property
def _start_base(self):
return self.left
@classmethod
def _plot(cls, ax, x, y, w, start=0, log=False, **kwds):
return ax.barh(x, y, w, left=start, log=log, **kwds)
def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):
# horizontal bars
ax.set_ylim((start_edge, end_edge))
ax.set_yticks(self.tick_pos)
ax.set_yticklabels(ticklabels)
if name is not None and self.use_index:
ax.set_ylabel(name)
class HistPlot(LinePlot):
_kind = 'hist'
def __init__(self, data, bins=10, bottom=0, **kwargs):
self.bins = bins # use mpl default
self.bottom = bottom
# Do not call LinePlot.__init__ which may fill nan
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if is_integer(self.bins):
# create common bin edge
values = (self.data._convert(datetime=True)._get_numeric_data())
values = np.ravel(values)
values = values[~isna(values)]
hist, self.bins = np.histogram(
values, bins=self.bins,
range=self.kwds.get('range', None),
weights=self.kwds.get('weights', None))
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
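# Worked example of the shared-bin computation above (hypothetical values):
# flattening all columns into one array and histogramming it yields a single
# set of edges that is then reused for every column, e.g.
#
#   np.histogram(np.array([1., 2., 3., 9.]), bins=4)
#   # -> (array([2, 1, 0, 1]), array([1., 3., 5., 7., 9.]))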
@classmethod
def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0,
stacking_id=None, **kwds):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
y = y[~isna(y)]
base = np.zeros(len(bins) - 1)
bottom = bottom + \
cls._get_stacked_values(ax, stacking_id, base, kwds['label'])
# ignore style
n, bins, patches = ax.hist(y, bins=bins, bottom=bottom, **kwds)
cls._update_stacker(ax, stacking_id, n)
return patches
def _make_plot(self):
colors = self._get_colors()
stacking_id = self._get_stacking_id()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label)
kwds['label'] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds['style'] = style
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i,
stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _make_plot_keywords(self, kwds, y):
"""merge BoxPlot/KdePlot properties to passed kwds"""
# y is required for KdePlot
kwds['bottom'] = self.bottom
kwds['bins'] = self.bins
return kwds
def _post_plot_logic(self, ax, data):
if self.orientation == 'horizontal':
ax.set_xlabel('Frequency')
else:
ax.set_ylabel('Frequency')
@property
def orientation(self):
if self.kwds.get('orientation', None) == 'horizontal':
return 'horizontal'
else:
return 'vertical'
class KdePlot(HistPlot):
_kind = 'kde'
orientation = 'vertical'
def __init__(self, data, bw_method=None, ind=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
self.bw_method = bw_method
self.ind = ind
def _args_adjust(self):
pass
def _get_ind(self, y):
if self.ind is None:
# np.nanmax() and np.nanmin() ignores the missing values
sample_range = np.nanmax(y) - np.nanmin(y)
ind = np.linspace(np.nanmin(y) - 0.5 * sample_range,
np.nanmax(y) + 0.5 * sample_range, 1000)
else:
ind = self.ind
return ind
@classmethod
def _plot(cls, ax, y, style=None, bw_method=None, ind=None,
column_num=None, stacking_id=None, **kwds):
from scipy.stats import gaussian_kde
from scipy import __version__ as spv
y = remove_na_arraylike(y)
if LooseVersion(spv) >= '0.11.0':
gkde = gaussian_kde(y, bw_method=bw_method)
else:
gkde = gaussian_kde(y)
if bw_method is not None:
msg = ('bw_method was added in Scipy 0.11.0.' +
' Scipy version in use is %s.' % spv)
warnings.warn(msg)
y = gkde.evaluate(ind)
lines = MPLPlot._plot(ax, ind, y, style=style, **kwds)
return lines
def _make_plot_keywords(self, kwds, y):
kwds['bw_method'] = self.bw_method
kwds['ind'] = self._get_ind(y)
return kwds
def _post_plot_logic(self, ax, data):
ax.set_ylabel('Density')
class PiePlot(MPLPlot):
_kind = 'pie'
_layout_type = 'horizontal'
def __init__(self, data, kind=None, **kwargs):
data = data.fillna(value=0)
if (data < 0).any().any():
raise ValueError("{0} doesn't allow negative values".format(kind))
MPLPlot.__init__(self, data, kind=kind, **kwargs)
def _args_adjust(self):
self.grid = False
self.logy = False
self.logx = False
self.loglog = False
def _validate_color_args(self):
pass
def _make_plot(self):
colors = self._get_colors(
num_colors=len(self.data), color_kwds='colors')
self.kwds.setdefault('colors', colors)
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
if label is not None:
label = pprint_thing(label)
ax.set_ylabel(label)
kwds = self.kwds.copy()
def blank_labeler(label, value):
if value == 0:
return ''
else:
return label
idx = [pprint_thing(v) for v in self.data.index]
labels = kwds.pop('labels', idx)
# labels is used for each wedge's labels
# Blank out labels for values of 0 so they don't overlap
# with nonzero wedges
if labels is not None:
blabels = [blank_labeler(l, value) for
l, value in zip(labels, y)]
else:
blabels = None
results = ax.pie(y, labels=blabels, **kwds)
if kwds.get('autopct', None) is not None:
patches, texts, autotexts = results
else:
patches, texts = results
autotexts = []
if self.fontsize is not None:
for t in texts + autotexts:
t.set_fontsize(self.fontsize)
# leglabels is used for legend labels
leglabels = labels if labels is not None else idx
for p, l in zip(patches, leglabels):
self._add_legend_handle(p, l)
class BoxPlot(LinePlot):
_kind = 'box'
_layout_type = 'horizontal'
_valid_return_types = (None, 'axes', 'dict', 'both')
# namedtuple to hold results
BP = namedtuple("Boxplot", ['ax', 'lines'])
def __init__(self, data, return_type='axes', **kwargs):
# Do not call LinePlot.__init__ which may fill nan
if return_type not in self._valid_return_types:
raise ValueError(
"return_type must be {None, 'axes', 'dict', 'both'}")
self.return_type = return_type
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if self.subplots:
# Disable label ax sharing. Otherwise, all subplots show the last
# column's label
if self.orientation == 'vertical':
self.sharex = False
else:
self.sharey = False
@classmethod
def _plot(cls, ax, y, column_num=None, return_type='axes', **kwds):
if y.ndim == 2:
y = [remove_na_arraylike(v) for v in y]
# Boxplot fails with empty arrays, so need to add a NaN
# if any cols are empty
# GH 8181
y = [v if v.size > 0 else np.array([np.nan]) for v in y]
else:
y = remove_na_arraylike(y)
bp = ax.boxplot(y, **kwds)
if return_type == 'dict':
return bp, bp
elif return_type == 'both':
return cls.BP(ax=ax, lines=bp), bp
else:
return ax, bp
def _validate_color_args(self):
if 'color' in self.kwds:
if self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
self.color = self.kwds.pop('color')
if isinstance(self.color, dict):
valid_keys = ['boxes', 'whiskers', 'medians', 'caps']
for key, values in compat.iteritems(self.color):
if key not in valid_keys:
raise ValueError("color dict contains invalid "
"key '{0}' "
"The key must be either {1}"
.format(key, valid_keys))
else:
self.color = None
# get standard colors for default
colors = _get_standard_colors(num_colors=3,
colormap=self.colormap,
color=None)
# use 2 colors by default, for box/whisker and median
# flier colors aren't needed here
# because they can be specified by the ``sym`` kw
self._boxes_c = colors[0]
self._whiskers_c = colors[0]
self._medians_c = colors[2]
self._caps_c = 'k' # mpl default
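# Hedged usage sketch of the color dict validated above (the color names are
# arbitrary examples):
#
#   df.plot.box(color={'boxes': 'DarkGreen',
#                      'whiskers': 'DarkOrange',
#                      'medians': 'DarkBlue',
#                      'caps': 'Gray'})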
def _get_colors(self, num_colors=None, color_kwds='color'):
pass
def maybe_color_bp(self, bp):
if isinstance(self.color, dict):
boxes = self.color.get('boxes', self._boxes_c)
whiskers = self.color.get('whiskers', self._whiskers_c)
medians = self.color.get('medians', self._medians_c)
caps = self.color.get('caps', self._caps_c)
else:
# Other types are forwarded to matplotlib
# If None, use default colors
boxes = self.color or self._boxes_c
whiskers = self.color or self._whiskers_c
medians = self.color or self._medians_c
caps = self.color or self._caps_c
from matplotlib.artist import setp
setp(bp['boxes'], color=boxes, alpha=1)
setp(bp['whiskers'], color=whiskers, alpha=1)
setp(bp['medians'], color=medians, alpha=1)
setp(bp['caps'], color=caps, alpha=1)
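    # Hedged usage sketch (added for illustration only; the column names are
    # made up): the color handling above accepts either a single color or a
    # dict keyed by 'boxes', 'whiskers', 'medians' and 'caps', e.g.
    #
    # >>> import numpy as np, pandas as pd
    # >>> df = pd.DataFrame(np.random.randn(10, 2), columns=['A', 'B'])
    # >>> ax = df.plot.box(color={'boxes': 'DarkGreen', 'whiskers': 'gray',
    # ...                         'medians': 'DarkBlue', 'caps': 'black'})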
def _make_plot(self):
if self.subplots:
from pandas.core.series import Series
self._return_obj = Series()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
kwds = self.kwds.copy()
ret, bp = self._plot(ax, y, column_num=i,
return_type=self.return_type, **kwds)
self.maybe_color_bp(bp)
self._return_obj[label] = ret
label = [pprint_thing(label)]
self._set_ticklabels(ax, label)
else:
y = self.data.values.T
ax = self._get_ax(0)
kwds = self.kwds.copy()
ret, bp = self._plot(ax, y, column_num=0,
return_type=self.return_type, **kwds)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [l for l, _ in self._iter_data()]
labels = [pprint_thing(l) for l in labels]
if not self.use_index:
labels = [pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
def _set_ticklabels(self, ax, labels):
if self.orientation == 'vertical':
ax.set_xticklabels(labels)
else:
ax.set_yticklabels(labels)
def _make_legend(self):
pass
def _post_plot_logic(self, ax, data):
pass
@property
def orientation(self):
if self.kwds.get('vert', True):
return 'vertical'
else:
return 'horizontal'
@property
def result(self):
if self.return_type is None:
return super(BoxPlot, self).result
else:
return self._return_obj
# kinds supported by both dataframe and series
_common_kinds = ['line', 'bar', 'barh',
'kde', 'density', 'area', 'hist', 'box']
# kinds supported by dataframe
_dataframe_kinds = ['scatter', 'hexbin']
# kinds supported only by series or dataframe single column
_series_kinds = ['pie']
_all_kinds = _common_kinds + _dataframe_kinds + _series_kinds
_klasses = [LinePlot, BarPlot, BarhPlot, KdePlot, HistPlot, BoxPlot,
ScatterPlot, HexBinPlot, AreaPlot, PiePlot]
_plot_klass = {}
for klass in _klasses:
_plot_klass[klass._kind] = klass
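# Illustrative note (added, not original code): the registry built above is
# what lets ``_plot`` below dispatch purely on the ``kind`` string, e.g.
#
# >>> _plot_klass['box']       # -> BoxPlot
# >>> _plot_klass['hexbin']    # -> HexBinPlot, a DataFrame-only kind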
def _plot(data, x=None, y=None, subplots=False,
ax=None, kind='line', **kwds):
kind = _get_standard_kind(kind.lower().strip())
if kind in _all_kinds:
klass = _plot_klass[kind]
else:
raise ValueError("%r is not a valid plot kind" % kind)
from pandas import DataFrame
if kind in _dataframe_kinds:
if isinstance(data, DataFrame):
plot_obj = klass(data, x=x, y=y, subplots=subplots, ax=ax,
kind=kind, **kwds)
else:
raise ValueError("plot kind %r can only be used for data frames"
% kind)
elif kind in _series_kinds:
if isinstance(data, DataFrame):
if y is None and subplots is False:
msg = "{0} requires either y column or 'subplots=True'"
raise ValueError(msg.format(kind))
elif y is not None:
if is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
# converted to series actually. copy to not modify
data = data[y].copy()
data.index.name = y
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
else:
if isinstance(data, DataFrame):
if x is not None:
if is_integer(x) and not data.columns.holds_integer():
x = data.columns[x]
data = data.set_index(x)
if y is not None:
if is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
label = kwds['label'] if 'label' in kwds else y
series = data[y].copy() # Don't modify
series.name = label
for kw in ['xerr', 'yerr']:
if (kw in kwds) and \
(isinstance(kwds[kw], string_types) or
is_integer(kwds[kw])):
try:
kwds[kw] = data[kwds[kw]]
except (IndexError, KeyError, TypeError):
pass
data = series
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
plot_obj.generate()
plot_obj.draw()
return plot_obj.result
df_kind = """- 'scatter' : scatter plot
- 'hexbin' : hexbin plot"""
series_kind = ""
df_coord = """x : label or position, default None
y : label or position, default None
Allows plotting of one column versus another"""
series_coord = ""
df_unique = """stacked : boolean, default False in line and
bar plots, and True in area plot. If True, create stacked plot.
sort_columns : boolean, default False
Sort column names to determine plot ordering
secondary_y : boolean or sequence, default False
Whether to plot on the secondary y-axis
If a list/tuple, which columns to plot on secondary y-axis"""
series_unique = """label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right"""
df_ax = """ax : matplotlib axes object, default None
subplots : boolean, default False
Make separate subplots for each column
sharex : boolean, default True if ax is None else False
    In case subplots=True, share x axis and set some x axis labels to
    invisible; defaults to True if ax is None, otherwise False if an ax
    is passed in. Be aware that passing in both an ax and sharex=True
    will alter all x axis labels for all axes in a figure!
sharey : boolean, default False
In case subplots=True, share y axis and set some y axis labels to
invisible
layout : tuple (optional)
(rows, columns) for the layout of subplots"""
series_ax = """ax : matplotlib axes object
If not passed, uses gca()"""
df_note = """- If `kind` = 'scatter' and the argument `c` is the name of a dataframe
column, the values of that column are used to color each point.
- If `kind` = 'hexbin', you can control the size of the bins with the
`gridsize` argument. By default, a histogram of the counts around each
`(x, y)` point is computed. You can specify alternative aggregations
by passing values to the `C` and `reduce_C_function` arguments.
`C` specifies the value at each `(x, y)` point and `reduce_C_function`
is a function of one argument that reduces all the values in a bin to
a single number (e.g. `mean`, `max`, `sum`, `std`)."""
series_note = ""
_shared_doc_df_kwargs = dict(klass='DataFrame', klass_obj='df',
klass_kind=df_kind, klass_coord=df_coord,
klass_ax=df_ax, klass_unique=df_unique,
klass_note=df_note)
_shared_doc_series_kwargs = dict(klass='Series', klass_obj='s',
klass_kind=series_kind,
klass_coord=series_coord, klass_ax=series_ax,
klass_unique=series_unique,
klass_note=series_note)
_shared_docs['plot'] = """
Make plots of %(klass)s using matplotlib / pylab.
*New in version 0.17.0:* Each plot kind has a corresponding method on the
``%(klass)s.plot`` accessor:
``%(klass_obj)s.plot(kind='line')`` is equivalent to
``%(klass_obj)s.plot.line()``.
Parameters
----------
data : %(klass)s
%(klass_coord)s
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
%(klass_kind)s
%(klass_ax)s
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
%(klass_unique)s
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
%(klass_note)s
"""
@Appender(_shared_docs['plot'] % _shared_doc_df_kwargs)
def plot_frame(data, x=None, y=None, kind='line', ax=None,
subplots=False, sharex=None, sharey=False, layout=None,
figsize=None, use_index=True, title=None, grid=None,
legend=True, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
secondary_y=False, sort_columns=False,
**kwds):
return _plot(data, kind=kind, x=x, y=y, ax=ax,
subplots=subplots, sharex=sharex, sharey=sharey,
layout=layout, figsize=figsize, use_index=use_index,
title=title, grid=grid, legend=legend,
style=style, logx=logx, logy=logy, loglog=loglog,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
rot=rot, fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr,
secondary_y=secondary_y, sort_columns=sort_columns,
**kwds)
@Appender(_shared_docs['plot'] % _shared_doc_series_kwargs)
def plot_series(data, kind='line', ax=None, # Series unique
figsize=None, use_index=True, title=None, grid=None,
legend=False, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
label=None, secondary_y=False, # Series unique
**kwds):
import matplotlib.pyplot as plt
if ax is None and len(plt.get_fignums()) > 0:
ax = _gca()
ax = MPLPlot._get_ax_layer(ax)
return _plot(data, kind=kind, ax=ax,
figsize=figsize, use_index=use_index, title=title,
grid=grid, legend=legend,
style=style, logx=logx, logy=logy, loglog=loglog,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
rot=rot, fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr,
label=label, secondary_y=secondary_y,
**kwds)
_shared_docs['boxplot'] = """
Make a box plot from DataFrame columns, optionally grouped by some columns
or other inputs
Parameters
----------
data : the pandas object holding the data
column : column name or list of names, or vector
Can be any valid input to groupby
by : string or sequence
Column in the DataFrame to group by
ax : Matplotlib axes object, optional
fontsize : int or string
rot : label rotation angle
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
layout : tuple (optional)
(rows, columns) for the layout of the plot
return_type : {None, 'axes', 'dict', 'both'}, default None
The kind of object to return. The default is ``axes``
'axes' returns the matplotlib axes the boxplot is drawn on;
'dict' returns a dictionary whose values are the matplotlib
Lines of the boxplot;
'both' returns a namedtuple with the axes and dict.
When grouping with ``by``, a Series mapping columns to ``return_type``
is returned, unless ``return_type`` is None, in which case a NumPy
array of axes is returned with the same shape as ``layout``.
See the prose documentation for more.
kwds : other plotting keyword arguments to be passed to matplotlib boxplot
function
Returns
-------
lines : dict
ax : matplotlib Axes
(ax, lines): namedtuple
Notes
-----
Use ``return_type='dict'`` when you want to tweak the appearance
of the lines after plotting. In this case a dict containing the Lines
making up the boxes, caps, fliers, medians, and whiskers is returned.
"""
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot(data, column=None, by=None, ax=None, fontsize=None,
rot=0, grid=True, figsize=None, layout=None, return_type=None,
**kwds):
# validate return_type:
if return_type not in BoxPlot._valid_return_types:
raise ValueError("return_type must be {'axes', 'dict', 'both'}")
from pandas import Series, DataFrame
if isinstance(data, Series):
data = DataFrame({'x': data})
column = 'x'
def _get_colors():
return _get_standard_colors(color=kwds.get('color'), num_colors=1)
def maybe_color_bp(bp):
if 'color' not in kwds:
from matplotlib.artist import setp
setp(bp['boxes'], color=colors[0], alpha=1)
setp(bp['whiskers'], color=colors[0], alpha=1)
setp(bp['medians'], color=colors[2], alpha=1)
def plot_group(keys, values, ax):
keys = [pprint_thing(x) for x in keys]
values = [np.asarray(remove_na_arraylike(v)) for v in values]
bp = ax.boxplot(values, **kwds)
if fontsize is not None:
ax.tick_params(axis='both', labelsize=fontsize)
if kwds.get('vert', 1):
ax.set_xticklabels(keys, rotation=rot)
else:
ax.set_yticklabels(keys, rotation=rot)
maybe_color_bp(bp)
# Return axes in multiplot case, maybe revisit later # 985
if return_type == 'dict':
return bp
elif return_type == 'both':
return BoxPlot.BP(ax=ax, lines=bp)
else:
return ax
colors = _get_colors()
if column is None:
columns = None
else:
if isinstance(column, (list, tuple)):
columns = column
else:
columns = [column]
if by is not None:
# Prefer array return type for 2-D plots to match the subplot layout
# https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580
result = _grouped_plot_by_column(plot_group, data, columns=columns,
by=by, grid=grid, figsize=figsize,
ax=ax, layout=layout,
return_type=return_type)
else:
if return_type is None:
return_type = 'axes'
if layout is not None:
raise ValueError("The 'layout' keyword is not supported when "
"'by' is None")
if ax is None:
rc = {'figure.figsize': figsize} if figsize is not None else {}
ax = _gca(rc)
data = data._get_numeric_data()
if columns is None:
columns = data.columns
else:
data = data[columns]
result = plot_group(columns, data.values.T, ax)
ax.grid(grid)
return result
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot_frame(self, column=None, by=None, ax=None, fontsize=None, rot=0,
grid=True, figsize=None, layout=None,
return_type=None, **kwds):
import matplotlib.pyplot as plt
_converter._WARN = False
ax = boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize,
grid=grid, rot=rot, figsize=figsize, layout=layout,
return_type=return_type, **kwds)
plt.draw_if_interactive()
return ax
def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False,
**kwargs):
"""
Make a scatter plot from two DataFrame columns
Parameters
----------
data : DataFrame
x : Column name for the x-axis values
y : Column name for the y-axis values
ax : Matplotlib axis object
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
kwargs : other plotting keyword arguments
To be passed to scatter function
Returns
-------
fig : matplotlib.Figure
"""
import matplotlib.pyplot as plt
kwargs.setdefault('edgecolors', 'none')
def plot_group(group, ax):
xvals = group[x].values
yvals = group[y].values
ax.scatter(xvals, yvals, **kwargs)
ax.grid(grid)
if by is not None:
fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
else:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
plot_group(data, ax)
ax.set_ylabel(pprint_thing(y))
ax.set_xlabel(pprint_thing(x))
ax.grid(grid)
return fig
def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
sharey=False, figsize=None, layout=None, bins=10, **kwds):
"""
Draw histogram of the DataFrame's series using matplotlib / pylab.
Parameters
----------
data : DataFrame
column : string or sequence
If passed, will be used to limit data to a subset of columns
by : object, optional
If passed, then used to form histograms for separate groups
grid : boolean, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
ax : matplotlib axes object, default None
sharex : boolean, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
        invisible; defaults to True if ax is None, otherwise False if an ax
        is passed in. Be aware that passing in both an ax and sharex=True
        will alter all x axis labels for all subplots in a figure!
sharey : boolean, default False
In case subplots=True, share y axis and set some y axis labels to
invisible
figsize : tuple
The size of the figure to create in inches by default
layout : tuple, optional
Tuple of (rows, columns) for the layout of the histograms
bins : integer, default 10
Number of histogram bins to be used
kwds : other plotting keyword arguments
To be passed to hist function
"""
_converter._WARN = False
if by is not None:
axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid,
figsize=figsize, sharex=sharex, sharey=sharey,
layout=layout, bins=bins, xlabelsize=xlabelsize,
xrot=xrot, ylabelsize=ylabelsize,
yrot=yrot, **kwds)
return axes
if column is not None:
if not isinstance(column, (list, np.ndarray, Index)):
column = [column]
data = data[column]
data = data._get_numeric_data()
naxes = len(data.columns)
fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False,
sharex=sharex, sharey=sharey, figsize=figsize,
layout=layout)
_axes = _flatten(axes)
for i, col in enumerate(_try_sort(data.columns)):
ax = _axes[i]
ax.hist(data[col].dropna().values, bins=bins, **kwds)
ax.set_title(col)
ax.grid(grid)
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
fig.subplots_adjust(wspace=0.3, hspace=0.3)
return axes
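# Hedged usage sketch (added; the column names are illustrative assumptions):
# the two branches above cover the grouped and ungrouped cases of
# ``hist_frame``:
#
# >>> import numpy as np, pandas as pd
# >>> df = pd.DataFrame({'length': np.random.randn(100),
# ...                    'width': np.random.randn(100),
# ...                    'species': np.random.choice(['a', 'b'], 100)})
# >>> axes = hist_frame(df, column=['length', 'width'], bins=20)
# >>> axes = hist_frame(df, column='length', by='species', sharex=True)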
def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, figsize=None,
bins=10, **kwds):
"""
Draw histogram of the input series using matplotlib
Parameters
----------
by : object, optional
If passed, then used to form histograms for separate groups
ax : matplotlib axis object
If not passed, uses gca()
grid : boolean, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
figsize : tuple, default None
figure size in inches by default
    bins : integer, default 10
Number of histogram bins to be used
kwds : keywords
To be passed to the actual plotting function
Notes
-----
See matplotlib documentation online for more on this
"""
import matplotlib.pyplot as plt
if by is None:
if kwds.get('layout', None) is not None:
raise ValueError("The 'layout' keyword is not supported when "
"'by' is None")
# hack until the plotting interface is a bit more unified
fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else
plt.figure(figsize=figsize))
if (figsize is not None and tuple(figsize) !=
tuple(fig.get_size_inches())):
fig.set_size_inches(*figsize, forward=True)
if ax is None:
ax = fig.gca()
elif ax.get_figure() != fig:
raise AssertionError('passed axis not bound to passed figure')
values = self.dropna().values
ax.hist(values, bins=bins, **kwds)
ax.grid(grid)
axes = np.array([ax])
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
else:
if 'figure' in kwds:
raise ValueError("Cannot pass 'figure' when using the "
"'by' argument, since a new 'Figure' instance "
"will be created")
axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize,
bins=bins, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot, **kwds)
if hasattr(axes, 'ndim'):
if axes.ndim == 1 and len(axes) == 1:
return axes[0]
return axes
def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None,
layout=None, sharex=False, sharey=False, rot=90, grid=True,
xlabelsize=None, xrot=None, ylabelsize=None, yrot=None,
**kwargs):
"""
Grouped histogram
Parameters
----------
data: Series/DataFrame
column: object, optional
by: object, optional
ax: axes, optional
bins: int, default 50
figsize: tuple, optional
layout: optional
sharex: boolean, default False
sharey: boolean, default False
rot: int, default 90
grid: bool, default True
kwargs: dict, keyword arguments passed to matplotlib.Axes.hist
Returns
-------
axes: collection of Matplotlib Axes
"""
_converter._WARN = False
def plot_group(group, ax):
ax.hist(group.dropna().values, bins=bins, **kwargs)
xrot = xrot or rot
fig, axes = _grouped_plot(plot_group, data, column=column,
by=by, sharex=sharex, sharey=sharey, ax=ax,
figsize=figsize, layout=layout, rot=rot)
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9,
hspace=0.5, wspace=0.3)
return axes
def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
rot=0, grid=True, ax=None, figsize=None,
layout=None, **kwds):
"""
Make box plots from DataFrameGroupBy data.
Parameters
----------
grouped : Grouped DataFrame
subplots :
* ``False`` - no subplots will be used
* ``True`` - create a subplot for each group
column : column name or list of names, or vector
Can be any valid input to groupby
fontsize : int or string
rot : label rotation angle
grid : Setting this to True will show the grid
ax : Matplotlib axis object, default None
figsize : A tuple (width, height) in inches
layout : tuple (optional)
(rows, columns) for the layout of the plot
kwds : other plotting keyword arguments to be passed to matplotlib boxplot
function
Returns
-------
dict of key/value = group key/DataFrame.boxplot return value
    or DataFrame.boxplot return value in case ``subplots=False``
Examples
--------
>>> import pandas
>>> import numpy as np
>>> import itertools
>>>
>>> tuples = [t for t in itertools.product(range(1000), range(4))]
>>> index = pandas.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
>>> data = np.random.randn(len(index),4)
>>> df = pandas.DataFrame(data, columns=list('ABCD'), index=index)
>>>
>>> grouped = df.groupby(level='lvl1')
>>> boxplot_frame_groupby(grouped)
>>>
>>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1)
>>> boxplot_frame_groupby(grouped, subplots=False)
"""
_converter._WARN = False
if subplots is True:
naxes = len(grouped)
fig, axes = _subplots(naxes=naxes, squeeze=False,
ax=ax, sharex=False, sharey=True,
figsize=figsize, layout=layout)
axes = _flatten(axes)
from pandas.core.series import Series
ret = Series()
for (key, group), ax in zip(grouped, axes):
d = group.boxplot(ax=ax, column=column, fontsize=fontsize,
rot=rot, grid=grid, **kwds)
ax.set_title(pprint_thing(key))
ret.loc[key] = d
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1,
right=0.9, wspace=0.2)
else:
from pandas.core.reshape.concat import concat
keys, frames = zip(*grouped)
if grouped.axis == 0:
df = concat(frames, keys=keys, axis=1)
else:
if len(frames) > 1:
df = frames[0].join(frames[1::])
else:
df = frames[0]
ret = df.boxplot(column=column, fontsize=fontsize, rot=rot,
grid=grid, ax=ax, figsize=figsize,
layout=layout, **kwds)
return ret
def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,
figsize=None, sharex=True, sharey=True, layout=None,
rot=0, ax=None, **kwargs):
from pandas import DataFrame
if figsize == 'default':
# allowed to specify mpl default with 'default'
        warnings.warn("figsize='default' is deprecated. Specify figure "
                      "size by tuple instead", FutureWarning, stacklevel=4)
figsize = None
grouped = data.groupby(by)
if column is not None:
grouped = grouped[column]
naxes = len(grouped)
fig, axes = _subplots(naxes=naxes, figsize=figsize,
sharex=sharex, sharey=sharey, ax=ax,
layout=layout)
_axes = _flatten(axes)
for i, (key, group) in enumerate(grouped):
ax = _axes[i]
if numeric_only and isinstance(group, DataFrame):
group = group._get_numeric_data()
plotf(group, ax, **kwargs)
ax.set_title(pprint_thing(key))
return fig, axes
def _grouped_plot_by_column(plotf, data, columns=None, by=None,
numeric_only=True, grid=False,
figsize=None, ax=None, layout=None,
return_type=None, **kwargs):
grouped = data.groupby(by)
if columns is None:
if not isinstance(by, (list, tuple)):
by = [by]
columns = data._get_numeric_data().columns.difference(by)
naxes = len(columns)
fig, axes = _subplots(naxes=naxes, sharex=True, sharey=True,
figsize=figsize, ax=ax, layout=layout)
_axes = _flatten(axes)
ax_values = []
for i, col in enumerate(columns):
ax = _axes[i]
gp_col = grouped[col]
keys, values = zip(*gp_col)
re_plotf = plotf(keys, values, ax, **kwargs)
ax.set_title(col)
ax.set_xlabel(pprint_thing(by))
ax_values.append(re_plotf)
ax.grid(grid)
from pandas.core.series import Series
result = Series(ax_values, index=columns)
# Return axes in multiplot case, maybe revisit later # 985
if return_type is None:
result = axes
byline = by[0] if len(by) == 1 else by
fig.suptitle('Boxplot grouped by %s' % byline)
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
return result
class BasePlotMethods(PandasObject):
def __init__(self, data):
self._data = data
def __call__(self, *args, **kwargs):
raise NotImplementedError
class SeriesPlotMethods(BasePlotMethods):
"""Series plotting accessor and method
Examples
--------
>>> s.plot.line()
>>> s.plot.bar()
>>> s.plot.hist()
Plotting methods can also be accessed by calling the accessor as a method
with the ``kind`` argument:
``s.plot(kind='line')`` is equivalent to ``s.plot.line()``
"""
def __call__(self, kind='line', ax=None,
figsize=None, use_index=True, title=None, grid=None,
legend=False, style=None, logx=False, logy=False,
loglog=False, xticks=None, yticks=None,
xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
label=None, secondary_y=False, **kwds):
return plot_series(self._data, kind=kind, ax=ax, figsize=figsize,
use_index=use_index, title=title, grid=grid,
legend=legend, style=style, logx=logx, logy=logy,
loglog=loglog, xticks=xticks, yticks=yticks,
xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize,
colormap=colormap, table=table, yerr=yerr,
xerr=xerr, label=label, secondary_y=secondary_y,
**kwds)
__call__.__doc__ = plot_series.__doc__
def line(self, **kwds):
"""
Line plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='line', **kwds)
def bar(self, **kwds):
"""
Vertical bar plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='bar', **kwds)
def barh(self, **kwds):
"""
Horizontal bar plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='barh', **kwds)
def box(self, **kwds):
"""
Boxplot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='box', **kwds)
def hist(self, bins=10, **kwds):
"""
Histogram
.. versionadded:: 0.17.0
Parameters
----------
        bins : integer, default 10
Number of histogram bins to be used
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='hist', bins=bins, **kwds)
def kde(self, **kwds):
"""
Kernel Density Estimate plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='kde', **kwds)
density = kde
def area(self, **kwds):
"""
Area plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='area', **kwds)
def pie(self, **kwds):
"""
Pie chart
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='pie', **kwds)
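# Hedged sketch (added, not original code): every accessor method above is a
# thin wrapper around ``Series.plot``, so the two spellings are equivalent:
#
# >>> import numpy as np, pandas as pd
# >>> s = pd.Series(np.random.randn(50))
# >>> s.plot.kde()
# >>> s.plot(kind='kde')      # same plot, routed through __call__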
class FramePlotMethods(BasePlotMethods):
"""DataFrame plotting accessor and method
Examples
--------
>>> df.plot.line()
>>> df.plot.scatter('x', 'y')
>>> df.plot.hexbin()
These plotting methods can also be accessed by calling the accessor as a
method with the ``kind`` argument:
``df.plot(kind='line')`` is equivalent to ``df.plot.line()``
"""
def __call__(self, x=None, y=None, kind='line', ax=None,
subplots=False, sharex=None, sharey=False, layout=None,
figsize=None, use_index=True, title=None, grid=None,
legend=True, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
secondary_y=False, sort_columns=False, **kwds):
return plot_frame(self._data, kind=kind, x=x, y=y, ax=ax,
subplots=subplots, sharex=sharex, sharey=sharey,
layout=layout, figsize=figsize, use_index=use_index,
title=title, grid=grid, legend=legend, style=style,
logx=logx, logy=logy, loglog=loglog, xticks=xticks,
yticks=yticks, xlim=xlim, ylim=ylim, rot=rot,
fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr, secondary_y=secondary_y,
sort_columns=sort_columns, **kwds)
__call__.__doc__ = plot_frame.__doc__
def line(self, x=None, y=None, **kwds):
"""
Line plot
.. versionadded:: 0.17.0
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='line', x=x, y=y, **kwds)
def bar(self, x=None, y=None, **kwds):
"""
Vertical bar plot
.. versionadded:: 0.17.0
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='bar', x=x, y=y, **kwds)
def barh(self, x=None, y=None, **kwds):
"""
Horizontal bar plot
.. versionadded:: 0.17.0
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='barh', x=x, y=y, **kwds)
def box(self, by=None, **kwds):
r"""
Boxplot
.. versionadded:: 0.17.0
Parameters
----------
by : string or sequence
Column in the DataFrame to group by.
\*\*kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='box', by=by, **kwds)
def hist(self, by=None, bins=10, **kwds):
"""
Histogram
.. versionadded:: 0.17.0
Parameters
----------
by : string or sequence
Column in the DataFrame to group by.
        bins : integer, default 10
Number of histogram bins to be used
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='hist', by=by, bins=bins, **kwds)
def kde(self, **kwds):
"""
Kernel Density Estimate plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='kde', **kwds)
density = kde
def area(self, x=None, y=None, **kwds):
"""
Area plot
.. versionadded:: 0.17.0
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='area', x=x, y=y, **kwds)
def pie(self, y=None, **kwds):
"""
Pie chart
.. versionadded:: 0.17.0
Parameters
----------
y : label or position, optional
Column to plot.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='pie', y=y, **kwds)
def scatter(self, x, y, s=None, c=None, **kwds):
"""
Scatter plot
.. versionadded:: 0.17.0
Parameters
----------
        x, y : label or position
Coordinates for each point.
s : scalar or array_like, optional
Size of each point.
c : label or position, optional
Color of each point.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='scatter', x=x, y=y, c=c, s=s, **kwds)
def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None,
**kwds):
"""
Hexbin plot
.. versionadded:: 0.17.0
Parameters
----------
        x, y : label or position
Coordinates for each point.
C : label or position, optional
The value at each `(x, y)` point.
reduce_C_function : callable, optional
Function of one argument that reduces all the values in a bin to
a single number (e.g. `mean`, `max`, `sum`, `std`).
gridsize : int, optional
Number of bins.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
if reduce_C_function is not None:
kwds['reduce_C_function'] = reduce_C_function
if gridsize is not None:
kwds['gridsize'] = gridsize
return self(kind='hexbin', x=x, y=y, C=C, **kwds)
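    # Hedged usage sketch (added; the column names are made up): the ``C`` and
    # ``reduce_C_function`` plumbing above enables aggregated hexbin plots:
    #
    # >>> import numpy as np, pandas as pd
    # >>> df = pd.DataFrame({'x': np.random.randn(1000),
    # ...                    'y': np.random.randn(1000),
    # ...                    'z': np.random.rand(1000)})
    # >>> ax = df.plot.hexbin(x='x', y='y', C='z',
    # ...                     reduce_C_function=np.max, gridsize=25)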
| mit |
kiyoto/statsmodels | statsmodels/stats/tests/test_contingency_tables.py | 6 | 16501 | """
Tests for contingency table analyses.
"""
import numpy as np
import statsmodels.stats.contingency_tables as ctab
import pandas as pd
from numpy.testing import assert_allclose, assert_equal
import os
import statsmodels.api as sm
cur_dir = os.path.dirname(os.path.abspath(__file__))
fname = "contingency_table_r_results.csv"
fpath = os.path.join(cur_dir, 'results', fname)
r_results = pd.read_csv(fpath)
tables = [None, None, None]
tables[0] = np.asarray([[23, 15], [19, 31]])
tables[1] = np.asarray([[144, 33, 84, 126],
[2, 4, 14, 29],
[0, 2, 6, 25],
[0, 0, 1, 5]])
tables[2] = np.asarray([[20, 10, 5],
[3, 30, 15],
[0, 5, 40]])
def test_homogeneity():
for k,table in enumerate(tables):
st = sm.stats.SquareTable(table, shift_zeros=False)
hm = st.homogeneity()
assert_allclose(hm.statistic, r_results.loc[k, "homog_stat"])
assert_allclose(hm.df, r_results.loc[k, "homog_df"])
        # Test Bhapkar via its relationship to Stuart-Maxwell.
hmb = st.homogeneity(method="bhapkar")
assert_allclose(hmb.statistic, hm.statistic / (1 - hm.statistic / table.sum()))
def test_SquareTable_from_data():
np.random.seed(434)
df = pd.DataFrame(index=range(100), columns=["v1", "v2"])
df["v1"] = np.random.randint(0, 5, 100)
df["v2"] = np.random.randint(0, 5, 100)
table = pd.crosstab(df["v1"], df["v2"])
rslt1 = ctab.SquareTable(table)
rslt2 = ctab.SquareTable.from_data(df)
rslt3 = ctab.SquareTable(np.asarray(table))
assert_equal(rslt1.summary().as_text(),
rslt2.summary().as_text())
assert_equal(rslt2.summary().as_text(),
rslt3.summary().as_text())
def test_cumulative_odds():
table = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
table = np.asarray(table)
tbl_obj = ctab.Table(table)
cum_odds = tbl_obj.cumulative_oddsratios
assert_allclose(cum_odds[0, 0], 28 / float(5 * 11))
assert_allclose(cum_odds[0, 1], (3 * 15) / float(3 * 24), atol=1e-5,
rtol=1e-5)
assert_allclose(np.log(cum_odds), tbl_obj.cumulative_log_oddsratios,
atol=1e-5, rtol=1e-5)
def test_local_odds():
table = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
table = np.asarray(table)
tbl_obj = ctab.Table(table)
loc_odds = tbl_obj.local_oddsratios
assert_allclose(loc_odds[0, 0], 5 / 8.)
assert_allclose(loc_odds[0, 1], 12 / float(15), atol=1e-5,
rtol=1e-5)
assert_allclose(np.log(loc_odds), tbl_obj.local_log_oddsratios,
atol=1e-5, rtol=1e-5)
def test_stratified_table_cube():
"""
Test that we can pass a rank 3 ndarray or a list of rank 2
ndarrays to StratifiedTable and get the same results.
"""
tab1 = [[[8, 9], [6, 7]], [[4, 9], [5, 5]], [[8, 8], [9, 11]]]
tab2 = np.asarray(tab1).T
ct1 = ctab.StratifiedTable(tab1)
ct2 = ctab.StratifiedTable(tab2)
assert_allclose(ct1.oddsratio_pooled, ct2.oddsratio_pooled)
assert_allclose(ct1.logodds_pooled, ct2.logodds_pooled)
def test_resids():
# CHD x serum data
table = [[12, 8, 31, 41], [307, 246, 439, 245]]
# These results come from SAS
fit = [[22.083, 17.583, 32.536, 19.798],
[296.92, 236.42, 437.46, 266.2]]
c2 = [[4.6037, 5.223, 0.0725, 22.704],
[0.3424, 0.3885, 0.0054, 1.6886]]
# These are regression tests
pr = np.array([[-2.14562121, -2.28538719, -0.26923882, 4.7649169 ],
[ 0.58514314, 0.62325942, 0.07342547, -1.29946443]])
sr = np.array([[-2.55112945, -2.6338782 , -0.34712127, 5.5751083 ],
[ 2.55112945, 2.6338782 , 0.34712127, -5.5751083 ]])
tab = ctab.Table(table)
assert_allclose(tab.fittedvalues, fit, atol=1e-4, rtol=1e-4)
assert_allclose(tab.chi2_contribs, c2, atol=1e-4, rtol=1e-4)
assert_allclose(tab.resid_pearson, pr, atol=1e-4, rtol=1e-4)
assert_allclose(tab.standardized_resids, sr, atol=1e-4, rtol=1e-4)
def test_ordinal_association():
for k,table in enumerate(tables):
row_scores = 1 + np.arange(table.shape[0])
col_scores = 1 + np.arange(table.shape[1])
# First set of scores
rslt = ctab.Table(table, shift_zeros=False).test_ordinal_association(row_scores, col_scores)
assert_allclose(rslt.statistic, r_results.loc[k, "lbl_stat"])
assert_allclose(rslt.null_mean, r_results.loc[k, "lbl_expval"])
assert_allclose(rslt.null_sd**2, r_results.loc[k, "lbl_var"])
assert_allclose(rslt.zscore**2, r_results.loc[k, "lbl_chi2"], rtol=1e-5, atol=1e-5)
assert_allclose(rslt.pvalue, r_results.loc[k, "lbl_pvalue"], rtol=1e-5, atol=1e-5)
# Second set of scores
rslt = ctab.Table(table, shift_zeros=False).test_ordinal_association(row_scores, col_scores**2)
assert_allclose(rslt.statistic, r_results.loc[k, "lbl2_stat"])
assert_allclose(rslt.null_mean, r_results.loc[k, "lbl2_expval"])
assert_allclose(rslt.null_sd**2, r_results.loc[k, "lbl2_var"])
assert_allclose(rslt.zscore**2, r_results.loc[k, "lbl2_chi2"])
assert_allclose(rslt.pvalue, r_results.loc[k, "lbl2_pvalue"], rtol=1e-5, atol=1e-5)
def test_chi2_association():
np.random.seed(8743)
table = np.random.randint(10, 30, size=(4, 4))
from scipy.stats import chi2_contingency
rslt_scipy = chi2_contingency(table)
b = ctab.Table(table).test_nominal_association()
assert_allclose(b.statistic, rslt_scipy[0])
assert_allclose(b.pvalue, rslt_scipy[1])
def test_symmetry():
for k,table in enumerate(tables):
st = sm.stats.SquareTable(table, shift_zeros=False)
b = st.symmetry()
assert_allclose(b.statistic, r_results.loc[k, "bowker_stat"])
assert_equal(b.df, r_results.loc[k, "bowker_df"])
assert_allclose(b.pvalue, r_results.loc[k, "bowker_pvalue"])
def test_mcnemar():
# Use chi^2 without continuity correction
b1 = ctab.mcnemar(tables[0], exact=False, correction=False)
st = sm.stats.SquareTable(tables[0])
b2 = st.homogeneity()
assert_allclose(b1.statistic, b2.statistic)
assert_equal(b2.df, 1)
# Use chi^2 with continuity correction
b3 = ctab.mcnemar(tables[0], exact=False, correction=True)
assert_allclose(b3.pvalue, r_results.loc[0, "homog_cont_p"])
# Use binomial reference distribution
b4 = ctab.mcnemar(tables[0], exact=True)
assert_allclose(b4.pvalue, r_results.loc[0, "homog_binom_p"])
def test_cochranq():
"""
library(CVST)
table1 = matrix(c(1, 0, 1, 1,
0, 1, 1, 1,
1, 1, 1, 0,
0, 1, 0, 0,
0, 1, 0, 0,
1, 0, 1, 0,
0, 1, 0, 0,
1, 1, 1, 1,
0, 1, 0, 0), ncol=4, byrow=TRUE)
rslt1 = cochranq.test(table1)
table2 = matrix(c(0, 0, 1, 1, 0,
0, 1, 0, 1, 0,
0, 1, 1, 0, 1,
1, 0, 0, 0, 1,
1, 1, 0, 0, 0,
1, 0, 1, 0, 0,
0, 1, 0, 0, 0,
0, 0, 1, 1, 0,
0, 0, 0, 0, 0), ncol=5, byrow=TRUE)
rslt2 = cochranq.test(table2)
"""
table = [[1, 0, 1, 1],
[0, 1, 1, 1],
[1, 1, 1, 0],
[0, 1, 0, 0],
[0, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 0, 0],
[1, 1, 1, 1],
[0, 1, 0, 0]]
table = np.asarray(table)
stat, pvalue, df = ctab.cochrans_q(table, return_object=False)
assert_allclose(stat, 4.2)
assert_allclose(df, 3)
table = [[0, 0, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 0, 1],
[1, 0, 0, 0, 1],
[1, 1, 0, 0, 0],
[1, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 0, 0, 0, 0]]
table = np.asarray(table)
stat, pvalue, df = ctab.cochrans_q(table, return_object=False)
assert_allclose(stat, 1.2174, rtol=1e-4)
assert_allclose(df, 4)
# Cochran's q and Mcnemar are equivalent for 2x2 tables
data = table[:, 0:2]
xtab = np.asarray(pd.crosstab(data[:, 0], data[:, 1]))
b1 = ctab.cochrans_q(data, return_object=True)
b2 = ctab.mcnemar(xtab, exact=False, correction=False)
assert_allclose(b1.statistic, b2.statistic)
assert_allclose(b1.pvalue, b2.pvalue)
class CheckStratifiedMixin(object):
def initialize(self, tables):
self.rslt = ctab.StratifiedTable(tables)
self.rslt_0 = ctab.StratifiedTable(tables, shift_zeros=True)
tables_pandas = [pd.DataFrame(x) for x in tables]
self.rslt_pandas = ctab.StratifiedTable(tables_pandas)
def test_oddsratio_pooled(self):
assert_allclose(self.rslt.oddsratio_pooled, self.oddsratio_pooled,
rtol=1e-4, atol=1e-4)
def test_logodds_pooled(self):
assert_allclose(self.rslt.logodds_pooled, self.logodds_pooled,
rtol=1e-4, atol=1e-4)
def test_null_odds(self):
rslt = self.rslt.test_null_odds(correction=True)
assert_allclose(rslt.statistic, self.mh_stat, rtol=1e-4, atol=1e-5)
assert_allclose(rslt.pvalue, self.mh_pvalue, rtol=1e-4, atol=1e-4)
def test_oddsratio_pooled_confint(self):
lcb, ucb = self.rslt.oddsratio_pooled_confint()
assert_allclose(lcb, self.or_lcb, rtol=1e-4, atol=1e-4)
assert_allclose(ucb, self.or_ucb, rtol=1e-4, atol=1e-4)
def test_logodds_pooled_confint(self):
lcb, ucb = self.rslt.logodds_pooled_confint()
assert_allclose(lcb, np.log(self.or_lcb), rtol=1e-4,
atol=1e-4)
assert_allclose(ucb, np.log(self.or_ucb), rtol=1e-4,
atol=1e-4)
def test_equal_odds(self):
if not hasattr(self, "or_homog"):
return
rslt = self.rslt_0.test_equal_odds()
assert_allclose(rslt.statistic, self.or_homog, rtol=1e-4, atol=1e-4)
assert_allclose(rslt.pvalue, self.or_homog_p, rtol=1e-4, atol=1e-4)
def test_pandas(self):
assert_equal(self.rslt.summary().as_text(),
self.rslt_pandas.summary().as_text())
def test_from_data(self):
np.random.seed(241)
df = pd.DataFrame(index=range(100), columns=("v1", "v2", "strat"))
df["v1"] = np.random.randint(0, 2, 100)
df["v2"] = np.random.randint(0, 2, 100)
df["strat"] = np.kron(np.arange(10), np.ones(10))
tables = []
for k in range(10):
ii = np.arange(10*k, 10*(k+1))
tables.append(pd.crosstab(df.loc[ii, "v1"], df.loc[ii, "v2"]))
rslt1 = ctab.StratifiedTable(tables)
rslt2 = ctab.StratifiedTable.from_data("v1", "v2", "strat", df)
assert_equal(rslt1.summary().as_text(), rslt2.summary().as_text())
class TestStratified1(CheckStratifiedMixin):
"""
data = array(c(0, 0, 6, 5,
3, 0, 3, 6,
6, 2, 0, 4,
5, 6, 1, 0,
2, 5, 0, 0),
dim=c(2, 2, 5))
rslt = mantelhaen.test(data)
"""
def __init__(self):
tables = [None] * 5
tables[0] = np.array([[0, 0], [6, 5]])
tables[1] = np.array([[3, 0], [3, 6]])
tables[2] = np.array([[6, 2], [0, 4]])
tables[3] = np.array([[5, 6], [1, 0]])
tables[4] = np.array([[2, 5], [0, 0]])
self.initialize(tables)
self.oddsratio_pooled = 7
self.logodds_pooled = np.log(7)
self.mh_stat = 3.9286
self.mh_pvalue = 0.04747
self.or_lcb = 1.026713
self.or_ucb = 47.725133
class TestStratified2(CheckStratifiedMixin):
"""
data = array(c(20, 14, 10, 24,
15, 12, 3, 15,
3, 2, 3, 2,
12, 3, 7, 5,
1, 0, 3, 2),
dim=c(2, 2, 5))
rslt = mantelhaen.test(data)
"""
def __init__(self):
tables = [None] * 5
tables[0] = np.array([[20, 14], [10, 24]])
tables[1] = np.array([[15, 12], [3, 15]])
tables[2] = np.array([[3, 2], [3, 2]])
tables[3] = np.array([[12, 3], [7, 5]])
tables[4] = np.array([[1, 0], [3, 2]])
self.initialize(tables)
self.oddsratio_pooled = 3.5912
self.logodds_pooled = np.log(3.5912)
self.mh_stat = 11.8852
self.mh_pvalue = 0.0005658
self.or_lcb = 1.781135
self.or_ucb = 7.240633
class TestStratified3(CheckStratifiedMixin):
"""
data = array(c(313, 512, 19, 89,
207, 353, 8, 17,
205, 120, 391, 202,
278, 139, 244, 131,
138, 53, 299, 94,
351, 22, 317, 24),
dim=c(2, 2, 6))
rslt = mantelhaen.test(data)
"""
def __init__(self):
tables = [None] * 6
tables[0] = np.array([[313, 512], [19, 89]])
tables[1] = np.array([[207, 353], [8, 17]])
tables[2] = np.array([[205, 120], [391, 202]])
tables[3] = np.array([[278, 139], [244, 131]])
tables[4] = np.array([[138, 53], [299, 94]])
tables[5] = np.array([[351, 22], [317, 24]])
self.initialize(tables)
self.oddsratio_pooled = 1.101879
self.logodds_pooled = np.log(1.101879)
self.mh_stat = 1.3368
self.mh_pvalue = 0.2476
self.or_lcb = 0.9402012
self.or_ucb = 1.2913602
self.or_homog = 18.83297
self.or_homog_p = 0.002064786
class Check2x2Mixin(object):
def initialize(self):
self.tbl_obj = ctab.Table2x2(self.table)
self.tbl_data_obj = ctab.Table2x2.from_data(self.data)
def test_oddsratio(self):
assert_allclose(self.tbl_obj.oddsratio, self.oddsratio)
def test_log_oddsratio(self):
assert_allclose(self.tbl_obj.log_oddsratio, self.log_oddsratio)
def test_log_oddsratio_se(self):
assert_allclose(self.tbl_obj.log_oddsratio_se, self.log_oddsratio_se)
def test_oddsratio_pvalue(self):
assert_allclose(self.tbl_obj.oddsratio_pvalue(), self.oddsratio_pvalue)
def test_oddsratio_confint(self):
lcb1, ucb1 = self.tbl_obj.oddsratio_confint(0.05)
lcb2, ucb2 = self.oddsratio_confint
assert_allclose(lcb1, lcb2)
assert_allclose(ucb1, ucb2)
def test_riskratio(self):
assert_allclose(self.tbl_obj.riskratio, self.riskratio)
def test_log_riskratio(self):
assert_allclose(self.tbl_obj.log_riskratio, self.log_riskratio)
def test_log_riskratio_se(self):
assert_allclose(self.tbl_obj.log_riskratio_se, self.log_riskratio_se)
def test_riskratio_pvalue(self):
assert_allclose(self.tbl_obj.riskratio_pvalue(), self.riskratio_pvalue)
def test_riskratio_confint(self):
lcb1, ucb1 = self.tbl_obj.riskratio_confint(0.05)
lcb2, ucb2 = self.riskratio_confint
assert_allclose(lcb1, lcb2)
assert_allclose(ucb1, ucb2)
def test_log_riskratio_confint(self):
lcb1, ucb1 = self.tbl_obj.log_riskratio_confint(0.05)
lcb2, ucb2 = self.log_riskratio_confint
assert_allclose(lcb1, lcb2)
assert_allclose(ucb1, ucb2)
def test_from_data(self):
assert_equal(self.tbl_obj.summary().as_text(),
self.tbl_data_obj.summary().as_text())
class Test2x2_1(Check2x2Mixin):
def __init__(self):
data = np.zeros((8, 2))
data[:, 0] = [0, 0, 1, 1, 0, 0, 1, 1]
data[:, 1] = [0, 1, 0, 1, 0, 1, 0, 1]
self.data = np.asarray(data)
self.table = np.asarray([[2, 2], [2, 2]])
self.initialize()
self.oddsratio = 1.
self.log_oddsratio = 0.
self.log_oddsratio_se = np.sqrt(2)
self.oddsratio_confint = [0.062548836166112329, 15.987507702689751]
self.oddsratio_pvalue = 1.
self.riskratio = 1.
self.log_riskratio = 0.
self.log_riskratio_se = 1 / np.sqrt(2)
self.riskratio_pvalue = 1.
self.riskratio_confint = [0.25009765325990629,
3.9984381579173824]
self.log_riskratio_confint = [-1.3859038243496782,
1.3859038243496782]
| bsd-3-clause |
henrykironde/scikit-learn | examples/model_selection/randomized_search.py | 201 | 3214 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The resulting parameter settings are quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
HRZaheri/sklearn-theano | examples/plot_single_localization.py | 9 | 1558 | """
=======================================
Drawing bounding boxes for localization
=======================================
Drawing a bounding box on a detected object is crucial to properly finding
objects in images. One simple way to do this is to find all
points with a matching classification, then create a box using the minimum
and maximum values for X and Y of the matching points.
For more detail about how this works in detail, see the
``plot_localization_tutorial`` example.
"""
print(__doc__)
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from sklearn_theano.datasets import load_sample_image
from sklearn_theano.feature_extraction import OverfeatLocalizer
from sklearn_theano.feature_extraction import get_all_overfeat_labels
def convert_points_to_box(points, color, alpha):
upper_left_point = (points[:, 0].min(), points[:, 1].min())
width = points[:, 0].max() - points[:, 0].min()
height = points[:, 1].max() - points[:, 1].min()
return Rectangle(upper_left_point, width, height, ec=color,
fc=color, alpha=alpha)
X = load_sample_image("sloth.jpg")
sloth_label = [label for label in get_all_overfeat_labels()
if 'three-toed sloth' in label][0]
clf = OverfeatLocalizer(match_strings=[sloth_label])
sloth_points = clf.predict(X)[0]
sloth_box = convert_points_to_box(sloth_points, 'orange', .4)
plt.imshow(X)
ax = plt.gca()
ax.autoscale(enable=False)
ax.add_patch(sloth_box)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
plt.show()
| bsd-3-clause |
CoolProp/CoolProp | wrappers/Python/CoolProp/Plots/SimpleCyclesExpansion.py | 2 | 6080 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import numpy as np
import CoolProp
from .Common import process_fluid_state
from .SimpleCycles import BaseCycle, StateContainer
class BasePowerCycle(BaseCycle):
"""A thermodynamic cycle for power producing processes.
Defines the basic properties and methods to unify access to
power cycle-related quantities.
"""
def __init__(self, fluid_ref='HEOS::Water', graph_type='TS', **kwargs):
"""see :class:`CoolProp.Plots.SimpleCycles.BaseCycle` for details."""
BaseCycle.__init__(self, fluid_ref, graph_type, **kwargs)
    def eta_carnot(self):
        r"""Carnot efficiency
Calculates the Carnot efficiency for the specified process, :math:`\eta_c = 1 - \frac{T_c}{T_h}`.
Returns
-------
float
"""
Tvector = self._cycle_states.T
return 1. - np.min(Tvector) / np.max(Tvector)
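    # Hedged illustration (added, not original code): with cycle temperatures
    # of, say, 300 K and 700 K the expression above evaluates to
    #
    # >>> 1. - 300. / 700.     # -> 0.571428...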
    def eta_thermal(self):
        r"""Thermal efficiency
The thermal efficiency for the specified process(es), :math:`\eta_{th} = \frac{\dot{W}_{exp} - \dot{W}_{pum}}{\dot{Q}_{in}}`.
Returns
-------
float
"""
raise NotImplementedError("Implement it in the subclass.")
class SimpleRankineCycle(BasePowerCycle):
"""A simple Rankine cycle *without* regeneration"""
STATECOUNT = 4
STATECHANGE = [
lambda inp: BaseCycle.state_change(inp, 'S', 'P', 0, ty1='log', ty2='log'), # Pumping process
lambda inp: BaseCycle.state_change(inp, 'H', 'P', 1, ty1='lin', ty2='lin'), # Heat addition
lambda inp: BaseCycle.state_change(inp, 'H', 'P', 2, ty1='log', ty2='log'), # Expansion
lambda inp: BaseCycle.state_change(inp, 'H', 'P', 3, ty1='lin', ty2='lin') # Heat removal
]
def __init__(self, fluid_ref='HEOS::Water', graph_type='TS', **kwargs):
"""see :class:`CoolProp.Plots.SimpleCycles.BasePowerCycle` for details."""
BasePowerCycle.__init__(self, fluid_ref, graph_type, **kwargs)
    def simple_solve(self, T0, p0, T2, p2, eta_exp, eta_pum, fluid=None, SI=True):
        """
A simple Rankine cycle calculation
Parameters
----------
T0 : float
The coldest point, before the pump
p0 : float
The lowest pressure, before the pump
T2 : float
The hottest point, before the expander
p2 : float
The highest pressure, before the expander
eta_exp : float
Isentropic expander efficiency
eta_pum : float
Isentropic pump efficiency
Examples
--------
>>> import CoolProp
>>> from CoolProp.Plots import PropertyPlot
>>> from CoolProp.Plots import SimpleRankineCycle
>>> pp = PropertyPlot('HEOS::Water', 'TS', unit_system='EUR')
>>> pp.calc_isolines(CoolProp.iQ, num=11)
>>> cycle = SimpleRankineCycle('HEOS::Water', 'TS', unit_system='EUR')
>>> T0 = 300
>>> pp.state.update(CoolProp.QT_INPUTS,0.0,T0+15)
>>> p0 = pp.state.keyed_output(CoolProp.iP)
>>> T2 = 700
>>> pp.state.update(CoolProp.QT_INPUTS,1.0,T2-150)
>>> p2 = pp.state.keyed_output(CoolProp.iP)
>>> cycle.simple_solve(T0, p0, T2, p2, 0.7, 0.8, SI=True)
>>> cycle.steps = 50
>>> sc = cycle.get_state_changes()
>>> import matplotlib.pyplot as plt
>>> plt.close(cycle.figure)
>>> pp.draw_process(sc)
"""
if fluid is not None: self.state = process_fluid_state(fluid)
if self._state is None:
raise ValueError("You have to specify a fluid before you can calculate.")
cycle_states = StateContainer(unit_system=self._system)
if not SI:
Tc = self._system[CoolProp.iT].to_SI
pc = self._system[CoolProp.iP].to_SI
T0 = Tc(T0)
p0 = pc(p0)
T2 = Tc(T2)
p2 = pc(p2)
# Subcooled liquid
self.state.update(CoolProp.PT_INPUTS, p0, T0)
h0 = self.state.hmass()
s0 = self.state.smass()
# Just a showcase for the different accessor methods
cycle_states[0, 'H'] = h0
cycle_states[0]['S'] = s0
cycle_states[0][CoolProp.iP] = p0
cycle_states[0, CoolProp.iT] = T0
# Pressurised liquid
p1 = p2
self.state.update(CoolProp.PSmass_INPUTS, p1, s0)
h1 = h0 + (self.state.hmass() - h0) / eta_pum
self.state.update(CoolProp.HmassP_INPUTS, h1, p1)
s1 = self.state.smass()
T1 = self.state.T()
cycle_states[1, 'H'] = h1
cycle_states[1, 'S'] = s1
cycle_states[1, 'P'] = p1
cycle_states[1, 'T'] = T1
# Evaporated vapour
self.state.update(CoolProp.PT_INPUTS, p2, T2)
h2 = self.state.hmass()
s2 = self.state.smass()
cycle_states[2, 'H'] = h2
cycle_states[2, 'S'] = s2
cycle_states[2, 'P'] = p2
cycle_states[2, 'T'] = T2
# Expanded gas
p3 = p0
self.state.update(CoolProp.PSmass_INPUTS, p3, s2)
h3 = h2 - eta_exp * (h2 - self.state.hmass())
self.state.update(CoolProp.HmassP_INPUTS, h3, p3)
s3 = self.state.smass()
T3 = self.state.T()
cycle_states[3, 'H'] = h3
cycle_states[3, 'S'] = s3
cycle_states[3, 'P'] = p3
cycle_states[3, 'T'] = T3
w_net = h2 - h3
q_boiler = h2 - h1
eta_th = w_net / q_boiler
self.cycle_states = cycle_states
self.fill_states()
def eta_thermal(self):
"""Thermal efficiency
The thermal efficiency for the specified process(es), :math:`\eta_{th} = \frac{\dot{W}_{exp} - \dot{W}_{pum}}{\dot{Q}_{in}}`.
Returns
-------
float
"""
w_net = self.cycle_states[2].H - self.cycle_states[3].H - (self.cycle_states[1].H - self.cycle_states[0].H)
q_boiler = self.cycle_states[2].H - self.cycle_states[1].H
return w_net / q_boiler
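    # Sketch continuing the simple_solve() docstring example above (the exact
    # value depends on the HEOS backend, so no number is hard-coded here):
    # >>> cycle.simple_solve(T0, p0, T2, p2, 0.7, 0.8, SI=True)
    # >>> cycle.eta_thermal()  # doctest: +SKIP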
| mit |
aiguofer/bokeh | bokeh/charts/conftest.py | 12 | 1484 | """Defines chart-wide shared test fixtures."""
import numpy as np
import pandas as pd
import pytest
from bokeh.sampledata.autompg import autompg
class TestData(object):
"""Contains properties with easy access to data used across tests."""
def __init__(self):
self.cat_list = ['a', 'c', 'a', 'b']
self.list_data = [[1, 2, 3, 4], [2, 3, 4, 5]]
self.array_data = [np.array(item) for item in self.list_data]
self.dict_data = {'col1': self.list_data[0],
'col2': self.list_data[1]}
self.pd_data = pd.DataFrame(self.dict_data)
self.records_data = self.pd_data.to_dict(orient='records')
self.auto_data = autompg
self._setup_auto_mpg()
def _setup_auto_mpg(self):
# add a boolean column
self.auto_data['large_displ'] = self.auto_data['displ'] > 350
# add categorical column
cat = pd.Categorical.from_array(self.auto_data['cyl'])
new_order = list(reversed(sorted(cat.categories.values.tolist())))
self.auto_data['reversed_cyl'] = cat.reorder_categories(new_order)
@pytest.fixture(scope='module')
def test_data():
return TestData()
@pytest.fixture(scope='module')
def wide_data_with_cat(test_data):
data = test_data.dict_data.copy()
data['col3'] = test_data.cat_list
return data
@pytest.fixture(scope='module')
def df_with_cat_index(test_data):
return pd.DataFrame(test_data.dict_data, index=test_data.cat_list)
| bsd-3-clause |
theandygross/CancerData | src/Data/Firehose.py | 1 | 12019 | """
Created on Sep 4, 2012
Set of functions to read in data that has been formatted in the
BROAD GDAC Firehose data processing pipeline.
Nothing in here should depend on any other modules.
I am relying heavily on pandas for this project, so the main
goal of these functions is get data from the Firehose's tables
into Pandas data-structures that I can work with.
There is a little bit of pre-processing that is done to get the
files from Firehose into the local file-system in a reasonably
organized hierarchy, that is done at the time of data download
and in the run's initialization. These dependencies should
eventually be circumvented.
@author: agross
"""
import os as os
import numpy as np
import pandas as pd
def fix_barcode_columns(df, patients=None, tissue_code='All', get_batch=False):
"""
Takes TCGA barcode and reformats it into a MultiIndex if all tissue_codes
    are desired, or just pulls the correct tissue codes and filters the
DataFrame.
df: pandas DataFrame
patients: patient list to filter on
tissue_code: ['01','11','All'] #if all returns MultiIndex
"""
if get_batch is False:
df.columns = pd.MultiIndex.from_tuples([(i[:12], i[13:15]) for i
in df.columns])
else:
df.columns = pd.MultiIndex.from_tuples([(i[:12], i[13:15], i[21:24]) for i
in df.columns])
if patients is not None:
df = df.ix[:, patients]
if tissue_code != 'All':
try:
df = df.T.xs(tissue_code, level=1).T # pandas bug
df = df.groupby(axis=1, level=0).first()
        except KeyError:  # try a different cross-section
new_code = pd.value_counts(df.columns.get_level_values(1)).idxmax()
df = df.T.xs(new_code, level=1).T # pandas bug
df = df.groupby(axis=1, level=0).first()
else:
df = df.groupby(axis=1, level=[0, 1]).first()
return df
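# Example of the barcode slicing used above, with an illustrative (made-up)
# TCGA barcode:
# 'TCGA-A1-A0SB-01A-11R-A144-07'[:12] -> 'TCGA-A1-A0SB' (patient)
# 'TCGA-A1-A0SB-01A-11R-A144-07'[13:15] -> '01' (tissue code, primary tumor)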
def get_dataset_path(data_path, cancer, data_type, ext):
"""
This is a helper to get paths to a particular data-set.
In processing the data, we develop relatively complicated file hierarchies
to not have a ton of folders at the top level and make it easier to get
to files manually. This makes it a little tough to track down files
automatically so this function fills that role.
data_type: the top-level data-type of the file (i.e. rnaseqv2,
transcriptome,...)
ext: the file you are looking for (i.e. RSEM_genes_normalized,
junction_quantification, ...)
"""
stddata_path = data_path + 'stddata/' + cancer
if not os.path.isdir(stddata_path):
return
data_types = filter(lambda g: g.startswith(data_type),
os.listdir(stddata_path))
if data_type in data_types: # get the paths
paths = [f[0] for f in list(os.walk(stddata_path + '/' + data_type)) if
(ext + '/data') in f[0]]
else:
return
f = [path + '/' + f for path in paths for f in os.listdir(path)
if 'data' in f] # pull the data file
return f
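# Illustrative call (the cancer code and data_path are assumptions for this
# sketch): get_dataset_path(data_path, 'BRCA', 'rnaseqv2',
# 'RSEM_genes_normalized') returns the '.../RSEM_genes_normalized/data*'
# file paths under the stddata tree, which is how read_rnaSeq() below uses it.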
def get_mutation_matrix(data_path, cancer, tissue_code='01'):
"""
Get gene by patient mutation matrix.
Here I filter by the is_silent column in the MAF file,
so I am returning only non-silent mutations.
"""
path = '{}/analyses/{}/Mutation_Assessor/'.format(data_path, cancer)
f = [f for f in os.listdir(path) if f.endswith('.maf.annotated')][0]
maf = pd.read_table(path + f, low_memory=False)
maf = maf.dropna(how='all', axis=[0, 1])
maf = maf.set_index(['Hugo_Symbol', 'Tumor_Sample_Barcode'])
non_silent = maf[maf.is_silent == 0]
non_silent['counter'] = 1
hit_matrix = non_silent.counter.groupby(level=[0, 1]).sum().unstack()
hit_matrix = fix_barcode_columns(hit_matrix, tissue_code=tissue_code)
return hit_matrix
def get_submaf(data_path, cancer, genes='All', fields='basic'):
"""
Pull a sub-section of the MAF file for analysis.
genes: list of genes for which to return data
fields: ['basic', 'all']: if basic, returns reduced version of MAF
"""
path = '{}/analyses/{}/Mutation_Assessor/'.format(data_path, cancer)
f = [f for f in os.listdir(path) if f.endswith('.maf.annotated')][0]
maf = pd.read_table(path + f, low_memory=False)
maf = maf.dropna(how='all', axis=[0, 1])
maf['Tissue_Type'] = maf.Tumor_Sample_Barcode.map(lambda s: s[13:15])
maf.Tumor_Sample_Barcode = maf.Tumor_Sample_Barcode.map(lambda s: s[:12])
if genes != 'All':
maf = maf[maf.Hugo_Symbol.isin(genes)]
def get_allele(s):
alleles = [s['Tumor_Seq_Allele1'], s['Tumor_Seq_Allele2']]
return [a for a in alleles if a != s['Reference_Allele']][0]
maf['Alt_Allele'] = maf.apply(get_allele, 1)
if fields == 'basic':
maf = maf[['Hugo_Symbol', 'NCBI_Build', 'Chromosome', 'Start_position',
'End_position', 'Strand', 'Reference_Allele',
'Alt_Allele', 'Tumor_Sample_Barcode']]
maf = maf.set_index('Hugo_Symbol', append=True)
maf.index = maf.index.swaplevel(0, 1)
return maf
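# Worked example for get_allele above (row values assumed): with
# Reference_Allele='A', Tumor_Seq_Allele1='A' and Tumor_Seq_Allele2='T',
# the non-reference allele is kept, so Alt_Allele becomes 'T'.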
def get_gistic_gene_matrix(data_path, cancer, tissue_code='01'):
"""
Reads in gene by patient copy-number alteration matrix.
Index is MultiIndex with ['Cytoband', 'Locus ID', 'Gene Symbol']
on the levels.
"""
path = '{}/analyses/{}/CopyNumber_Gistic2/'.format(data_path, cancer)
gistic = pd.read_table(path + 'all_thresholded.by_genes.txt',
index_col=[2, 1, 0],
low_memory=False)
gistic = fix_barcode_columns(gistic, tissue_code=tissue_code)
return gistic
def get_gistic_arm_values(data_path, cancer, tissue_code='01'):
"""
Reads in arm by patient copy-number alteration matrix.
"""
path = '{}/analyses/{}/CopyNumber_Gistic2/'.format(data_path, cancer)
gistic = pd.read_table(path + 'broad_values_by_arm.txt', index_col=0,
low_memory=False)
gistic = fix_barcode_columns(gistic, tissue_code=tissue_code)
return gistic
def get_gistic_lesions(data_path, cancer, patients=None, tissue_code='01'):
"""
Reads in lesion by patient CNA matrix.
Returns thresholded calls as made by GISTIC2 in the Firehose pipeline.
"""
path = '{}/analyses/{}/CopyNumber_Gistic2/'.format(data_path, cancer)
gistic = pd.read_table(path + 'all_lesions.conf_99.txt', index_col=[0, 1],
low_memory=False)
lesions = gistic.select(lambda s: 'TCGA' in s, axis=1)
lesions = lesions.select(lambda s: 'values' not in s[0], axis=0)
from_tuples = pd.MultiIndex.from_tuples
lesions.index = from_tuples([(s[0].split(' ')[0], s[1].strip(), 'Lesion')
for s in lesions.index])
lesions = lesions.groupby(level=[0, 1, 2]).first()
if 'Deletion' in lesions.index.get_level_values(0):
lesions.T['Deletion'] = (lesions.T['Deletion'] * -1).replace(-0, 0)
lesions = fix_barcode_columns(lesions, patients, tissue_code)
return lesions
def get_gistic(cancer_name, data_path, filter_with_rna=True,
collapse_on_bands=True, min_patients=5):
lesions = get_gistic_lesions(cancer_name, data_path)
return lesions
def read_rppa(data_path, cancer, patients=None, tissue_code='01'):
"""
Reads in antibody by patient reverse-phase protein array matrix.
Use for more recent firehose runs.
"""
files = get_dataset_path(data_path, cancer, 'protein_exp',
'protein_normalization')
if files is None:
return
else:
f = files[0]
rppa = pd.read_table(f, index_col=0, low_memory=False,
skiprows=[1])
rppa = fix_barcode_columns(rppa, tissue_code=tissue_code)
return rppa
def read_rppa_ann(data_path, cancer, patients=None, tissue_code='01'):
"""
This is a function for reading the old format tagged with
"annotated_with_gene".
Reads in antibody by patient reverse-phase protein array matrix.
Index is MultiIndex with ['protien','antibody'] on the levels.
"""
path = '{}/stddata/{}/RPPA_AnnotateWithGene/'.format(data_path, cancer)
rppa = pd.read_table(path + cancer + '.rppa.txt', index_col=0,
low_memory=False)
rppa['protien'] = rppa.index.map(lambda s: s.split('|')[0])
rppa['antibody'] = rppa.index.map(lambda s: s.split('|')[1])
rppa = rppa.set_index(['protien', 'antibody'])
rppa = fix_barcode_columns(rppa, tissue_code=tissue_code)
return rppa
def read_rnaSeq(data_path, cancer, patients=None, average_on_genes=True,
tissue_code='01', get_batch=False):
"""
Reads in gene by patient rnaSeq mRNA expression matrix.
Data is log-transformed and a lower bound of -3 (1/8 read per million)
is set.
"""
files = get_dataset_path(data_path, cancer, 'rnaseqv2',
'RSEM_genes_normalized')
if files is None:
return
rnaSeq = pd.concat([pd.read_table(f, index_col=0, skiprows=[1],
low_memory=False)
for f in files])
rnaSeq = np.log2(rnaSeq).replace(-np.inf, -3.) # close enough to 0
if average_on_genes: # Pretty much all duplicates are unknown ('?')
rnaSeq = rnaSeq.groupby(by=lambda n: n.split('|')[0]).mean()
rnaSeq = fix_barcode_columns(rnaSeq, patients, tissue_code, get_batch)
return rnaSeq
def read_rnaSeq_splice_junctions(data_path, cancer, patients=None,
tissue_code='01'):
"""
Reads in gene by patient rnaSeq mRNA splice junction matrix.
Values are raw counts.
"""
files = get_dataset_path(data_path, cancer, 'rnaseqv2',
'junction_quantification')
if files is None:
return
rnaSeq = pd.concat([pd.read_table(f, index_col=0, skiprows=[1])
for f in files])
rnaSeq = fix_barcode_columns(rnaSeq, patients, tissue_code)
return rnaSeq
def read_mrna(data_path, cancer, patients=None, tissue_code='01'):
"""
Reads in gene by patient microarray gene expression.
"""
files = get_dataset_path(data_path, cancer, 'transcriptome',
'unc_lowess_normalization_gene_level')
if files is None:
return
mrna = pd.concat([pd.read_table(f, index_col=0, skiprows=[1],
na_values=['null'],
low_memory=False)
for f in files])
mrna = fix_barcode_columns(mrna, patients, tissue_code)
return mrna
def read_miRNASeq(data_path, cancer, patients=None, tissue_code='01'):
"""
Reads in miRNA by patient miRNASeq matrix.
Data is log-transformed and a lower bound of -3 (1/8 read per million)
is set.
They often use both HiSeq and GA2 for this, so I'm merging the two,
not entirely sure if that's Kosher.
"""
stddata_path = data_path + 'stddata/' + cancer
paths = [f[0] for f in list(os.walk(stddata_path + '/mirnaseq'))
if 'miR_gene_expression/data' in f[0]]
data = []
for path in paths:
f = [f for f in os.listdir(path) if 'data' in f][0]
mirna = pd.read_table(path + '/' + f, index_col=0, header=None,
low_memory=False)
data.append(mirna)
mirna = pd.concat(data, axis=1)
mirna = mirna.T.set_index(['miRNA_ID', 'Hybridization REF'])
mirna = mirna.sortlevel(level=0).ix['reads_per_million_miRNA_mapped']
mirna = np.log2(mirna.astype(float)).replace(-np.inf, -3.) # close to 0
mirna = mirna.T
mirna = mirna.groupby(level=0, axis=1).last() # Use the HiSeq data over GA2
mirna = fix_barcode_columns(mirna, patients, tissue_code)
return mirna
| mit |
iamkingmaker/zipline | zipline/utils/cli.py | 4 | 8497 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import argparse
from copy import copy
from six import print_
from six.moves import configparser
import pandas as pd
try:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
PYGMENTS = True
except ImportError:
PYGMENTS = False
import zipline
from zipline.errors import NoSourceError, PipelineDateError
DEFAULTS = {
'data_frequency': 'daily',
'capital_base': '10e6',
'source': 'yahoo',
'symbols': 'AAPL',
'metadata_index': 'symbol',
'source_time_column': 'Date',
}
def parse_args(argv, ipython_mode=False):
"""Parse list of arguments.
If a config file is provided (via -c), it will read in the
supplied options and overwrite any global defaults.
All other directly supplied arguments will overwrite the config
file settings.
Arguments:
* argv : list of strings
List of arguments, e.g. ['-c', 'my.conf']
    * ipython_mode : bool <default=False>
Whether to parse IPython specific arguments
like --local_namespace
Notes:
Default settings can be found in zipline.utils.cli.DEFAULTS.
"""
# Parse any conf_file specification
# We make this parser with add_help=False so that
# it doesn't parse -h and print help.
conf_parser = argparse.ArgumentParser(
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
# Turn off help, so we print all options in response to -h
add_help=False
)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file",
metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(argv)
defaults = copy(DEFAULTS)
if args.conf_file:
config = configparser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("Defaults")))
# Parse rest of arguments
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
description="Zipline version %s." % zipline.__version__,
parents=[conf_parser]
)
parser.set_defaults(**defaults)
parser.add_argument('--algofile', '-f')
parser.add_argument('--data-frequency',
choices=('minute', 'daily'))
parser.add_argument('--start', '-s')
parser.add_argument('--end', '-e')
parser.add_argument('--capital_base')
parser.add_argument('--source', '-d', choices=('yahoo',))
parser.add_argument('--source_time_column', '-t')
parser.add_argument('--symbols')
parser.add_argument('--output', '-o')
parser.add_argument('--metadata_path', '-m')
parser.add_argument('--metadata_index', '-x')
parser.add_argument('--print-algo', '-p', dest='print_algo',
action='store_true')
parser.add_argument('--no-print-algo', '-q', dest='print_algo',
action='store_false')
if ipython_mode:
parser.add_argument('--local_namespace', action='store_true')
args = parser.parse_args(remaining_argv)
return(vars(args))
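# A minimal conf_file for the -c option could look like the sketch below
# (file name and values are assumptions, not shipped defaults); keys in the
# [Defaults] section are merged over DEFAULTS and then into the argparse
# namespace above:
#
# # my.conf
# [Defaults]
# algofile=my_algo.py
# symbols=AAPL,MSFT
# start=2012-01-01
# end=2013-01-01
# capital_base=1e5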
def parse_cell_magic(line, cell):
"""Parse IPython magic
"""
args_list = line.split(' ')
args = parse_args(args_list, ipython_mode=True)
local_namespace = args.pop('local_namespace', False)
# By default, execute inside IPython namespace
if not local_namespace:
args['namespace'] = get_ipython().user_ns # flake8: noqa
# If we are running inside NB, do not output to file but create a
# variable instead
output_var_name = args.pop('output', None)
perf = run_pipeline(print_algo=False, algo_text=cell, **args)
if output_var_name is not None:
get_ipython().user_ns[output_var_name] = perf # flake8: noqa
def run_pipeline(print_algo=True, **kwargs):
"""Runs a full zipline pipeline given configuration keyword
arguments.
    1. Load data (start and end dates can be provided as strings, as
    well as the source and symbols).
2. Instantiate algorithm (supply either algo_text or algofile
kwargs containing initialize() and handle_data() functions). If
algofile is supplied, will try to look for algofile_analyze.py and
append it.
3. Run algorithm (supply capital_base as float).
4. Return performance dataframe.
:Arguments:
* print_algo : bool <default=True>
Whether to print the algorithm to command line. Will use
pygments syntax coloring if pygments is found.
"""
start = kwargs['start']
end = kwargs['end']
# Compare against None because strings/timestamps may have been given
if start is not None:
start = pd.Timestamp(start, tz='UTC')
if end is not None:
end = pd.Timestamp(end, tz='UTC')
# Fail out if only one bound is provided
if ((start is None) or (end is None)) and (start != end):
raise PipelineDateError(start=start, end=end)
# Check if start and end are provided, and if the sim_params need to read
# a start and end from the DataSource
if start is None:
overwrite_sim_params = True
else:
overwrite_sim_params = False
symbols = kwargs['symbols'].split(',')
asset_identifier = kwargs['metadata_index']
# Pull asset metadata
asset_metadata = kwargs.get('asset_metadata', None)
asset_metadata_path = kwargs['metadata_path']
# Read in a CSV file, if applicable
if asset_metadata_path is not None:
if os.path.isfile(asset_metadata_path):
asset_metadata = pd.read_csv(asset_metadata_path,
index_col=asset_identifier)
source_arg = kwargs['source']
source_time_column = kwargs['source_time_column']
if source_arg is None:
raise NoSourceError()
elif source_arg == 'yahoo':
source = zipline.data.load_bars_from_yahoo(
stocks=symbols, start=start, end=end)
elif os.path.isfile(source_arg):
source = zipline.data.load_prices_from_csv(
filepath=source_arg,
identifier_col=source_time_column
)
elif os.path.isdir(source_arg):
source = zipline.data.load_prices_from_csv_folder(
folderpath=source_arg,
identifier_col=source_time_column
)
else:
raise NotImplementedError(
'Source %s not implemented.' % kwargs['source'])
algo_text = kwargs.get('algo_text', None)
if algo_text is None:
# Expect algofile to be set
algo_fname = kwargs['algofile']
with open(algo_fname, 'r') as fd:
algo_text = fd.read()
analyze_fname = os.path.splitext(algo_fname)[0] + '_analyze.py'
if os.path.exists(analyze_fname):
with open(analyze_fname, 'r') as fd:
# Simply append
algo_text += fd.read()
if print_algo:
if PYGMENTS:
highlight(algo_text, PythonLexer(), TerminalFormatter(),
outfile=sys.stdout)
else:
print_(algo_text)
algo = zipline.TradingAlgorithm(script=algo_text,
namespace=kwargs.get('namespace', {}),
capital_base=float(kwargs['capital_base']),
algo_filename=kwargs.get('algofile'),
asset_metadata=asset_metadata,
identifiers=symbols,
start=start,
end=end)
perf = algo.run(source, overwrite_sim_params=overwrite_sim_params)
output_fname = kwargs.get('output', None)
if output_fname is not None:
perf.to_pickle(output_fname)
return perf
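# Usage sketch (argument values are assumptions): run_pipeline() accepts the
# same keys that parse_args() produces, e.g.
# perf = run_pipeline(algofile='my_algo.py', source='yahoo', symbols='AAPL',
#                     start='2012-01-01', end='2013-01-01',
#                     capital_base='10e6', metadata_path=None,
#                     metadata_index='symbol', source_time_column='Date',
#                     output=None)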
| apache-2.0 |
zfrenchee/pandas | pandas/tests/io/json/test_pandas.py | 1 | 50234 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import pytest
from pandas.compat import (range, lrange, StringIO,
OrderedDict, is_platform_32bit)
import os
import numpy as np
from pandas import (Series, DataFrame, DatetimeIndex, Timestamp,
read_json, compat)
from datetime import timedelta
import pandas as pd
import json
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
assert_series_equal, network,
ensure_clean, assert_index_equal)
import pandas.util.testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
_intframe = DataFrame(dict((k, v.astype(np.int64))
for k, v in compat.iteritems(_seriesd)))
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ['bah'] * 5 + ['bar'] * 5 + ['baz'] * \
5 + ['foo'] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name='E')
_cat_frame['E'] = list(reversed(cat))
_cat_frame['sort'] = np.arange(len(_cat_frame), dtype='int64')
_mixed_frame = _frame.copy()
class TestPandasContainer(object):
def setup_method(self, method):
self.dirpath = tm.get_data_path()
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty_series = Series([], index=[])
self.empty_frame = DataFrame({})
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
def teardown_method(self, method):
del self.dirpath
del self.ts
del self.series
del self.objSeries
del self.empty_series
del self.empty_frame
del self.frame
del self.frame2
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self):
df = DataFrame([['a', 'b'], ['c', 'd']],
index=['index " 1', 'index / 2'],
columns=['a \\ b', 'y / z'])
assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split'))
assert_frame_equal(df, read_json(df.to_json(orient='columns'),
orient='columns'))
assert_frame_equal(df, read_json(df.to_json(orient='index'),
orient='index'))
df_unser = read_json(df.to_json(orient='records'), orient='records')
assert_index_equal(df.columns, df_unser.columns)
tm.assert_numpy_array_equal(df.values, df_unser.values)
def test_frame_non_unique_index(self):
df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 1],
columns=['x', 'y'])
pytest.raises(ValueError, df.to_json, orient='index')
pytest.raises(ValueError, df.to_json, orient='columns')
assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split'))
unser = read_json(df.to_json(orient='records'), orient='records')
tm.assert_index_equal(df.columns, unser.columns)
tm.assert_almost_equal(df.values, unser.values)
unser = read_json(df.to_json(orient='values'), orient='values')
tm.assert_numpy_array_equal(df.values, unser.values)
def test_frame_non_unique_columns(self):
df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 2],
columns=['x', 'x'])
pytest.raises(ValueError, df.to_json, orient='index')
pytest.raises(ValueError, df.to_json, orient='columns')
pytest.raises(ValueError, df.to_json, orient='records')
assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split', dtype=False))
unser = read_json(df.to_json(orient='values'), orient='values')
tm.assert_numpy_array_equal(df.values, unser.values)
# GH4377; duplicate columns not processing correctly
df = DataFrame([['a', 'b'], ['c', 'd']], index=[
1, 2], columns=['x', 'y'])
result = read_json(df.to_json(orient='split'), orient='split')
assert_frame_equal(result, df)
def _check(df):
result = read_json(df.to_json(orient='split'), orient='split',
convert_dates=['x'])
assert_frame_equal(result, df)
for o in [[['a', 'b'], ['c', 'd']],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp('20130101'), 3.5],
[Timestamp('20130102'), 4.5]]]:
_check(DataFrame(o, index=[1, 2], columns=['x', 'x']))
def test_frame_from_json_to_json(self):
def _check_orient(df, orient, dtype=None, numpy=False,
convert_axes=True, check_dtype=True, raise_ok=None,
sort=None, check_index_type=True,
check_column_type=True, check_numpy_dtype=False):
if sort is not None:
df = df.sort_values(sort)
else:
df = df.sort_index()
# if we are not unique, then check that we are raising ValueError
# for the appropriate orients
if not df.index.is_unique and orient in ['index', 'columns']:
pytest.raises(
ValueError, lambda: df.to_json(orient=orient))
return
if (not df.columns.is_unique and
orient in ['index', 'columns', 'records']):
pytest.raises(
ValueError, lambda: df.to_json(orient=orient))
return
dfjson = df.to_json(orient=orient)
try:
unser = read_json(dfjson, orient=orient, dtype=dtype,
numpy=numpy, convert_axes=convert_axes)
except Exception as detail:
if raise_ok is not None:
if isinstance(detail, raise_ok):
return
raise
if sort is not None and sort in unser.columns:
unser = unser.sort_values(sort)
else:
unser = unser.sort_index()
if dtype is False:
check_dtype = False
if not convert_axes and df.index.dtype.type == np.datetime64:
unser.index = DatetimeIndex(
unser.index.values.astype('i8') * 1e6)
if orient == "records":
# index is not captured in this orientation
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
tm.assert_index_equal(df.columns, unser.columns,
exact=check_column_type)
elif orient == "values":
# index and cols are not captured in this orientation
if numpy is True and df.shape == (0, 0):
assert unser.shape[0] == 0
else:
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
elif orient == "split":
# index and col labels might not be strings
unser.index = [str(i) for i in unser.index]
unser.columns = [str(i) for i in unser.columns]
if sort is None:
unser = unser.sort_index()
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
else:
if convert_axes:
tm.assert_frame_equal(df, unser, check_dtype=check_dtype,
check_index_type=check_index_type,
check_column_type=check_column_type)
else:
tm.assert_frame_equal(df, unser, check_less_precise=False,
check_dtype=check_dtype)
def _check_all_orients(df, dtype=None, convert_axes=True,
raise_ok=None, sort=None, check_index_type=True,
check_column_type=True):
# numpy=False
if convert_axes:
_check_orient(df, "columns", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "records", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "split", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "index", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "values", dtype=dtype, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "columns", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "records", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "split", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "index", dtype=dtype,
convert_axes=False, sort=sort)
_check_orient(df, "values", dtype=dtype,
convert_axes=False, sort=sort)
# numpy=True and raise_ok might be not None, so ignore the error
if convert_axes:
_check_orient(df, "columns", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "records", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "split", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "index", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "values", dtype=dtype, numpy=True,
raise_ok=raise_ok, sort=sort,
check_index_type=False, check_column_type=False)
_check_orient(df, "columns", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "records", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "split", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "index", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
_check_orient(df, "values", dtype=dtype, numpy=True,
convert_axes=False, raise_ok=raise_ok, sort=sort)
# basic
_check_all_orients(self.frame)
assert self.frame.to_json() == self.frame.to_json(orient="columns")
_check_all_orients(self.intframe, dtype=self.intframe.values.dtype)
_check_all_orients(self.intframe, dtype=False)
# big one
# index and columns are strings as all unserialised JSON object keys
# are assumed to be strings
biggie = DataFrame(np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)])
_check_all_orients(biggie, dtype=False, convert_axes=False)
# dtypes
_check_all_orients(DataFrame(biggie, dtype=np.float64),
dtype=np.float64, convert_axes=False)
_check_all_orients(DataFrame(biggie, dtype=np.int), dtype=np.int,
convert_axes=False)
_check_all_orients(DataFrame(biggie, dtype='U3'), dtype='U3',
convert_axes=False, raise_ok=ValueError)
# categorical
_check_all_orients(self.categorical, sort='sort', raise_ok=ValueError)
# empty
_check_all_orients(self.empty_frame, check_index_type=False,
check_column_type=False)
# time series data
_check_all_orients(self.tsframe)
# mixed data
index = pd.Index(['a', 'b', 'c', 'd', 'e'])
data = {'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': [True, False, True, False, True]}
df = DataFrame(data=data, index=index)
_check_orient(df, "split", check_dtype=False)
_check_orient(df, "records", check_dtype=False)
_check_orient(df, "values", check_dtype=False)
_check_orient(df, "columns", check_dtype=False)
        # index oriented is problematic as it is read back in, in a transposed
# state, so the columns are interpreted as having mixed data and
# given object dtypes.
# force everything to have object dtype beforehand
_check_orient(df.transpose().transpose(), "index", dtype=False)
def test_frame_from_json_bad_data(self):
pytest.raises(ValueError, read_json, StringIO('{"key":b:a:d}'))
# too few indices
json = StringIO('{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
pytest.raises(ValueError, read_json, json,
orient="split")
# too many columns
json = StringIO('{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
pytest.raises(AssertionError, read_json, json,
orient="split")
# bad key
json = StringIO('{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
with tm.assert_raises_regex(ValueError,
r"unexpected key\(s\): badkey"):
read_json(json, orient="split")
def test_frame_from_json_nones(self):
df = DataFrame([[1, 2], [4, 5, 6]])
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
df = DataFrame([['1', '2'], ['4', '5', '6']])
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert unser[2][0] is None
unser = read_json(df.to_json(), convert_axes=False, dtype=False)
assert unser['2']['0'] is None
unser = read_json(df.to_json(), numpy=False)
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), numpy=False, dtype=False)
assert unser[2][0] is None
unser = read_json(df.to_json(), numpy=False,
convert_axes=False, dtype=False)
assert unser['2']['0'] is None
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = np.inf
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert np.isnan(unser[2][0])
df.loc[0, 2] = np.NINF
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert np.isnan(unser[2][0])
@pytest.mark.skipif(is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_frame_to_json_float_precision(self):
df = pd.DataFrame([dict(a_float=0.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=1.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":2.0}}'
df = pd.DataFrame([dict(a_float=-1.95)])
encoded = df.to_json(double_precision=1)
assert encoded == '{"a_float":{"0":-2.0}}'
df = pd.DataFrame([dict(a_float=0.995)])
encoded = df.to_json(double_precision=2)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=0.9995)])
encoded = df.to_json(double_precision=3)
assert encoded == '{"a_float":{"0":1.0}}'
df = pd.DataFrame([dict(a_float=0.99999999999999944)])
encoded = df.to_json(double_precision=15)
assert encoded == '{"a_float":{"0":1.0}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
pytest.raises(ValueError, df.to_json, orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=['jim', 'joe'])
assert not df._is_mixed_type
assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df,
check_index_type=False)
# GH 7445
result = pd.DataFrame({'test': []}, index=[]).to_json(orient='columns')
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=['jim', 'joe'])
df['joe'] = df['joe'].astype('i8')
assert df._is_mixed_type
assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df,
check_index_type=False)
def test_frame_mixedtype_orient(self): # GH10289
vals = [[10, 1, 'foo', .1, .01],
[20, 2, 'bar', .2, .02],
[30, 3, 'baz', .3, .03],
[40, 4, 'qux', .4, .04]]
df = DataFrame(vals, index=list('abcd'),
columns=['1st', '2nd', '3rd', '4th', '5th'])
assert df._is_mixed_type
right = df.copy()
for orient in ['split', 'index', 'columns']:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient='records')
left = read_json(inp, orient='records', convert_axes=False)
assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient='values')
left = read_json(inp, orient='values', convert_axes=False)
assert_frame_equal(left, right)
def test_v12_compat(self):
df = DataFrame(
[[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478]],
columns=['A', 'B', 'C', 'D'],
index=pd.date_range('2000-01-03', '2000-01-07'))
df['date'] = pd.Timestamp('19920106 18:21:32.12')
df.iloc[3, df.columns.get_loc('date')] = pd.Timestamp('20130101')
df['modified'] = df['date']
df.iloc[1, df.columns.get_loc('modified')] = pd.NaT
v12_json = os.path.join(self.dirpath, 'tsframe_v012.json')
df_unser = pd.read_json(v12_json)
assert_frame_equal(df, df_unser)
df_iso = df.drop(['modified'], axis=1)
v12_iso_json = os.path.join(self.dirpath, 'tsframe_iso_v012.json')
df_unser_iso = pd.read_json(v12_iso_json)
assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
index = pd.date_range('20000101', periods=10, freq='H')
df_mixed = DataFrame(OrderedDict(
float_1=[-0.92077639, 0.77434435, 1.25234727, 0.61485564,
-0.60316077, 0.24653374, 0.28668979, -2.51969012,
0.95748401, -1.02970536],
int_1=[19680418, 75337055, 99973684, 65103179, 79373900,
40314334, 21290235, 4991321, 41903419, 16008365],
str_1=['78c608f1', '64a99743', '13d2ff52', 'ca7f4af2', '97236474',
'bde7e214', '1a6bde47', 'b1190be5', '7a669144', '8d64d068'],
float_2=[-0.0428278, -1.80872357, 3.36042349, -0.7573685,
-0.48217572, 0.86229683, 1.08935819, 0.93898739,
-0.03030452, 1.43366348],
str_2=['14f04af9', 'd085da90', '4bcfac83', '81504caf', '2ffef4a9',
'08e2f5c4', '07e1af03', 'addbd4a7', '1f6a09ba', '4bfc4d87'],
int_2=[86967717, 98098830, 51927505, 20372254, 12601730, 20884027,
34193846, 10561746, 24867120, 76131025]
), index=index)
# JSON deserialisation always creates unicode strings
df_mixed.columns = df_mixed.columns.astype('unicode')
df_roundtrip = pd.read_json(df_mixed.to_json(orient='split'),
orient='split')
assert_frame_equal(df_mixed, df_roundtrip,
check_index_type=True,
check_column_type=True,
check_frame_type=True,
by_blocks=True,
check_exact=True)
def test_frame_nonprintable_bytes(self):
# GH14256: failing column caused segfaults, if it is not the last one
class BinaryThing(object):
def __init__(self, hexed):
self.hexed = hexed
if compat.PY2:
self.binary = hexed.decode('hex')
else:
self.binary = bytes.fromhex(hexed)
def __str__(self):
return self.hexed
hexed = '574b4454ba8c5eb4f98a8f45'
binthing = BinaryThing(hexed)
# verify the proper conversion of printable content
df_printable = DataFrame({'A': [binthing.hexed]})
assert df_printable.to_json() == \
'{{"A":{{"0":"{hex}"}}}}'.format(hex=hexed)
# check if non-printable content throws appropriate Exception
df_nonprintable = DataFrame({'A': [binthing]})
with pytest.raises(OverflowError):
df_nonprintable.to_json()
# the same with multiple columns threw segfaults
df_mixed = DataFrame({'A': [binthing], 'B': [1]},
columns=['A', 'B'])
with pytest.raises(OverflowError):
df_mixed.to_json()
# default_handler should resolve exceptions for non-string types
assert df_nonprintable.to_json(default_handler=str) == \
'{{"A":{{"0":"{hex}"}}}}'.format(hex=hexed)
assert df_mixed.to_json(default_handler=str) == \
'{{"A":{{"0":"{hex}"}},"B":{{"0":1}}}}'.format(hex=hexed)
def test_label_overflow(self):
# GH14256: buffer length not checked when writing label
df = pd.DataFrame({'foo': [1337], 'bar' * 100000: [1]})
assert df.to_json() == \
'{{"{bar}":{{"0":1}},"foo":{{"0":1337}}}}'.format(
bar=('bar' * 100000))
def test_series_non_unique_index(self):
s = Series(['a', 'b'], index=[1, 1])
pytest.raises(ValueError, s.to_json, orient='index')
assert_series_equal(s, read_json(s.to_json(orient='split'),
orient='split', typ='series'))
unser = read_json(s.to_json(orient='records'),
orient='records', typ='series')
tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_from_json_to_json(self):
def _check_orient(series, orient, dtype=None, numpy=False,
check_index_type=True):
series = series.sort_index()
unser = read_json(series.to_json(orient=orient),
typ='series', orient=orient, numpy=numpy,
dtype=dtype)
unser = unser.sort_index()
if orient == "records" or orient == "values":
assert_almost_equal(series.values, unser.values)
else:
if orient == "split":
assert_series_equal(series, unser,
check_index_type=check_index_type)
else:
assert_series_equal(series, unser, check_names=False,
check_index_type=check_index_type)
def _check_all_orients(series, dtype=None, check_index_type=True):
_check_orient(series, "columns", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "records", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "split", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "index", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "values", dtype=dtype)
_check_orient(series, "columns", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "records", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "split", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "index", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "values", dtype=dtype, numpy=True,
check_index_type=check_index_type)
# basic
_check_all_orients(self.series)
assert self.series.to_json() == self.series.to_json(orient="index")
objSeries = Series([str(d) for d in self.objSeries],
index=self.objSeries.index,
name=self.objSeries.name)
_check_all_orients(objSeries, dtype=False)
# empty_series has empty index with object dtype
# which cannot be revert
assert self.empty_series.index.dtype == np.object_
_check_all_orients(self.empty_series, check_index_type=False)
_check_all_orients(self.ts)
# dtype
s = Series(lrange(6), index=['a', 'b', 'c', 'd', 'e', 'f'])
_check_all_orients(Series(s, dtype=np.float64), dtype=np.float64)
_check_all_orients(Series(s, dtype=np.int), dtype=np.int)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
pytest.raises(ValueError, s.to_json, orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ='series', precise_float=True)
assert_series_equal(result, s, check_index_type=False)
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
assert_frame_equal(result, df, check_index_type=False,
check_column_type=False)
def test_typ(self):
s = Series(lrange(6), index=['a', 'b', 'c',
'd', 'e', 'f'], dtype='int64')
result = read_json(s.to_json(), typ=None)
assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
assert_frame_equal(result, df)
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, index=['A', 'B', 'C'])
result = read_json(df.to_json())
assert_frame_equal(result, df)
def test_path(self):
with ensure_clean('test.json') as path:
for df in [self.frame, self.frame2, self.intframe, self.tsframe,
self.mixed_frame]:
df.to_json(path)
read_json(path)
def test_axis_dates(self):
# frame
json = self.tsframe.to_json()
result = read_json(json)
assert_frame_equal(result, self.tsframe)
# series
json = self.ts.to_json()
result = read_json(json, typ='series')
assert_series_equal(result, self.ts, check_names=False)
assert result.name is None
def test_convert_dates(self):
# frame
df = self.tsframe.copy()
df['date'] = Timestamp('20130101')
json = df.to_json()
result = read_json(json)
assert_frame_equal(result, df)
df['foo'] = 1.
json = df.to_json(date_unit='ns')
result = read_json(json, convert_dates=False)
expected = df.copy()
expected['date'] = expected['date'].values.view('i8')
expected['foo'] = expected['foo'].astype('int64')
assert_frame_equal(result, expected)
# series
ts = Series(Timestamp('20130101'), index=self.ts.index)
json = ts.to_json()
result = read_json(json, typ='series')
assert_series_equal(result, ts)
def test_convert_dates_infer(self):
# GH10747
from pandas.io.json import dumps
infer_words = ['trade_time', 'date', 'datetime', 'sold_at',
'modified', 'timestamp', 'timestamps']
for infer_word in infer_words:
data = [{'id': 1, infer_word: 1036713600000}, {'id': 2}]
expected = DataFrame([[1, Timestamp('2002-11-08')], [2, pd.NaT]],
columns=['id', infer_word])
result = read_json(dumps(data))[['id', infer_word]]
assert_frame_equal(result, expected)
def test_date_format_frame(self):
df = self.tsframe.copy()
def test_w_date(date, date_unit=None):
df['date'] = Timestamp(date)
df.iloc[1, df.columns.get_loc('date')] = pd.NaT
df.iloc[5, df.columns.get_loc('date')] = pd.NaT
if date_unit:
json = df.to_json(date_format='iso', date_unit=date_unit)
else:
json = df.to_json(date_format='iso')
result = read_json(json)
assert_frame_equal(result, df)
test_w_date('20130101 20:43:42.123')
test_w_date('20130101 20:43:42', date_unit='s')
test_w_date('20130101 20:43:42.123', date_unit='ms')
test_w_date('20130101 20:43:42.123456', date_unit='us')
test_w_date('20130101 20:43:42.123456789', date_unit='ns')
pytest.raises(ValueError, df.to_json, date_format='iso',
date_unit='foo')
def test_date_format_series(self):
def test_w_date(date, date_unit=None):
ts = Series(Timestamp(date), index=self.ts.index)
ts.iloc[1] = pd.NaT
ts.iloc[5] = pd.NaT
if date_unit:
json = ts.to_json(date_format='iso', date_unit=date_unit)
else:
json = ts.to_json(date_format='iso')
result = read_json(json, typ='series')
assert_series_equal(result, ts)
test_w_date('20130101 20:43:42.123')
test_w_date('20130101 20:43:42', date_unit='s')
test_w_date('20130101 20:43:42.123', date_unit='ms')
test_w_date('20130101 20:43:42.123456', date_unit='us')
test_w_date('20130101 20:43:42.123456789', date_unit='ns')
ts = Series(Timestamp('20130101 20:43:42.123'), index=self.ts.index)
pytest.raises(ValueError, ts.to_json, date_format='iso',
date_unit='foo')
def test_date_unit(self):
df = self.tsframe.copy()
df['date'] = Timestamp('20130101 20:43:42')
dl = df.columns.get_loc('date')
df.iloc[1, dl] = Timestamp('19710101 20:43:42')
df.iloc[2, dl] = Timestamp('21460101 20:43:42')
df.iloc[4, dl] = pd.NaT
for unit in ('s', 'ms', 'us', 'ns'):
json = df.to_json(date_format='epoch', date_unit=unit)
# force date unit
result = read_json(json, date_unit=unit)
assert_frame_equal(result, df)
# detect date unit
result = read_json(json, date_unit=None)
assert_frame_equal(result, df)
def test_weird_nested_json(self):
# this used to core dump the parser
s = r'''{
"status": "success",
"data": {
"posts": [
{
"id": 1,
"title": "A blog post",
"body": "Some useful content"
},
{
"id": 2,
"title": "Another blog post",
"body": "More content"
}
]
}
}'''
read_json(s)
def test_doc_example(self):
dfj2 = DataFrame(np.random.randn(5, 2), columns=list('AB'))
dfj2['date'] = Timestamp('20130101')
dfj2['ints'] = lrange(5)
dfj2['bools'] = True
dfj2.index = pd.date_range('20130101', periods=5)
json = dfj2.to_json()
result = read_json(json, dtype={'ints': np.int64, 'bools': np.bool_})
assert_frame_equal(result, result)
def test_misc_example(self):
# parsing unordered input fails
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
error_msg = """DataFrame\\.index are different
DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]: Index\\(\\[u?'a', u?'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
with tm.assert_raises_regex(AssertionError, error_msg):
assert_frame_equal(result, expected, check_index_type=False)
result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
@network
def test_round_trip_exception_(self):
# GH 3867
csv = 'https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv'
df = pd.read_csv(csv)
s = df.to_json()
result = pd.read_json(s)
assert_frame_equal(result.reindex(
index=df.index, columns=df.columns), df)
@network
def test_url(self):
url = 'https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5' # noqa
result = read_json(url, convert_dates=True)
for c in ['created_at', 'closed_at', 'updated_at']:
assert result[c].dtype == 'datetime64[ns]'
def test_timedelta(self):
converter = lambda x: pd.to_timedelta(x, unit='ms')
s = Series([timedelta(23), timedelta(seconds=5)])
assert s.dtype == 'timedelta64[ns]'
result = pd.read_json(s.to_json(), typ='series').apply(converter)
assert_series_equal(result, s)
s = Series([timedelta(23), timedelta(seconds=5)],
index=pd.Index([0, 1]))
assert s.dtype == 'timedelta64[ns]'
result = pd.read_json(s.to_json(), typ='series').apply(converter)
assert_series_equal(result, s)
frame = DataFrame([timedelta(23), timedelta(seconds=5)])
assert frame[0].dtype == 'timedelta64[ns]'
assert_frame_equal(frame, pd.read_json(frame.to_json())
.apply(converter))
frame = DataFrame({'a': [timedelta(days=23), timedelta(seconds=5)],
'b': [1, 2],
'c': pd.date_range(start='20130101', periods=2)})
result = pd.read_json(frame.to_json(date_unit='ns'))
result['a'] = pd.to_timedelta(result.a, unit='ns')
result['c'] = pd.to_datetime(result.c)
assert_frame_equal(frame, result)
def test_mixed_timedelta_datetime(self):
frame = DataFrame({'a': [timedelta(23), pd.Timestamp('20130101')]},
dtype=object)
expected = DataFrame({'a': [pd.Timedelta(frame.a[0]).value,
pd.Timestamp(frame.a[1]).value]})
result = pd.read_json(frame.to_json(date_unit='ns'),
dtype={'a': 'int64'})
assert_frame_equal(result, expected, check_index_type=False)
def test_default_handler(self):
value = object()
frame = DataFrame({'a': [7, value]})
expected = DataFrame({'a': [7, str(value)]})
result = pd.read_json(frame.to_json(default_handler=str))
assert_frame_equal(expected, result, check_index_type=False)
def test_default_handler_indirect(self):
from pandas.io.json import dumps
def default(obj):
if isinstance(obj, complex):
return [('mathjs', 'Complex'),
('re', obj.real),
('im', obj.imag)]
return str(obj)
df_list = [9, DataFrame({'a': [1, 'STR', complex(4, -5)],
'b': [float('nan'), None, 'N/A']},
columns=['a', 'b'])]
expected = ('[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
'["re",4.0],["im",-5.0]],"N\\/A"]]]')
assert dumps(df_list, default_handler=default,
orient="values") == expected
def test_default_handler_numpy_unsupported_dtype(self):
# GH12554 to_json raises 'Unhandled numpy dtype 15'
df = DataFrame({'a': [1, 2.3, complex(4, -5)],
'b': [float('nan'), None, complex(1.2, 0)]},
columns=['a', 'b'])
expected = ('[["(1+0j)","(nan+0j)"],'
'["(2.3+0j)","(nan+0j)"],'
'["(4-5j)","(1.2+0j)"]]')
assert df.to_json(default_handler=str, orient="values") == expected
def test_default_handler_raises(self):
def my_handler_raises(obj):
raise TypeError("raisin")
pytest.raises(TypeError,
DataFrame({'a': [1, 2, object()]}).to_json,
default_handler=my_handler_raises)
pytest.raises(TypeError,
DataFrame({'a': [1, 2, complex(4, -5)]}).to_json,
default_handler=my_handler_raises)
def test_categorical(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
df["B"] = df["A"]
expected = df.to_json()
df["B"] = df["A"].astype('category')
assert expected == df.to_json()
s = df["A"]
sc = df["B"]
assert s.to_json() == sc.to_json()
def test_datetime_tz(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
tz_range = pd.date_range('20130101', periods=3, tz='US/Eastern')
tz_naive = tz_range.tz_convert('utc').tz_localize(None)
df = DataFrame({
'A': tz_range,
'B': pd.date_range('20130101', periods=3)})
df_naive = df.copy()
df_naive['A'] = tz_naive
expected = df_naive.to_json()
assert expected == df.to_json()
stz = Series(tz_range)
s_naive = Series(tz_naive)
assert stz.to_json() == s_naive.to_json()
def test_sparse(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = pd.DataFrame(np.random.randn(10, 4))
df.loc[:8] = np.nan
sdf = df.to_sparse()
expected = df.to_json()
assert expected == sdf.to_json()
s = pd.Series(np.random.randn(10))
s.loc[:8] = np.nan
ss = s.to_sparse()
expected = s.to_json()
assert expected == ss.to_json()
def test_tz_is_utc(self):
from pandas.io.json import dumps
exp = '"2013-01-10T05:00:00.000Z"'
ts = Timestamp('2013-01-10 05:00:00Z')
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
ts = Timestamp('2013-01-10 00:00:00', tz='US/Eastern')
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
ts = Timestamp('2013-01-10 00:00:00-0500')
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
def test_tz_range_is_utc(self):
from pandas.io.json import dumps
exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
dfexp = ('{"DT":{'
'"0":"2013-01-01T05:00:00.000Z",'
'"1":"2013-01-02T05:00:00.000Z"}}')
tz_range = pd.date_range('2013-01-01 05:00:00Z', periods=2)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({'DT': dti})
assert dumps(df, iso_dates=True) == dfexp
tz_range = pd.date_range('2013-01-01 00:00:00', periods=2,
tz='US/Eastern')
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({'DT': dti})
assert dumps(df, iso_dates=True) == dfexp
tz_range = pd.date_range('2013-01-01 00:00:00-0500', periods=2)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({'DT': dti})
assert dumps(df, iso_dates=True) == dfexp
def test_read_inline_jsonl(self):
# GH9180
result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
def test_read_s3_jsonl(self, s3_resource):
pytest.importorskip('s3fs')
# GH17200
result = read_json('s3n://pandas-test/items.jsonl', lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
def test_read_local_jsonl(self):
# GH17200
with ensure_clean('tmp_items.json') as path:
with open(path, 'w') as infile:
infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n')
result = read_json(path, lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
def test_read_jsonl_unicode_chars(self):
# GH15132: non-ascii unicode characters
# \u201d == RIGHT DOUBLE QUOTATION MARK
# simulate file handle
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
json = StringIO(json)
result = read_json(json, lines=True)
expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]],
columns=['a', 'b'])
assert_frame_equal(result, expected)
# simulate string
json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
result = read_json(json, lines=True)
expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]],
columns=['a', 'b'])
assert_frame_equal(result, expected)
def test_read_json_large_numbers(self):
# GH18842
json = '{"articleId": "1404366058080022500245"}'
json = StringIO(json)
result = read_json(json, typ="series")
expected = Series(1.404366e+21, index=['articleId'])
assert_series_equal(result, expected)
json = '{"0": {"articleId": "1404366058080022500245"}}'
json = StringIO(json)
result = read_json(json)
expected = DataFrame(1.404366e+21, index=['articleId'], columns=[0])
assert_frame_equal(result, expected)
def test_to_jsonl(self):
# GH9180
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.to_json(orient="records", lines=True)
expected = '{"a":1,"b":2}\n{"a":1,"b":2}'
assert result == expected
df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=['a', 'b'])
result = df.to_json(orient="records", lines=True)
expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}'
assert result == expected
assert_frame_equal(pd.read_json(result, lines=True), df)
# GH15096: escaped characters in columns and data
df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]],
columns=["a\\", 'b'])
result = df.to_json(orient="records", lines=True)
expected = ('{"a\\\\":"foo\\\\","b":"bar"}\n'
'{"a\\\\":"foo\\"","b":"bar"}')
assert result == expected
assert_frame_equal(pd.read_json(result, lines=True), df)
def test_latin_encoding(self):
if compat.PY2:
tm.assert_raises_regex(
TypeError, r'\[unicode\] is not implemented as a table column')
return
# GH 13774
pytest.skip("encoding not implemented in .to_json(), "
"xref #13774")
values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'a', b'b', b'c'],
[b'EE, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
[b'', b'a', b'b', b'c'],
[b'\xf8\xfc', b'a', b'b', b'c'],
[b'A\xf8\xfc', b'', b'a', b'b', b'c'],
[np.nan, b'', b'b', b'c'],
[b'A\xf8\xfc', np.nan, b'', b'b', b'c']]
def _try_decode(x, encoding='latin-1'):
try:
return x.decode(encoding)
except AttributeError:
return x
# not sure how to remove latin-1 from code in python 2 and 3
values = [[_try_decode(x) for x in y] for y in values]
examples = []
for dtype in ['category', object]:
for val in values:
examples.append(Series(val, dtype=dtype))
def roundtrip(s, encoding='latin-1'):
with ensure_clean('test.json') as path:
s.to_json(path, encoding=encoding)
retr = read_json(path, encoding=encoding)
assert_series_equal(s, retr, check_categorical=False)
for s in examples:
roundtrip(s)
def test_data_frame_size_after_to_json(self):
# GH15344
df = DataFrame({'a': [str(1)]})
size_before = df.memory_usage(index=True, deep=True).sum()
df.to_json()
size_after = df.memory_usage(index=True, deep=True).sum()
assert size_before == size_after
@pytest.mark.parametrize('data, expected', [
(DataFrame([[1, 2], [4, 5]], columns=['a', 'b']),
{'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}),
(DataFrame([[1, 2], [4, 5]], columns=['a', 'b']).rename_axis('foo'),
{'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}),
(DataFrame([[1, 2], [4, 5]], columns=['a', 'b'],
index=[['a', 'b'], ['c', 'd']]),
{'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}),
(Series([1, 2, 3], name='A'),
{'name': 'A', 'data': [1, 2, 3]}),
(Series([1, 2, 3], name='A').rename_axis('foo'),
{'name': 'A', 'data': [1, 2, 3]}),
(Series([1, 2], name='A', index=[['a', 'b'], ['c', 'd']]),
{'name': 'A', 'data': [1, 2]}),
])
def test_index_false_to_json_split(self, data, expected):
# GH 17394
# Testing index=False in to_json with orient='split'
result = data.to_json(orient='split', index=False)
result = json.loads(result)
assert result == expected
@pytest.mark.parametrize('data', [
(DataFrame([[1, 2], [4, 5]], columns=['a', 'b'])),
(DataFrame([[1, 2], [4, 5]], columns=['a', 'b']).rename_axis('foo')),
(DataFrame([[1, 2], [4, 5]], columns=['a', 'b'],
index=[['a', 'b'], ['c', 'd']])),
(Series([1, 2, 3], name='A')),
(Series([1, 2, 3], name='A').rename_axis('foo')),
(Series([1, 2], name='A', index=[['a', 'b'], ['c', 'd']])),
])
def test_index_false_to_json_table(self, data):
# GH 17394
# Testing index=False in to_json with orient='table'
result = data.to_json(orient='table', index=False)
result = json.loads(result)
expected = {
'schema': pd.io.json.build_table_schema(data, index=False),
'data': DataFrame(data).to_dict(orient='records')
}
assert result == expected
@pytest.mark.parametrize('orient', [
'records', 'index', 'columns', 'values'
])
def test_index_false_error_to_json(self, orient):
# GH 17394
# Testing error message from to_json with index=False
df = pd.DataFrame([[1, 2], [4, 5]], columns=['a', 'b'])
with tm.assert_raises_regex(ValueError, "'index=False' is only "
"valid when 'orient' is "
"'split' or 'table'"):
df.to_json(orient=orient, index=False)
| bsd-3-clause |
polyanskiy/refractiveindex.info-scripts | scripts/Adachi 1989 - GaSb.py | 1 | 3777 | # -*- coding: utf-8 -*-
# Author: Mikhail Polyanskiy
# Last modified: 2017-06-17
# Original data: Adachi 1989, https://doi.org/10.1063/1.343580
import numpy as np
import matplotlib.pyplot as plt
π = np.pi
# model parameters
E0 = 0.72 #eV
Δ0 = 1.46-E0 #eV
E1 = 2.05 #eV
Δ1 = 2.50-E1 #eV
E2 = 4.0 #eV
Eg = 0.76 #eV
A = 0.71 #eV**1.5
B1 = 6.68
B11 = 14.29 #eV**-0.5
Γ = 0.09 #eV
C = 5.69
γ = 0.290
D = 7.4
εinf = 1.0
def H(x): #Heaviside function
return 0.5 * (np.sign(x) + 1)
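# Illustrative values (added comment, not in the original script):
# H(0.3) -> 1.0, H(-0.2) -> 0.0 and H(0.0) -> 0.5, since np.sign(0.0) == 0.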
def Epsilon_A(ħω): #E0
χ0 = ħω/E0
χso = ħω / (E0+Δ0)
H0 = H(1-χ0)
Hso = H(1-χso)
fχ0 = χ0**-2 * ( 2 - (1+χ0)**0.5 - ((1-χ0)*H0)**0.5 )
fχso = χso**-2 * ( 2 - (1+χso)**0.5 - ((1-χso)*Hso)**0.5 )
H0 = H(χ0-1)
Hso = H(χso-1)
ε2 = A/(ħω)**2 * ( ((ħω-E0)*H0)**0.5 + 0.5*((ħω-E0-Δ0)*Hso)**0.5)
ε1 = A*E0**-1.5 * (fχ0+0.5*(E0/(E0+Δ0))**1.5*fχso)
return ε1 + 1j*ε2
def Epsilon_B(ħω): #E1
# ignoring E1+Δ1 contribution - no data on B2 & B21 in the paper
# result seems to reproduce graphical data from the paper
χ1 = ħω/E1
H1 = H(1-χ1)
ε2 = π*χ1**-2*(B1-B11*((E1-ħω)*H1)**0.5)
ε2 *= H(ε2) #undocumented trick: ignore negative ε2
χ1 = (ħω+1j*Γ)/E1
ε1 = -B1*χ1**-2*np.log(1-χ1**2)
return ε1.real + 1j*ε2.real
def Epsilon_C(ħω): #E2
χ2 = ħω/E2
ε2 = C*χ2*γ / ((1-χ2**2)**2+(χ2*γ)**2)
ε1 = C*(1-χ2**2) / ((1-χ2**2)**2+(χ2*γ)**2)
return ε1 + 1j*ε2
def Epsilon_D(ħω): #Eg
# ignoring ħωq - no data in the paper
# result seems to reproduce graphical data from the paper
Ech = E1
χg = Eg/ħω
χch = ħω/Ech
Hg = H(1-χg)
Hch = H(1-χch)
ε2 = D/ħω**2 * (ħω-Eg)**2 * Hg * Hch
return 1j*ε2
ev_min=0.1
ev_max=6
npoints=200
eV = np.linspace(ev_min, ev_max, npoints)
μm = 4.13566733e-1*2.99792458/eV
εA = Epsilon_A(eV)
εB = Epsilon_B(eV)
εC = Epsilon_C(eV)
εD = Epsilon_D(eV)
ε = εA + εB + εC + εD + εinf
n = (ε**.5).real
k = (ε**.5).imag
α = 4*π*k/μm*1e4 #1/cm
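# Optional sanity check (not part of the original script): sample the computed
# spectra at a single, arbitrarily chosen photon energy. np.interp expects an
# increasing x-grid, which eV already is.
E_check = 2.0 #eV
n_check = np.interp(E_check, eV, n)
k_check = np.interp(E_check, eV, k)
print('n({:.2f} eV) = {:.3f}, k({:.2f} eV) = {:.3f}'.format(E_check, n_check, E_check, k_check))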
#============================ DATA OUTPUT =================================
file = open('out.txt', 'w')
for i in range(npoints-1, -1, -1):
file.write('\n {:.4e} {:.4e} {:.4e}'.format(μm[i],n[i],k[i]))
file.close()
#=============================== PLOT =====================================
plt.rc('font', family='Arial', size='14')
#plot ε1 vs eV
plt.figure(1)
plt.plot(eV, ε.real, label="ε1")
plt.plot(eV, εA.real, label="Re(εA)")
plt.plot(eV, εB.real, label="Re(εB)")
plt.plot(eV, εC.real, label="Re(εC)")
plt.xlabel('Photon energy (eV)')
plt.ylabel('ε1')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot ε2 vs eV
plt.figure(2)
plt.plot(eV, ε.imag, label="ε2")
plt.plot(eV, εA.imag, label="Im(εA)")
plt.plot(eV, εB.imag, label="Im(εB)")
plt.plot(eV, εC.imag, label="Im(εC)")
plt.plot(eV, εD.imag, label="Im(εD)")
plt.yscale('log')
plt.xlabel('Photon energy (eV)')
plt.ylabel('ε2')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
plt.ylim([1e-2,1e2])
#plot n,k vs eV
plt.figure(3)
plt.plot(eV, n, label="n")
plt.plot(eV, k, label="k")
plt.xlabel('Photon energy (eV)')
plt.ylabel('n, k')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot n,k vs μm
plt.figure(4)
plt.plot(μm, n, label="n")
plt.plot(μm, k, label="k")
plt.xlabel('Wavelength (μm)')
plt.ylabel('n, k')
plt.xscale('log')
plt.yscale('log')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot α vs eV
plt.figure(7)
plt.plot(eV,α)
plt.yscale('log')
plt.ylim([1e3,1e7])
plt.xlabel('Photon energy (eV)')
plt.ylabel('α (1/cm)') | gpl-3.0 |
endolith/numpy | numpy/linalg/linalg.py | 3 | 89527 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import functools
import operator
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,
add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite,
finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
atleast_2d, intp, asanyarray, object_, matmul,
swapaxes, divide, count_nonzero, isnan, sign, argsort, sort
)
from numpy.core.multiarray import normalize_axis_index
from numpy.core.overrides import set_module
from numpy.core import overrides
from numpy.lib.twodim_base import triu, eye
from numpy.linalg import lapack_lite, _umath_linalg
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy.linalg')
fortran_int = intc
@set_module('numpy.linalg')
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
def _determine_error_states():
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
return [bufsize, invalid_call_errmask, None]
# Dealing with errors in _umath_linalg
_linalg_error_extobj = _determine_error_states()
del _determine_error_states
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def _raise_linalgerror_lstsq(err, flag):
raise LinAlgError("SVD did not converge in Linear Least Squares")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj) # make a copy
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
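# Worked examples of the rule above (added for clarity, not in the original
# source): an int32 or float64 input gives (double, double); a float32 input
# gives (double, single); mixing a float32 array with a complex64 array gives
# (cdouble, csingle), i.e. the computation runs in cdouble and the result is
# cast back to csingle.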
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assert_2d(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % a.ndim)
def _assert_stacked_2d(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
def _assert_stacked_square(*arrays):
for a in arrays:
m, n = a.shape[-2:]
if m != n:
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assert_finite(*arrays):
for a in arrays:
if not isfinite(a).all():
raise LinAlgError("Array must not contain infs or NaNs")
def _is_empty_2d(arr):
# check size first for efficiency
return arr.size == 0 and product(arr.shape[-2:]) == 0
def transpose(a):
"""
Transpose each matrix in a stack of matrices.
Unlike np.transpose, this only swaps the last two axes, rather than all of
them
Parameters
----------
a : (...,M,N) array_like
Returns
-------
aT : (...,N,M) ndarray
"""
return swapaxes(a, -1, -2)
# Linear equations
def _tensorsolve_dispatcher(a, b, axes=None):
return (a, b)
@array_function_dispatch(_tensorsolve_dispatcher)
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=b.ndim)``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def _solve_dispatcher(a, b):
return (a, b)
@array_function_dispatch(_solve_dispatcher)
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
See Also
--------
scipy.linalg.solve : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine ``_gesv``.
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
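# Illustrative shapes (added comment, not in the original source): with
# a.shape == (5, 3, 3), a b of shape (5, 3) satisfies b.ndim == a.ndim - 1 and
# is treated as a stack of length-3 vectors (solve1), whereas a b of shape
# (5, 3, 4) is treated as a stack of 3x4 right-hand-side matrices (solve).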
if b.ndim == a.ndim - 1:
gufunc = _umath_linalg.solve1
else:
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def _tensorinv_dispatcher(a, ind=None):
return (a,)
@array_function_dispatch(_tensorinv_dispatcher)
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def _unary_dispatcher(a):
return (a,)
@array_function_dispatch(_unary_dispatcher)
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
See Also
--------
scipy.linalg.inv : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5 , -0.5 ]],
[[-1.25, 0.75],
[ 0.75, -0.25]]])
"""
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
def _matrix_power_dispatcher(a, n):
return (a,)
@array_function_dispatch(_matrix_power_dispatcher)
def matrix_power(a, n):
"""
Raise a square matrix to the (integer) power `n`.
For positive integers `n`, the power is computed by repeated matrix
squarings and matrix multiplications. If ``n == 0``, the identity matrix
of the same shape as M is returned. If ``n < 0``, the inverse
is computed and then raised to the ``abs(n)``.
.. note:: Stacks of object matrices are not currently supported.
Parameters
----------
a : (..., M, M) array_like
Matrix to be "powered".
n : int
The exponent can be any integer or long integer, positive,
negative, or zero.
Returns
-------
a**n : (..., M, M) ndarray or matrix object
The return value is the same shape and type as `M`;
if the exponent is positive or zero then the type of the
elements is the same as those of `M`. If the exponent is
negative the elements are floating-point.
Raises
------
LinAlgError
For matrices that are not square or that (for negative powers) cannot
be inverted numerically.
Examples
--------
>>> from numpy.linalg import matrix_power
>>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
>>> matrix_power(i, 3) # should = -i
array([[ 0, -1],
[ 1, 0]])
>>> matrix_power(i, 0)
array([[1, 0],
[0, 1]])
>>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
array([[ 0., 1.],
[-1., 0.]])
Somewhat more sophisticated example
>>> q = np.zeros((4, 4))
>>> q[0:2, 0:2] = -i
>>> q[2:4, 2:4] = i
>>> q # one of the three quaternion units not equal to 1
array([[ 0., -1., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., -1., 0.]])
>>> matrix_power(q, 2) # = -np.eye(4)
array([[-1., 0., 0., 0.],
[ 0., -1., 0., 0.],
[ 0., 0., -1., 0.],
[ 0., 0., 0., -1.]])
"""
a = asanyarray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
try:
n = operator.index(n)
except TypeError as e:
raise TypeError("exponent must be an integer") from e
# Fall back on dot for object arrays. Object arrays are not supported by
# the current implementation of matmul using einsum
if a.dtype != object:
fmatmul = matmul
elif a.ndim == 2:
fmatmul = dot
else:
raise NotImplementedError(
"matrix_power not supported for stacks of object arrays")
if n == 0:
a = empty_like(a)
a[...] = eye(a.shape[-2], dtype=a.dtype)
return a
elif n < 0:
a = inv(a)
n = abs(n)
# short-cuts.
if n == 1:
return a
elif n == 2:
return fmatmul(a, a)
elif n == 3:
return fmatmul(fmatmul(a, a), a)
# Use binary decomposition to reduce the number of matrix multiplications.
# Here, we iterate over the bits of n, from LSB to MSB, raise `a` to
# increasing powers of 2, and multiply into the result as needed.
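# Worked example (added comment, not in the original source): for n == 13
# (binary 1101) the loop forms z = a, a**2, a**4, a**8 by repeated squaring
# and multiplies a, a**4 and a**8 into `result`, i.e. 5 matrix products
# instead of the 12 needed by naive repeated multiplication.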
z = result = None
while n > 0:
z = a if z is None else fmatmul(z, z)
n, bit = divmod(n, 2)
if bit:
result = z if result is None else fmatmul(result, z)
return result
# Cholesky decomposition
@array_function_dispatch(_unary_dispatcher)
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. No
checking is performed to verify whether `a` is Hermitian or not.
In addition, only the lower-triangular and diagonal elements of `a`
are used. Only `L` is actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
See Also
--------
scipy.linalg.cholesky : Similar function in SciPy.
scipy.linalg.cholesky_banded : Cholesky decompose a banded Hermitian
positive-definite matrix.
scipy.linalg.cho_factor : Cholesky decomposition of a matrix, to use in
`scipy.linalg.cho_solve`.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[1.+0.j, 0.+0.j],
[0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[1.+0.j, 0.-2.j],
[0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[1.+0.j, 0.+0.j],
[0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> np.linalg.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
# QR decomposition
def _qr_dispatcher(a, mode=None):
return (a,)
@array_function_dispatch(_qr_dispatcher)
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw'}, optional
If K = min(M, N), then
* 'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
* 'complete' : returns q, r with dimensions (M, M), (M, N)
* 'r' : returns r only with dimensions (K, N)
* 'raw' : returns h, tau with dimensions (N, M), (K,)
The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced', and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
See Also
--------
scipy.linalg.qr : Similar function in SciPy.
scipy.linalg.rq : Compute RQ decomposition of a matrix.
Notes
-----
This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``,
``dorgqr``, and ``zungqr``.
For more information on the qr factorization, see for example:
https://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = np.linalg.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(np.linalg.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning, stacklevel=3)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated."
warnings.warn(msg, DeprecationWarning, stacklevel=3)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assert_2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, max(1, m), tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = max(1, n, int(abs(work[0])))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, max(1, m), tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, max(1, m), tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = max(1, n, int(abs(work[0])))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, max(1, m), tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
@array_function_dispatch(_unary_dispatcher)
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of real symmetric or complex Hermitian
(conjugate symmetric) arrays.
eigh : eigenvalues and eigenvectors of real symmetric or complex
Hermitian (conjugate symmetric) arrays.
scipy.linalg.eigvals : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the ``_geev`` LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by ``Q`` on one side and by ``Q.T`` on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.]) # random
"""
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
_assert_finite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def _eigvalsh_dispatcher(a, UPLO=None):
return (a,)
@array_function_dispatch(_eigvalsh_dispatcher)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a complex Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian
(conjugate symmetric) arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
scipy.linalg.eigvalsh : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines ``_syevd``, ``_heevd``.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712]) # may vary
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[5.+2.j, 9.-2.j],
[0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()
>>> # with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[5.+0.j, 0.-2.j],
[0.+2.j, 2.+0.j]])
>>> wa = LA.eigvalsh(a)
>>> wb = LA.eigvals(b)
>>> wa; wb
array([1., 6.])
array([6.+0.j, 1.+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
@array_function_dispatch(_unary_dispatcher)
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a real symmetric or complex
Hermitian (conjugate symmetric) array.
eigvalsh : eigenvalues of a real symmetric or complex Hermitian
(conjugate symmetric) array.
scipy.linalg.eig : Similar function in SciPy that also solves the
generalized eigenvalue problem.
scipy.linalg.schur : Best choice for unitary and other non-Hermitian
normal matrices.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the ``_geev`` LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``a @ v = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``a @ v[:,i] = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent and `a` can be diagonalized by
a similarity transformation using `v`, i.e, ``inv(v) @ a @ v`` is diagonal.
For non-Hermitian normal matrices the SciPy function `scipy.linalg.schur`
is preferred because the matrix `v` is guaranteed to be unitary, which is
not the case when using `eig`. The Schur factorization produces an
upper triangular matrix rather than a diagonal matrix, but for normal
matrices only the diagonal of the upper triangular matrix is needed, the
rest is roundoff error.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``y.T @ a = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([1., 2., 3.])
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([1.+1.j, 1.-1.j])
array([[0.70710678+0.j , 0.70710678-0.j ],
[0. -0.70710678j, 0. +0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that ``a.conj().T == a``, i.e., `a` is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([2.+0.j, 0.+0.j])
array([[ 0. +0.70710678j, 0.70710678+0.j ], # may vary
[ 0.70710678+0.j , -0. +0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([1., 1.])
array([[1., 0.],
[0., 1.]])
"""
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
_assert_finite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
@array_function_dispatch(_eigvalsh_dispatcher)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a complex Hermitian
(conjugate symmetric) or a real symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian or real symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of real symmetric or complex Hermitian
(conjugate symmetric) arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
scipy.linalg.eigh : Similar function in SciPy (but also solves the
generalized eigenvalue problem).
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``,
``_heevd``.
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ], # may vary
[ 0. +0.38268343j, 0. -0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ], # may vary
[ 0. +0.38268343j, 0. -0.92387953j]])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[5.+2.j, 9.-2.j],
[0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[5.+0.j, 0.-2.j],
[0.+2.j, 2.+0.j]])
>>> wa, va = LA.eigh(a)
>>> wb, vb = LA.eig(b)
>>> wa; wb
array([1., 6.])
array([6.+0.j, 1.+0.j])
>>> va; vb
array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary
[ 0. +0.89442719j, 0. -0.4472136j ]])
array([[ 0.89442719+0.j , -0. +0.4472136j],
[-0. +0.4472136j, 0.89442719+0.j ]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def _svd_dispatcher(a, full_matrices=None, compute_uv=None, hermitian=None):
return (a,)
@array_function_dispatch(_svd_dispatcher)
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
"""
Singular Value Decomposition.
When `a` is a 2D array, it is factorized as ``u @ np.diag(s) @ vh
= (u * s) @ vh``, where `u` and `vh` are 2D unitary arrays and `s` is a 1D
array of `a`'s singular values. When `a` is higher-dimensional, SVD is
applied in stacked mode as explained below.
Parameters
----------
a : (..., M, N) array_like
A real or complex array with ``a.ndim >= 2``.
full_matrices : bool, optional
If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and
``(..., N, N)``, respectively. Otherwise, the shapes are
``(..., M, K)`` and ``(..., K, N)``, respectively, where
``K = min(M, N)``.
compute_uv : bool, optional
Whether or not to compute `u` and `vh` in addition to `s`. True
by default.
hermitian : bool, optional
If True, `a` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.17.0
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
s : (..., K) array
Vector(s) with the singular values, within each vector sorted in
descending order. The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`.
vh : { (..., N, N), (..., K, N) } array
Unitary array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`. The size of the last two dimensions
depends on the value of `full_matrices`. Only returned when
`compute_uv` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
See Also
--------
scipy.linalg.svd : Similar function in SciPy.
scipy.linalg.svdvals : Compute singular values of a matrix.
Notes
-----
.. versionchanged:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine ``_gesdd``.
SVD is usually described for the factorization of a 2D matrix :math:`A`.
The higher-dimensional case will be discussed below. In the 2D case, SVD is
written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,
:math:`S= \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`
contains the singular values of `a` and `u` and `vh` are unitary. The rows
of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are
the eigenvectors of :math:`A A^H`. In both cases the corresponding
(possibly non-zero) eigenvalues are given by ``s**2``.
If `a` has more than two dimensions, then broadcasting rules apply, as
explained in :ref:`routines.linalg-broadcasting`. This means that SVD is
working in "stacked" mode: it iterates over all indices of the first
``a.ndim - 2`` dimensions and for each combination SVD is applied to the
last two indices. The matrix `a` can be reconstructed from the
decomposition with either ``(u * s[..., None, :]) @ vh`` or
``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the
function ``np.matmul`` for python versions below 3.5.)
If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are
all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
>>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3)
Reconstruction based on full SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((9, 9), (6,), (6, 6))
>>> np.allclose(a, np.dot(u[:, :6] * s, vh))
True
>>> smat = np.zeros((9, 6), dtype=complex)
>>> smat[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on reduced SVD, 2D case:
>>> u, s, vh = np.linalg.svd(a, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((9, 6), (6,), (6, 6))
>>> np.allclose(a, np.dot(u * s, vh))
True
>>> smat = np.diag(s)
>>> np.allclose(a, np.dot(u, np.dot(smat, vh)))
True
Reconstruction based on full SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=True)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh))
True
Reconstruction based on reduced SVD, 4D case:
>>> u, s, vh = np.linalg.svd(b, full_matrices=False)
>>> u.shape, s.shape, vh.shape
((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))
>>> np.allclose(b, np.matmul(u * s[..., None, :], vh))
True
>>> np.allclose(b, np.matmul(u, s[..., None] * vh))
True
"""
import numpy as _nx
a, wrap = _makearray(a)
if hermitian:
# note: lapack svd returns eigenvalues with s ** 2 sorted descending,
# but eig returns s sorted ascending, so we re-order the eigenvalues
# and related arrays to have the correct order
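# Small worked example (added comment, not in the original source): for a
# Hermitian `a` with eigenvalues [-3, 1, 2], eigh returns them in ascending
# order; the code below yields s == [3, 2, 1] with the reordered
# sgn == [-1, 1, 1], and the signs are folded into vt so that (u * s) @ vt
# still reconstructs `a`.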
if compute_uv:
s, u = eigh(a)
sgn = sign(s)
s = abs(s)
sidx = argsort(s)[..., ::-1]
sgn = _nx.take_along_axis(sgn, sidx, axis=-1)
s = _nx.take_along_axis(s, sidx, axis=-1)
u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1)
# singular values are unsigned, move the sign into v
vt = transpose(u * sgn[..., None, :]).conjugate()
return wrap(u), s, wrap(vt)
else:
s = eigvalsh(a)
s = s[..., ::-1]
s = abs(s)
return sort(s)[..., ::-1]
_assert_stacked_2d(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m, n = a.shape[-2:]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vh = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vh = vh.astype(result_t, copy=False)
return wrap(u), s, wrap(vh)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def _cond_dispatcher(x, p=None):
return (x,)
@array_function_dispatch(_cond_dispatcher)
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746 # may vary
>>> min(LA.svd(a, compute_uv=False))*min(LA.svd(LA.inv(a), compute_uv=False))
0.70710678118654746 # may vary
"""
x = asarray(x) # in case we have a matrix
if _is_empty_2d(x):
raise LinAlgError("cond is not defined on empty arrays")
if p is None or p == 2 or p == -2:
s = svd(x, compute_uv=False)
with errstate(all='ignore'):
if p == -2:
r = s[..., -1] / s[..., 0]
else:
r = s[..., 0] / s[..., -1]
else:
# Call inv(x) ignoring errors. The result array will
# contain nans in the entries where inversion failed.
_assert_stacked_2d(x)
_assert_stacked_square(x)
t, result_t = _commonType(x)
signature = 'D->D' if isComplexType(t) else 'd->d'
with errstate(all='ignore'):
invx = _umath_linalg.inv(x, signature=signature)
r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1))
r = r.astype(result_t, copy=False)
# Convert nans to infs unless the original array had nan entries
r = asarray(r)
nan_mask = isnan(r)
if nan_mask.any():
nan_mask &= ~isnan(x).any(axis=(-2, -1))
if r.ndim > 0:
r[nan_mask] = Inf
elif nan_mask:
r[()] = Inf
# Convention is to return scalars instead of 0d arrays
if r.ndim == 0:
r = r[()]
return r
def _matrix_rank_dispatcher(M, tol=None, hermitian=None):
return (M,)
@array_function_dispatch(_matrix_rank_dispatcher)
def matrix_rank(M, tol=None, hermitian=False):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of singular values of the array that are
greater than `tol`.
.. versionchanged:: 1.14
Can now operate on stacks of matrices
Parameters
----------
M : {(M,), (..., M, N)} array_like
Input vector or stack of matrices.
tol : (...) array_like, float, optional
Threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
.. versionchanged:: 1.14
Broadcasted against the stack of matrices
hermitian : bool, optional
If True, `M` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.14
Returns
-------
rank : (...) array_like
Rank of M.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documentation, "Rank"
https://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False, hermitian=hermitian)
if tol is None:
tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
else:
tol = asarray(tol)[..., newaxis]
return count_nonzero(S > tol, axis=-1)
# Generalized inverse
def _pinv_dispatcher(a, rcond=None, hermitian=None):
return (a,)
@array_function_dispatch(_pinv_dispatcher)
def pinv(a, rcond=1e-15, hermitian=False):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
.. versionchanged:: 1.14
Can now operate on stacks of matrices
Parameters
----------
a : (..., M, N) array_like
Matrix or stack of matrices to be pseudo-inverted.
rcond : (...) array_like of float
Cutoff for small singular values.
Singular values less than or equal to
``rcond * largest_singular_value`` are set to zero.
Broadcasts against the stack of matrices.
hermitian : bool, optional
If True, `a` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
.. versionadded:: 1.17.0
Returns
-------
B : (..., N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
See Also
--------
scipy.linalg.pinv : Similar function in SciPy.
scipy.linalg.pinv2 : Similar function in SciPy (SVD-based).
scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a
Hermitian matrix.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
    of A's so-called singular values (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
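    As a rough check of the SVD construction described in the Notes (this
    assumes the randomly drawn ``a`` is well conditioned, which is very likely
    but not guaranteed):
    >>> u, s, vt = np.linalg.svd(a, full_matrices=False)
    >>> np.allclose(B, vt.T @ np.diag(1/s) @ u.T)
    True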
"""
a, wrap = _makearray(a)
rcond = asarray(rcond)
if _is_empty_2d(a):
m, n = a.shape[-2:]
res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
u, s, vt = svd(a, full_matrices=False, hermitian=hermitian)
# discard small singular values
cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
large = s > cutoff
s = divide(1, s, where=large, out=s)
s[~large] = 0
res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
return wrap(res)
# Determinant
@array_function_dispatch(_unary_dispatcher)
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine ``z/dgetrf``.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529) # may vary
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
sign = sign.astype(result_t, copy=False)
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
@array_function_dispatch(_unary_dispatcher)
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
slogdet : Another way to represent the determinant, more suitable
for large matrices where underflow/overflow may occur.
scipy.linalg.det : Similar function in SciPy.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine ``z/dgetrf``.
Examples
--------
    The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0 # may vary
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assert_stacked_2d(a)
_assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def _lstsq_dispatcher(a, b, rcond=None):
return (a, b)
@array_function_dispatch(_lstsq_dispatcher)
def lstsq(a, b, rcond="warn"):
r"""
Return the least-squares solution to a linear matrix equation.
    Computes the vector x that approximately solves the equation
``a @ x = b``. The equation may be under-, well-, or over-determined
(i.e., the number of linearly independent rows of `a` can be less than,
equal to, or greater than its number of linearly independent columns).
If `a` is square and of full rank, then `x` (but for round-off error)
is the "exact" solution of the equation. Else, `x` minimizes the
Euclidean 2-norm :math:`|| b - a x ||`.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
.. versionchanged:: 1.14.0
If not set, a FutureWarning is given. The previous default
of ``-1`` will use the machine precision as `rcond` parameter,
the new default will use the machine precision times `max(M, N)`.
To silence the warning and use the new default, use ``rcond=None``,
to keep using the old behavior, use ``rcond=-1``.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(1,), (K,), (0,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
See Also
--------
scipy.linalg.lstsq : Similar function in SciPy.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y, rcond=None)[0]
>>> m, c
    (1.0, -0.95) # may vary
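    The remaining return values can be inspected as well; this is a small
    illustrative check, and the floating-point output may differ slightly:
    >>> p, res, rnk, sv = np.linalg.lstsq(A, y, rcond=None)
    >>> res   # squared 2-norm of the residual; shape (1,) because y is 1-D
    array([0.05])  # may vary
    >>> rnk   # A has full column rank
    2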
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> _ = plt.legend()
>>> plt.show()
"""
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = b.ndim == 1
if is_1d:
b = b[:, newaxis]
_assert_2d(a, b)
m, n = a.shape[-2:]
m2, n_rhs = b.shape[-2:]
if m != m2:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
# FIXME: real_t is unused
real_t = _linalgRealType(t)
result_real_t = _realType(result_t)
# Determine default rcond value
if rcond == "warn":
# 2017-08-19, 1.14.0
warnings.warn("`rcond` parameter will change to the default of "
"machine precision times ``max(M, N)`` where M and N "
"are the input matrix dimensions.\n"
"To use the future default and silence this warning "
"we advise to pass `rcond=None`, to keep using the old, "
"explicitly pass `rcond=-1`.",
FutureWarning, stacklevel=3)
rcond = -1
if rcond is None:
rcond = finfo(t).eps * max(n, m)
if m <= n:
gufunc = _umath_linalg.lstsq_m
else:
gufunc = _umath_linalg.lstsq_n
signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq)
if n_rhs == 0:
# lapack can't handle n_rhs = 0 - so allocate the array one larger in that axis
b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype)
x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj)
if m == 0:
x[...] = 0
if n_rhs == 0:
# remove the item we added
x = x[..., :n_rhs]
resids = resids[..., :n_rhs]
# remove the axis we added
if is_1d:
x = x.squeeze(axis=-1)
# we probably should squeeze resids too, but we can't
# without breaking compatibility.
# as documented
if rank != n or m <= n:
resids = array([], result_real_t)
# coerce output arrays
s = s.astype(result_real_t, copy=False)
resids = resids.astype(result_real_t, copy=False)
x = x.astype(result_t, copy=True) # Copying lets the memory in r_parts be freed
return wrap(x), wrap(resids), rank, s
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by `numpy.linalg.norm()`.
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
        This should be either `numpy.amin`, `numpy.amax` or `numpy.sum`.
Returns
-------
result : float or ndarray
        If `x` is 2-D, the return value is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
y = moveaxis(x, (row_axis, col_axis), (-2, -1))
result = op(svd(y, compute_uv=False), axis=-1)
return result
def _norm_dispatcher(x, ord=None, axis=None, keepdims=None):
return (x,)
@array_function_dispatch(_norm_dispatcher)
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord`
is None. If both `axis` and `ord` are None, the 2-norm of
``x.ravel`` will be returned.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object. The default is None.
axis : {None, int, 2-tuple of ints}, optional.
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default
is None.
.. versionadded:: 1.8.0
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
See Also
--------
scipy.linalg.norm : Similar function in SciPy.
Notes
-----
For values of ``ord < 1``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
Both the Frobenius and nuclear norm orders are only defined for
matrices and raise a ValueError when ``x.ndim != 2``.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, ..., 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
0.0
>>> LA.norm(b, -2)
1.8570331885190563e-016 # may vary
>>> LA.norm(a, 3)
5.8480354764257312 # may vary
>>> LA.norm(a, -3)
0.0
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
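    With ``keepdims=True`` the reduced axis is retained with length one, so the
    result broadcasts against `c` (a small illustrative check):
    >>> LA.norm(c, axis=1, keepdims=True).shape
    (2, 1)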
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except Exception as e:
raise TypeError("'axis' must be None, an integer or a tuple of integers") from e
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
# None of the str-type keywords for ord ('fro', 'nuc')
# are valid for vectors
elif isinstance(ord, str):
raise ValueError(f"Invalid norm order '{ord}' for vectors")
else:
absx = abs(x)
absx **= ord
ret = add.reduce(absx, axis=axis, keepdims=keepdims)
ret **= (1 / ord)
return ret
elif len(axis) == 2:
row_axis, col_axis = axis
row_axis = normalize_axis_index(row_axis, nd)
col_axis = normalize_axis_index(col_axis, nd)
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def _multidot_dispatcher(arrays, *, out=None):
yield from arrays
yield out
@array_function_dispatch(_multidot_dispatcher)
def multi_dot(arrays, *, out=None):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a, b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
.. versionadded:: 1.19.0
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
>>> A = np.random.random((10000, 100))
>>> B = np.random.random((100, 1000))
>>> C = np.random.random((1000, 5))
>>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> _ = multi_dot([A, B, C, D])
instead of::
>>> _ = np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> _ = A.dot(B).dot(C).dot(D)
Notes
-----
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B):
return A.shape[0] * A.shape[1] * B.shape[1]
Assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
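    A quick shape-level sketch of the same three-matrix chain (contents are
    random and purely illustrative; only the shapes matter here):
    >>> A = np.random.random((10, 100))
    >>> B = np.random.random((100, 5))
    >>> C = np.random.random((5, 50))
    >>> multi_dot([A, B, C]).shape
    (10, 50)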
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1], out=out)
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assert_2d(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2], out=out)
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1, out=out)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C, out=None):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C, out=out)
else:
return dot(A, dot(B, C), out=out)
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
    Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j, out=None):
"""Actually do the multiplication with the given order."""
if i == j:
# the initial call with non-None out should never get here
assert out is None
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j),
out=out)
| bsd-3-clause |
vermouthmjl/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 12 | 9004 | import numpy as np
from scipy import linalg
from sklearn.decomposition import (NMF, ProjectedGradientNMF,
non_negative_factorization)
from sklearn.decomposition import nmf # For testing internals
from scipy.sparse import csc_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.base import clone
random_state = np.random.mtrand.RandomState(0)
def test_initialize_nn_output():
# Test that initialization does not return negative values
data = np.abs(random_state.randn(10, 10))
for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
@ignore_warnings
def test_parameter_checking():
A = np.ones((2, 2))
name = 'spam'
msg = "Invalid solver parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(solver=name).fit, A)
msg = "Invalid init parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(init=name).fit, A)
msg = "Invalid sparseness parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(sparseness=name).fit, A)
msg = "Negative values in data passed to"
assert_raise_message(ValueError, msg, NMF().fit, -A)
assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
2, 'nndsvd')
clf = NMF(2, tol=0.1).fit(A)
assert_raise_message(ValueError, msg, clf.transform, -A)
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
# 'nndsvd' only where the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@ignore_warnings
def test_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for solver in ('pg', 'cd'):
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = NMF(n_components=2, solver=solver, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
@ignore_warnings
def test_nmf_fit_close():
# Test that the fit is not too far away
for solver in ('pg', 'cd'):
pnmf = NMF(5, solver=solver, init='nndsvd', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
    # Test that the NLS results are close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
@ignore_warnings
def test_nmf_transform():
# Test that NMF.transform returns close values
A = np.abs(random_state.randn(6, 5))
for solver in ('pg', 'cd'):
m = NMF(solver=solver, n_components=4, init='nndsvd', random_state=0)
ft = m.fit_transform(A)
t = m.transform(A)
assert_array_almost_equal(ft, t, decimal=2)
@ignore_warnings
def test_nmf_inverse_transform():
# Test that NMF.inverse_transform returns close values
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 4))
for solver in ('pg', 'cd'):
m = NMF(solver=solver, n_components=4, init='random', random_state=0)
ft = m.fit_transform(A)
t = m.transform(A)
A_new = m.inverse_transform(t)
assert_array_almost_equal(A, A_new, decimal=2)
@ignore_warnings
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
NMF(n_components=15, random_state=0, tol=1e-2).fit(A)
@ignore_warnings
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
tol = 1e-2
A = np.abs(random_state.randn(10, 10))
m = ProjectedGradientNMF(n_components=5, random_state=0, tol=tol).fit(A)
data_sp = ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0,
tol=tol).fit(A).data_sparseness_
comp_sp = ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0,
tol=tol).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
@ignore_warnings
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
for solver in ('pg', 'cd'):
est1 = NMF(solver=solver, n_components=5, init='random',
random_state=0, tol=1e-2)
est2 = clone(est1)
W1 = est1.fit_transform(A)
W2 = est2.fit_transform(A_sparse)
H1 = est1.components_
H2 = est2.components_
assert_array_almost_equal(W1, W2)
assert_array_almost_equal(H1, H2)
@ignore_warnings
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
A = np.abs(random_state.randn(3, 2))
A[A > 1.0] = 0
A = csc_matrix(A)
for solver in ('pg', 'cd'):
model = NMF(solver=solver, random_state=0, tol=1e-4, n_components=2)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)
@ignore_warnings
def test_non_negative_factorization_consistency():
    # Test that the function gives the same result whether it is called
    # directly or through the NMF class
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
for solver in ('pg', 'cd'):
W_nmf, H, _ = non_negative_factorization(
A, solver=solver, random_state=1, tol=1e-2)
W_nmf_2, _, _ = non_negative_factorization(
A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2)
model_class = NMF(solver=solver, random_state=1, tol=1e-2)
W_cls = model_class.fit_transform(A)
W_cls_2 = model_class.transform(A)
assert_array_almost_equal(W_nmf, W_cls, decimal=10)
assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10)
@ignore_warnings
def test_non_negative_factorization_checking():
A = np.ones((2, 2))
# Test parameters checking is public function
nnmf = non_negative_factorization
msg = "Number of components must be positive; got (n_components='2')"
assert_raise_message(ValueError, msg, nnmf, A, A, A, '2')
msg = "Negative values in data passed to NMF (input H)"
assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, 'custom')
msg = "Negative values in data passed to NMF (input W)"
assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, 'custom')
msg = "Array passed to NMF (input H) is full of zeros"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom')
def test_safe_compute_error():
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
W, H = nmf._initialize_nmf(A, 5, init='random', random_state=0)
error = nmf._safe_compute_error(A, W, H)
error_sparse = nmf._safe_compute_error(A_sparse, W, H)
assert_almost_equal(error, error_sparse)
| bsd-3-clause |
jdavidrcamacho/Tests_GP | MSc_results/speed_test2.py | 2 | 5047 | # -*- coding: utf-8 -*-
import Gedi as gedi
import george as ge
import numpy as np
import matplotlib.pyplot as pl; pl.close('all')
from time import time
nrep = 100
pontos=[]
temposES=[]
georgeES=[]
temposESS=[]
georgeESS=[]
temposRQ=[]
georgeRQ=[]
lista=[10,20,50,100,200,500]
for i0, i in enumerate(lista):
print i
pontos.append(i)
np.random.seed(100)
x = 10 * np.sort(np.random.rand(2*i))
yerr = 0.2 * np.ones_like(x)
y = np.sin(x) + yerr * np.random.randn(len(x))
av = []
for _ in range(nrep):
start = time()
kernel1=gedi.kernel.ExpSquared(19.0, 2.0) +\
gedi.kernel.WhiteNoise(2.0)
gedi.kernel_likelihood.build_matrix(kernel1, x, y, yerr)
tempo1= time() - start
av.append(tempo1)
temposES.append(sum(av) / float(nrep))
######################################
av = []
for _ in range(nrep):
start = time() # Calculation using george
kernelg1 = 19**2*ge.kernels.ExpSquaredKernel(2.0**2) +\
ge.kernels.WhiteKernel(2.0)
gp = ge.GP(kernelg1)
gp.compute(x,yerr)
#gp.lnlikelihood(y)
tempog1= time() - start
av.append(tempog1)
georgeES.append(sum(av) / float(nrep))
###############################################################################
av = []
for _ in range(nrep):
start = time()
kernel1=gedi.kernel.ExpSineSquared(19.0, 2.0, 5.0) +\
gedi.kernel.WhiteNoise(2.0)
gedi.kernel_likelihood.build_matrix(kernel1, x, y, yerr)
tempo1= time() - start
av.append(tempo1)
temposESS.append(sum(av) / float(nrep))
######################################
av = []
for _ in range(nrep):
start = time() # Calculation using george
kernelg1 = 19**2*ge.kernels.ExpSine2Kernel(2./2.0**2,5.0) +\
ge.kernels.WhiteKernel(2.0)
gp = ge.GP(kernelg1)
gp.compute(x,yerr)
#gp.lnlikelihood(y)
tempog1= time() - start
av.append(tempog1)
georgeESS.append(sum(av) / float(nrep))
###############################################################################
av = []
for _ in range(nrep):
start = time()
kernel1=gedi.kernel.RatQuadratic(19.0, 2.0, 100.0) +\
gedi.kernel.WhiteNoise(2.0)
gedi.kernel_likelihood.build_matrix(kernel1, x, y, yerr)
tempo1= time() - start
av.append(tempo1)
temposRQ.append(sum(av) / float(nrep))
######################################
av = []
for _ in range(nrep):
start = time() # Calculation using george
kernelg1 = 19**2*ge.kernels.RationalQuadraticKernel(100,2.0**2) +\
ge.kernels.WhiteKernel(2.0)
gp = ge.GP(kernelg1)
gp.compute(x,yerr)
#gp.lnlikelihood(y)
tempog1= time() - start
av.append(tempog1)
georgeRQ.append(sum(av) / float(nrep))
##### Graphs
N = pontos
f, (ax1, ax2, ax3) = pl.subplots(1, 3, sharey=True)
ax1.loglog(N, temposES, 'b-o')
ax1.loglog(N, georgeES, 'b--')
ax1.legend(['gedi ES+WN','george ES+WN'],loc='upper left')
ax1.set_ylabel('Time')
ax2.loglog(N, temposESS, 'b-o')
ax2.loglog(N, georgeESS, 'b--')
ax2.legend(['gedi ESS+WN','george ESS+WN'],loc='upper left')
ax2.set_xlabel('Number of points')
ax3.loglog(N, temposRQ, 'b-o')
ax3.loglog(N, georgeRQ, 'b--')
ax3.legend(['gedi RQ+WN','george RQ+WN'],loc='upper left')
f.savefig('speedtest_2.png')
##### Old stuff
#nrep = 3
#pontos=[]
#temposESS=[]
#georgeESS=[]
#
#for i in np.arange(100,1100,100):
# print i
# pontos.append(i)
# np.random.seed(100)
# x = 10 * np.sort(np.random.rand(2*i))
# yerr = 0.2 * np.ones_like(x)
# y = np.sin(x) + yerr * np.random.randn(len(x))
#
# av = []
# for _ in range(nrep):
# start = time()
# kernel2 = gedi.kernel.ExpSineSquared(15.0, 2.0, 10.0) +\
# gedi.kernel.WhiteNoise(2.0)
# gedi.kernel_likelihood.build_matrix(kernel2, x, y, yerr)
# tempo1= time() - start
# av.append(tempo1)
# temposESS.append(sum(av) / float(nrep))
#
# ###########################################################################
#
# av = []
# for _ in range(nrep):
# start = time() # Calculation using george
# kernelg2 = 15.0**2*ge.kernels.ExpSine2Kernel(2/2.0**2,10.0) +\
# ge.kernels.WhiteKernel(2.0)
# gp = ge.GP(kernelg2)
# gp.compute(x,yerr)
# # gp.lnlikelihood(y)
# tempog2 = time() - start
# av.append(tempog2)
# georgeESS.append(sum(av) / float(nrep))
#
#N = pontos
#
#pl.figure()
#pl.loglog(N, temposESS, 'b-o')
#pl.loglog(N, georgeESS, 'b--')
#
#pl.xlim(0.9*N[0], 1.1*N[-1])
#pl.xlabel('Number of points')
#pl.ylabel('Time')
##pl.title('Covariance matrix calculations')
#pl.legend(['gedi ESS+WN','george ESS+WN'], loc='upper left')
#pl.xticks(fontsize = 18);pl.yticks(fontsize=18)
#pl.savefig('speedtest_2.png')
##pl.close('all') | mit |
xzh86/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 248 | 6359 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size / 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size / 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size / 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size / 5):(x_size / 2 - 1)],
err_cov_emp_full.mean(1)[(x_size / 5):(x_size / 2 - 1)], color='green',
ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
jaredleekatzman/DeepSurv | deepsurv/utils.py | 1 | 3139 | '''
Utility functions for running DeepSurv experiments
'''
import h5py
import scipy.stats as st
from collections import defaultdict
import numpy as np
import pandas as pd
import copy
import lasagne
def load_datasets(dataset_file):
datasets = defaultdict(dict)
with h5py.File(dataset_file, 'r') as fp:
for ds in fp:
for array in fp[ds]:
datasets[ds][array] = fp[ds][array][:]
return datasets
def format_dataset_to_df(dataset, duration_col, event_col, trt_idx = None):
xdf = pd.DataFrame(dataset['x'])
if trt_idx is not None:
xdf = xdf.rename(columns={trt_idx : 'treat'})
dt = pd.DataFrame(dataset['t'], columns=[duration_col])
censor = pd.DataFrame(dataset['e'], columns=[event_col])
cdf = pd.concat([xdf, dt, censor], axis=1)
return cdf
def standardize_dataset(dataset, offset, scale):
norm_ds = copy.deepcopy(dataset)
norm_ds['x'] = (norm_ds['x'] - offset) / scale
return norm_ds
def bootstrap_metric(metric_fxn, dataset, N=100):
def sample_dataset(dataset, sample_idx):
sampled_dataset = {}
for (key,value) in dataset.items():
sampled_dataset[key] = value[sample_idx]
return sampled_dataset
metrics = []
size = len(dataset['x'])
for _ in range(N):
resample_idx = np.random.choice(size, size=size, replace = True)
metric = metric_fxn(**sample_dataset(dataset, resample_idx))
metrics.append(metric)
# Find mean and 95% confidence interval
mean = np.mean(metrics)
conf_interval = st.t.interval(0.95, len(metrics)-1, loc=mean, scale=st.sem(metrics))
return {
'mean': mean,
'confidence_interval': conf_interval
}
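# Illustrative (non-executable) sketch of how bootstrap_metric is meant to be
# called. `concordance_metric` and 'example.h5' are hypothetical placeholders;
# the callable must accept the dataset arrays as keyword arguments (e.g. x, t,
# e), since bootstrap_metric unpacks the resampled dataset with **kwargs:
#
#   datasets = load_datasets('example.h5')
#   stats = bootstrap_metric(concordance_metric, datasets['test'], N=50)
#   print(stats['mean'], stats['confidence_interval'])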
def get_optimizer_from_str(update_fn):
if update_fn == 'sgd':
return lasagne.updates.sgd
elif update_fn == 'adam':
return lasagne.updates.adam
elif update_fn == 'rmsprop':
return lasagne.updates.rmsprop
return None
def calculate_recs_and_antirecs(rec_trt, true_trt, dataset, print_metrics=True):
if isinstance(true_trt, int):
true_trt = dataset['x'][:,true_trt]
# trt_values = zip([0,1],np.sort(np.unique(true_trt)))
trt_values = enumerate(np.sort(np.unique(true_trt)))
equal_trt = [np.logical_and(rec_trt == rec_value, true_trt == true_value) for (rec_value, true_value) in trt_values]
rec_idx = np.logical_or(*equal_trt)
# original Logic
# rec_idx = np.logical_or(np.logical_and(rec_trt == 1,true_trt == 1),
# np.logical_and(rec_trt == 0,true_trt == 0))
rec_t = dataset['t'][rec_idx]
antirec_t = dataset['t'][~rec_idx]
rec_e = dataset['e'][rec_idx]
antirec_e = dataset['e'][~rec_idx]
if print_metrics:
print("Printing treatment recommendation metrics")
metrics = {
'rec_median' : np.median(rec_t),
'antirec_median' : np.median(antirec_t)
}
print("Recommendation metrics:", metrics)
return {
'rec_t' : rec_t,
'rec_e' : rec_e,
'antirec_t' : antirec_t,
'antirec_e' : antirec_e
}
| mit |
huongttlan/statsmodels | statsmodels/datasets/template_data.py | 31 | 1680 | #! /usr/bin/env python
"""Name of dataset."""
__docformat__ = 'restructuredtext'
COPYRIGHT = """E.g., This is public domain."""
TITLE = """Title of the dataset"""
SOURCE = """
This section should provide a link to the original dataset if possible and
attribution and correspondence information for the dataset's original author
if so desired.
"""
DESCRSHORT = """A short description."""
DESCRLONG = """A longer description of the dataset."""
#suggested notes
NOTE = """
::
Number of observations:
Number of variables:
Variable name definitions:
Any other useful information that does not fit into the above categories.
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray(data, endog_idx=0, exog_idx=None, dtype=float)
def load_pandas():
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray_pandas(data, endog_idx=0, exog_idx=None,
dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = np.recfromtxt(open(filepath + '/DatasetName.csv', 'rb'),
delimiter=",", names=True, dtype=float)
return data
| bsd-3-clause |
rohit21122012/DCASE2013 | runs/2013/dnn_layerwise/bs1024/dnn_4layer/src/dataset.py | 37 | 78389 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import urllib2
import socket
import locale
import zipfile
import tarfile
from sklearn.cross_validation import StratifiedShuffleSplit, KFold
from ui import *
from general import *
from files import *
class Dataset(object):
"""Dataset base class.
    Specific dataset classes inherit from this class and reimplement only the methods they need.
"""
def __init__(self, data_path='data', name='dataset'):
"""__init__ method.
Parameters
----------
data_path : str
Basepath where the dataset is stored.
(Default value='data')
"""
# Folder name for dataset
self.name = name
# Path to the dataset
self.local_path = os.path.join(data_path, self.name)
# Create the dataset path if does not exist
if not os.path.isdir(self.local_path):
os.makedirs(self.local_path)
# Evaluation setup folder
self.evaluation_setup_folder = 'evaluation_setup'
# Path to the folder containing evaluation setup files
self.evaluation_setup_path = os.path.join(self.local_path, self.evaluation_setup_folder)
# Meta data file, csv-format
self.meta_filename = 'meta.txt'
# Path to meta data file
self.meta_file = os.path.join(self.local_path, self.meta_filename)
# Hash file to detect removed or added files
self.filelisthash_filename = 'filelist.hash'
# Number of evaluation folds
self.evaluation_folds = 1
# List containing dataset package items
# Define this in the inherited class.
# Format:
# {
# 'remote_package': download_url,
# 'local_package': os.path.join(self.local_path, 'name_of_downloaded_package'),
# 'local_audio_path': os.path.join(self.local_path, 'name_of_folder_containing_audio_files'),
# }
self.package_list = []
# List of audio files
self.files = None
# List of meta data dict
self.meta_data = None
# Training meta data for folds
self.evaluation_data_train = {}
# Testing meta data for folds
self.evaluation_data_test = {}
# Recognized audio extensions
self.audio_extensions = {'wav', 'flac'}
# Info fields for dataset
self.authors = ''
self.name_remote = ''
self.url = ''
self.audio_source = ''
self.audio_type = ''
self.recording_device_model = ''
self.microphone_model = ''
@property
def audio_files(self):
"""Get all audio files in the dataset
Parameters
----------
Nothing
Returns
-------
filelist : list
File list with absolute paths
"""
if self.files is None:
self.files = []
for item in self.package_list:
path = item['local_audio_path']
if path:
l = os.listdir(path)
for f in l:
file_name, file_extension = os.path.splitext(f)
if file_extension[1:] in self.audio_extensions:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
@property
def audio_file_count(self):
"""Get number of audio files in dataset
Parameters
----------
Nothing
Returns
-------
filecount : int
Number of audio files
"""
return len(self.audio_files)
@property
def meta(self):
"""Get meta data for dataset. If not already read from disk, data is read and returned.
Parameters
----------
Nothing
Returns
-------
meta_data : list
List containing meta data as dict.
Raises
-------
IOError
meta file not found.
"""
if self.meta_data is None:
self.meta_data = []
meta_id = 0
if os.path.isfile(self.meta_file):
f = open(self.meta_file, 'rt')
try:
reader = csv.reader(f, delimiter='\t')
for row in reader:
if len(row) == 2:
# Scene meta
self.meta_data.append({'file': row[0], 'scene_label': row[1].rstrip()})
elif len(row) == 4:
# Audio tagging meta
self.meta_data.append(
{'file': row[0], 'scene_label': row[1].rstrip(), 'tag_string': row[2].rstrip(),
'tags': row[3].split(';')})
elif len(row) == 6:
# Event meta
self.meta_data.append({'file': row[0],
'scene_label': row[1].rstrip(),
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4].rstrip(),
'event_type': row[5].rstrip(),
'id': meta_id
})
meta_id += 1
finally:
f.close()
else:
raise IOError("Meta file not found [%s]" % self.meta_file)
return self.meta_data
@property
def meta_count(self):
"""Number of meta data items.
Parameters
----------
Nothing
Returns
-------
meta_item_count : int
Meta data item count
"""
return len(self.meta)
@property
def fold_count(self):
"""Number of fold in the evaluation setup.
Parameters
----------
Nothing
Returns
-------
fold_count : int
Number of folds
"""
return self.evaluation_folds
@property
def scene_labels(self):
"""List of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of scene labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'scene_label' in item and item['scene_label'] not in labels:
labels.append(item['scene_label'])
labels.sort()
return labels
@property
def scene_label_count(self):
"""Number of unique scene labels in the meta data.
Parameters
----------
Nothing
Returns
-------
scene_label_count : int
Number of unique scene labels.
"""
return len(self.scene_labels)
@property
def event_labels(self):
"""List of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of event labels in alphabetical order.
"""
labels = []
for item in self.meta:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
@property
def event_label_count(self):
"""Number of unique event labels in the meta data.
Parameters
----------
Nothing
Returns
-------
event_label_count : int
Number of unique event labels
"""
return len(self.event_labels)
@property
def audio_tags(self):
"""List of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
labels : list
List of audio tags in alphabetical order.
"""
tags = []
for item in self.meta:
if 'tags' in item:
for tag in item['tags']:
if tag and tag not in tags:
tags.append(tag)
tags.sort()
return tags
@property
def audio_tag_count(self):
"""Number of unique audio tags in the meta data.
Parameters
----------
Nothing
Returns
-------
audio_tag_count : int
Number of unique audio tags
"""
return len(self.audio_tags)
def __getitem__(self, i):
"""Getting meta data item
Parameters
----------
i : int
item id
Returns
-------
meta_data : dict
Meta data item
"""
if i < len(self.meta):
return self.meta[i]
else:
return None
def __iter__(self):
"""Iterator for meta data items
Parameters
----------
Nothing
Returns
-------
Nothing
"""
i = 0
meta = self[i]
# yield window while it's valid
while meta is not None:
yield meta
# get next item
i += 1
meta = self[i]
@staticmethod
def print_bytes(num_bytes):
"""Output number of bytes according to locale and with IEC binary prefixes
Parameters
----------
num_bytes : int > 0 [scalar]
Bytes
Returns
-------
bytes : str
Human readable string
"""
KiB = 1024
MiB = KiB * KiB
GiB = KiB * MiB
TiB = KiB * GiB
PiB = KiB * TiB
EiB = KiB * PiB
ZiB = KiB * EiB
YiB = KiB * ZiB
locale.setlocale(locale.LC_ALL, '')
output = locale.format("%d", num_bytes, grouping=True) + ' bytes'
if num_bytes > YiB:
output += ' (%.4g YiB)' % (num_bytes / YiB)
elif num_bytes > ZiB:
output += ' (%.4g ZiB)' % (num_bytes / ZiB)
elif num_bytes > EiB:
output += ' (%.4g EiB)' % (num_bytes / EiB)
elif num_bytes > PiB:
output += ' (%.4g PiB)' % (num_bytes / PiB)
elif num_bytes > TiB:
output += ' (%.4g TiB)' % (num_bytes / TiB)
elif num_bytes > GiB:
output += ' (%.4g GiB)' % (num_bytes / GiB)
elif num_bytes > MiB:
output += ' (%.4g MiB)' % (num_bytes / MiB)
elif num_bytes > KiB:
output += ' (%.4g KiB)' % (num_bytes / KiB)
return output
def download(self):
"""Download dataset over the internet to the local path
Parameters
----------
Nothing
Returns
-------
Nothing
Raises
-------
IOError
Download failed.
"""
section_header('Download dataset')
for item in self.package_list:
try:
if item['remote_package'] and not os.path.isfile(item['local_package']):
data = None
req = urllib2.Request(item['remote_package'], data, {})
handle = urllib2.urlopen(req)
if "Content-Length" in handle.headers.items():
size = int(handle.info()["Content-Length"])
else:
size = None
actualSize = 0
blocksize = 64 * 1024
tmp_file = os.path.join(self.local_path, 'tmp_file')
fo = open(tmp_file, "wb")
terminate = False
while not terminate:
block = handle.read(blocksize)
actualSize += len(block)
if size:
progress(title_text=os.path.split(item['local_package'])[1],
percentage=actualSize / float(size),
note=self.print_bytes(actualSize))
else:
progress(title_text=os.path.split(item['local_package'])[1],
note=self.print_bytes(actualSize))
if len(block) == 0:
break
fo.write(block)
fo.close()
os.rename(tmp_file, item['local_package'])
except (urllib2.URLError, socket.timeout), e:
                try:
                    fo.close()
                except:
                    pass
                # surface the failure instead of silently continuing with a partial file
                raise IOError('Download failed [%s]' % (item['remote_package']))
foot()
def extract(self):
"""Extract the dataset packages
Parameters
----------
Nothing
Returns
-------
Nothing
"""
section_header('Extract dataset')
for item_id, item in enumerate(self.package_list):
if item['local_package']:
if item['local_package'].endswith('.zip'):
with zipfile.ZipFile(item['local_package'], "r") as z:
# Trick to omit first level folder
parts = []
for name in z.namelist():
if not name.endswith('/'):
parts.append(name.split('/')[:-1])
prefix = os.path.commonprefix(parts) or ''
if prefix:
if len(prefix) > 1:
prefix_ = list()
prefix_.append(prefix[0])
prefix = prefix_
prefix = '/'.join(prefix) + '/'
offset = len(prefix)
# Start extraction
members = z.infolist()
file_count = 1
for i, member in enumerate(members):
if len(member.filename) > offset:
member.filename = member.filename[offset:]
if not os.path.isfile(os.path.join(self.local_path, member.filename)):
z.extract(member, self.local_path)
progress(title_text='Extracting ['+str(item_id)+'/'+str(len(self.package_list))+']', percentage=(file_count / float(len(members))),
note=member.filename)
file_count += 1
elif item['local_package'].endswith('.tar.gz'):
tar = tarfile.open(item['local_package'], "r:gz")
for i, tar_info in enumerate(tar):
if not os.path.isfile(os.path.join(self.local_path, tar_info.name)):
tar.extract(tar_info, self.local_path)
progress(title_text='Extracting ['+str(item_id)+'/'+str(len(self.package_list))+']', note=tar_info.name)
tar.members = []
tar.close()
foot()
def on_after_extract(self):
"""Dataset meta data preparation, this will be overloaded in dataset specific classes
Parameters
----------
Nothing
Returns
-------
Nothing
"""
pass
def get_filelist(self):
"""List of files under local_path
Parameters
----------
Nothing
Returns
-------
filelist: list
File list
"""
filelist = []
for path, subdirs, files in os.walk(self.local_path):
for name in files:
filelist.append(os.path.join(path, name))
return filelist
def check_filelist(self):
"""Generates hash from file list and check does it matches with one saved in filelist.hash.
If some files have been deleted or added, checking will result False.
Parameters
----------
Nothing
Returns
-------
result: bool
Result
"""
if os.path.isfile(os.path.join(self.local_path, self.filelisthash_filename)):
            hash_value = load_text(os.path.join(self.local_path, self.filelisthash_filename))[0]
            return hash_value == get_parameter_hash(sorted(self.get_filelist()))
else:
return False
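    # Note: this check is what lets fetch() below skip re-downloading and re-extracting;
    # the hash covers the sorted list of file paths under local_path, so adding or
    # removing any file invalidates it.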
def save_filelist_hash(self):
"""Generates file list hash, and saves it as filelist.hash under local_path.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
filelist = self.get_filelist()
filelist_hash_not_found = True
for file in filelist:
if self.filelisthash_filename in file:
filelist_hash_not_found = False
if filelist_hash_not_found:
filelist.append(os.path.join(self.local_path, self.filelisthash_filename))
save_text(os.path.join(self.local_path, self.filelisthash_filename), get_parameter_hash(sorted(filelist)))
def fetch(self):
"""Download, extract and prepare the dataset.
Parameters
----------
Nothing
Returns
-------
        self : Dataset
            The dataset instance itself, to allow call chaining.
"""
if not self.check_filelist():
self.download()
self.extract()
self.on_after_extract()
self.save_filelist_hash()
return self
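    # Sketch of the intended call pattern (illustration only; the subclass name refers
    # to one of the concrete dataset classes defined later in this module):
    #
    #   dataset = TUTAcousticScenes_2016_DevelopmentSet(data_path='data').fetch()
    #   for fold in dataset.folds(mode='folds'):
    #       training_items = dataset.train(fold)
    #       testing_items = dataset.test(fold)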
def train(self, fold=0):
"""List of training items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to training set for given fold.
"""
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 2:
# Scene meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1]
})
elif len(row) == 4:
# Audio tagging meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'tag_string': row[2],
'tags': row[3].split(';')
})
elif len(row) == 5:
# Event meta
self.evaluation_data_train[fold].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
else:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label']
})
self.evaluation_data_train[0] = data
return self.evaluation_data_train[fold]
def test(self, fold=0):
"""List of testing items.
Parameters
----------
fold : int > 0 [scalar]
Fold id, if zero all meta data is returned.
(Default value=0)
Returns
-------
list : list of dicts
List containing all meta data assigned to testing set for given fold.
"""
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[fold] = data
return self.evaluation_data_test[fold]
def folds(self, mode='folds'):
"""List of fold ids
Parameters
----------
mode : str {'folds','full'}
            Fold setup type, possible values are 'folds' and 'full'. In 'full' mode the fold number is set to 0 and all data is used for training.
(Default value=folds)
Returns
-------
list : list of integers
Fold ids
"""
if mode == 'folds':
return range(1, self.evaluation_folds + 1)
elif mode == 'full':
return [0]
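    # e.g. with evaluation_folds = 4: folds('folds') -> [1, 2, 3, 4], folds('full') -> [0]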
def file_meta(self, file):
"""Meta data for given file
Parameters
----------
file : str
File name
Returns
-------
list : list of dicts
List containing all meta data related to given file.
"""
file = self.absolute_to_relative(file)
file_meta = []
for item in self.meta:
if item['file'] == file:
file_meta.append(item)
return file_meta
def relative_to_absolute_path(self, path):
"""Converts relative path into absolute path.
Parameters
----------
path : str
Relative path
Returns
-------
path : str
Absolute path
"""
return os.path.abspath(os.path.join(self.local_path, path))
def absolute_to_relative(self, path):
"""Converts absolute path into relative path.
Parameters
----------
path : str
Absolute path
Returns
-------
path : str
Relative path
"""
if path.startswith(os.path.abspath(self.local_path)):
return os.path.relpath(path, self.local_path)
else:
return path
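    # For example, with local_path='data/TUT-acoustic-scenes-2016-development' (hypothetical),
    # relative_to_absolute_path('audio/a.wav') returns the absolute path under local_path,
    # and absolute_to_relative() inverts it for paths inside local_path; any other path
    # is returned unchanged.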
# =====================================================
# DCASE 2016
# =====================================================
class TUTAcousticScenes_2016_DevelopmentSet(Dataset):
"""TUT Acoustic scenes 2016 development dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, development dataset'
self.url = 'https://zenodo.org/record/45739'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.1.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.1.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.2.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.2.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.3.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.3.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.4.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.4.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.5.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.5.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.6.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.6.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.7.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.7.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45739/files/TUT-acoustic-scenes-2016-development.audio.8.zip',
'local_package': os.path.join(self.local_path, 'TUT-acoustic-scenes-2016-development.audio.8.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
}
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
meta_data = {}
            for fold in xrange(1, self.evaluation_folds + 1):
# Read train files in
train_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')
f = open(train_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
# Read evaluation files in
eval_filename = os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt')
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
class TUTAcousticScenes_2016_EvaluationSet(Dataset):
"""TUT Acoustic scenes 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Acoustic Scenes 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
eval_filename = os.path.join(self.evaluation_setup_path, 'evaluate.txt')
if not os.path.isfile(self.meta_file) and os.path.isfile(eval_filename):
section_header('Generating meta file for dataset')
meta_data = {}
f = open(eval_filename, 'rt')
reader = csv.reader(f, delimiter='\t')
for row in reader:
if row[0] not in meta_data:
meta_data[row[0]] = row[1]
f.close()
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in meta_data:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = meta_data[file]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
def train(self, fold=0):
raise IOError('Train setup not available.')
# TUT Sound events 2016 development and evaluation sets
class TUTSoundEvents_2016_DevelopmentSet(Dataset):
"""TUT Sound events 2016 development dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-development')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, development dataset'
self.url = 'https://zenodo.org/record/45759'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 4
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.doc.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.doc.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.meta.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.meta.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': 'https://zenodo.org/record/45759/files/TUT-sound-events-2016-development.audio.zip',
'local_package': os.path.join(self.local_path, 'TUT-sound-events-2016-development.audio.zip'),
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
]
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'].rstrip() not in labels:
labels.append(item['event_label'].rstrip())
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'), base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_train:
self.evaluation_data_train[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_train[fold]:
self.evaluation_data_train[fold][scene_label_] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, scene_label_+'_fold' + str(fold) + '_train.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
if len(row) == 5:
# Event meta
self.evaluation_data_train[fold][scene_label_].append({
'file': self.relative_to_absolute_path(row[0]),
'scene_label': row[1],
'event_onset': float(row[2]),
'event_offset': float(row[3]),
'event_label': row[4]
})
else:
data = []
for item in self.meta:
if item['scene_label'] == scene_label_:
if 'event_label' in item:
data.append({'file': self.relative_to_absolute_path(item['file']),
'scene_label': item['scene_label'],
'event_onset': item['event_onset'],
'event_offset': item['event_offset'],
'event_label': item['event_label'],
})
self.evaluation_data_train[0][scene_label_] = data
if scene_label:
return self.evaluation_data_train[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_train[fold][scene_label_]:
data.append(item)
return data
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, scene_label_+'_fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.meta:
                    if item['scene_label'] == scene_label_:
if self.relative_to_absolute_path(item['file']) not in files:
data.append({'file': self.relative_to_absolute_path(item['file'])})
files.append(self.relative_to_absolute_path(item['file']))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
class TUTSoundEvents_2016_EvaluationSet(Dataset):
"""TUT Sound events 2016 evaluation dataset
This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-evaluation')
self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
self.name_remote = 'TUT Sound Events 2016, evaluation dataset'
self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Roland Edirol R-09'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 1
self.package_list = [
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'home'),
},
{
'remote_package': None,
'local_package': None,
'local_audio_path': os.path.join(self.local_path, 'audio', 'residential_area'),
},
]
@property
def scene_labels(self):
labels = ['home', 'residential_area']
labels.sort()
return labels
def event_label_count(self, scene_label=None):
return len(self.event_labels(scene_label=scene_label))
def event_labels(self, scene_label=None):
labels = []
for item in self.meta:
if scene_label is None or item['scene_label'] == scene_label:
if 'event_label' in item and item['event_label'] not in labels:
labels.append(item['event_label'])
labels.sort()
return labels
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file) and os.path.isdir(os.path.join(self.local_path,'meta')):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for filename in self.audio_files:
raw_path, raw_filename = os.path.split(filename)
relative_path = self.absolute_to_relative(raw_path)
scene_label = relative_path.replace('audio', '')[1:]
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(self.local_path, relative_path.replace('audio', 'meta'), base_filename + '.ann')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename),
scene_label,
float(annotation_file_row[0].replace(',', '.')),
float(annotation_file_row[1].replace(',', '.')),
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
def train(self, fold=0, scene_label=None):
raise IOError('Train setup not available.')
def test(self, fold=0, scene_label=None):
if fold not in self.evaluation_data_test:
self.evaluation_data_test[fold] = {}
for scene_label_ in self.scene_labels:
if scene_label_ not in self.evaluation_data_test[fold]:
self.evaluation_data_test[fold][scene_label_] = []
if fold > 0:
with open(os.path.join(self.evaluation_setup_path, scene_label + '_fold' + str(fold) + '_test.txt'), 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
self.evaluation_data_test[fold][scene_label_].append({'file': self.relative_to_absolute_path(row[0])})
else:
data = []
files = []
for item in self.audio_files:
if scene_label_ in item:
if self.relative_to_absolute_path(item) not in files:
data.append({'file': self.relative_to_absolute_path(item)})
files.append(self.relative_to_absolute_path(item))
self.evaluation_data_test[0][scene_label_] = data
if scene_label:
return self.evaluation_data_test[fold][scene_label]
else:
data = []
for scene_label_ in self.scene_labels:
for item in self.evaluation_data_test[fold][scene_label_]:
data.append(item)
return data
# CHIME home
class CHiMEHome_DomesticAudioTag_DevelopmentSet(Dataset):
def __init__(self, data_path=None):
Dataset.__init__(self, data_path=data_path, name = 'CHiMeHome-audiotag-development')
self.authors = 'Peter Foster, Siddharth Sigtia, Sacha Krstulovic, Jon Barker, and Mark Plumbley'
self.name_remote = 'The CHiME-Home dataset is a collection of annotated domestic environment audio recordings.'
self.url = ''
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Unknown'
self.evaluation_folds = 10
self.package_list = [
{
'remote_package': 'https://archive.org/download/chime-home/chime_home.tar.gz',
'local_package': os.path.join(self.local_path, 'chime_home.tar.gz'),
'local_audio_path': os.path.join(self.local_path, 'chime_home', 'chunks'),
},
]
@property
def audio_files(self):
"""Get all audio files in the dataset, use only file from CHime-Home-refined set.
Parameters
----------
nothing
Returns
-------
files : list
audio files
"""
if self.files is None:
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(row[1])
self.files = []
for file in self.package_list:
path = file['local_audio_path']
if path:
l = os.listdir(path)
p = path.replace(self.local_path + os.path.sep, '')
for f in l:
fileName, fileExtension = os.path.splitext(f)
if fileExtension[1:] in self.audio_extensions and fileName in refined_files:
self.files.append(os.path.abspath(os.path.join(path, f)))
self.files.sort()
return self.files
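    # Only chunks listed in chunks_refined.csv (the CHiME-Home-refined subset) are
    # returned; the chunk directory itself contains more files than are used here.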
def read_chunk_meta(self, meta_filename):
if os.path.isfile(meta_filename):
meta_file_handle = open(meta_filename, 'rt')
try:
meta_file_reader = csv.reader(meta_file_handle, delimiter=',')
data = {}
for meta_file_row in meta_file_reader:
data[meta_file_row[0]] = meta_file_row[1]
finally:
meta_file_handle.close()
return data
def tagcode_to_taglabel(self, tag):
map = {'c': 'child speech',
'm': 'adult male speech',
'f': 'adult female speech',
'v': 'video game/tv',
'p': 'percussive sound',
'b': 'broadband noise',
'o': 'other',
'S': 'silence/background',
'U': 'unidentifiable'
}
if tag in map:
return map[tag]
else:
return None
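    # e.g. tagcode_to_taglabel('c') -> 'child speech'; unknown codes return None.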
def on_after_extract(self):
"""After dataset packages are downloaded and extracted, meta-files are checked.
Legacy dataset meta files are converted to be compatible with current scheme.
Parameters
----------
nothing
Returns
-------
nothing
"""
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
scene_label = 'home'
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
annotation_filename = os.path.join(raw_path, base_filename + '.csv')
meta_data = self.read_chunk_meta(annotation_filename)
tags = []
for i, tag in enumerate(meta_data['majorityvote']):
                        if tag == 'b':
                            print file
                        if tag != 'S' and tag != 'U':
tags.append(self.tagcode_to_taglabel(tag))
tags = ';'.join(tags)
writer.writerow(
(os.path.join(relative_path, raw_filename), scene_label, meta_data['majorityvote'], tags))
finally:
f.close()
foot()
all_folds_found = True
        for fold in xrange(1, self.evaluation_folds + 1):
for target_tag in self.audio_tags:
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path,
'fold' + str(fold) + '_' + target_tag.replace('/', '-').replace(' ',
'_') + '_test.txt')):
all_folds_found = False
if not all_folds_found:
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
numpy.random.seed(475686)
kf = KFold(n=len(self.audio_files), n_folds=self.evaluation_folds, shuffle=True)
refined_files = []
with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
for row in csv.reader(f, delimiter=','):
refined_files.append(self.relative_to_absolute_path(os.path.join('chime_home','chunks',row[1]+'.wav')))
fold = 1
files = numpy.array(refined_files)
for train_index, test_index in kf:
train_files = files[train_index]
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],item['tag_string'], ';'.join(item['tags'])])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
item = self.file_meta(file)[0]
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],item['tag_string'], ';'.join(item['tags'])])
fold+= 1
# Legacy datasets
# =====================================================
# DCASE 2013
# =====================================================
class DCASE2013_Scene_DevelopmentSet(Dataset):
"""DCASE 2013 Acoustic scene classification, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Public Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/29/scenes_stereo.zip?sequence=1',
'local_package': os.path.join(self.local_path, 'scenes_stereo.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
        for fold in xrange(1, self.evaluation_folds + 1):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
print self.evaluation_setup_path
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
class DCASE2013_Scene_EvaluationSet(DCASE2013_Scene_DevelopmentSet):
"""DCASE 2013 Acoustic scene classification, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP 2013 CASA Challenge - Private Dataset for Scene Classification Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_scene_classification_testset/scenes_stereo_testset.zip',
'local_package': os.path.join(self.local_path, 'scenes_stereo_testset.zip'),
'local_audio_path': os.path.join(self.local_path, 'scenes_stereo_testset'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
        if not os.path.isfile(self.meta_file):
section_header('Generating meta file for dataset')
f = open(self.meta_file, 'wt')
try:
writer = csv.writer(f, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
label = os.path.splitext(os.path.split(file)[1])[0][:-2]
writer.writerow((os.path.join(relative_path, raw_filename), label))
finally:
f.close()
foot()
all_folds_found = True
        for fold in xrange(1, self.evaluation_folds + 1):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
section_header('Generating evaluation setup files for dataset')
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
classes = []
files = []
for item in self.meta:
classes.append(item['scene_label'])
files.append(item['file'])
files = numpy.array(files)
sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
writer.writerow([os.path.join(raw_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
label = self.file_meta(file)[0]['scene_label']
writer.writerow([os.path.join(raw_path, raw_filename), label])
fold += 1
foot()
# Sound events
class DCASE2013_Event_DevelopmentSet(Dataset):
"""DCASE 2013 Sound event detection, development dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-development')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Public Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_development_OS/events_OS_development_v2.zip',
'local_package': os.path.join(self.local_path, 'events_OS_development_v2.zip'),
'local_audio_path': os.path.join(self.local_path, 'events_OS_development_v2'),
},
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_annotation.zip?sequence=9',
# 'local_package': os.path.join(self.local_path, 'singlesounds_annotation.zip'),
# 'local_audio_path': None,
# },
# {
# 'remote_package':'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_stereo.zip?sequence=7',
# 'local_package': os.path.join(self.local_path, 'singlesounds_stereo.zip'),
# 'local_audio_path': os.path.join(self.local_path, 'singlesounds_stereo'),
# },
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('singlesounds_stereo') != -1:
annotation_filename = os.path.join(self.local_path, 'Annotation1', base_filename + '_bdm.txt')
label = base_filename[:-2]
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1], label, 'i'))
finally:
annotation_file_handle.close()
elif file.find('events_OS_development_v2') != -1:
annotation_filename = os.path.join(self.local_path, 'events_OS_development_v2',
base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
        for fold in xrange(1, self.evaluation_folds + 1):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
            # Construct training and testing sets. Isolated sounds are used for training
            # and polyphonic mixtures for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
class DCASE2013_Event_EvaluationSet(Dataset):
"""DCASE 2013 Sound event detection, evaluation dataset
"""
def __init__(self, data_path='data'):
Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-challenge')
self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
self.name_remote = 'IEEE AASP CASA Challenge - Private Dataset for Event Detection Task'
self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
self.audio_source = 'Field recording'
self.audio_type = 'Natural'
self.recording_device_model = 'Unknown'
self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
self.evaluation_folds = 5
self.package_list = [
{
'remote_package': 'https://archive.org/download/dcase2013_event_detection_testset_OS/dcase2013_event_detection_testset_OS.zip',
'local_package': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS.zip'),
'local_audio_path': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS'),
}
]
def on_after_extract(self):
# Make legacy dataset compatible with DCASE2016 dataset scheme
scene_label = 'office'
if not os.path.isfile(self.meta_file):
meta_file_handle = open(self.meta_file, 'wt')
try:
writer = csv.writer(meta_file_handle, delimiter='\t')
for file in self.audio_files:
raw_path, raw_filename = os.path.split(file)
relative_path = self.absolute_to_relative(raw_path)
base_filename, file_extension = os.path.splitext(raw_filename)
if file.find('dcase2013_event_detection_testset_OS') != -1:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',base_filename + '_v2.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
else:
annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',base_filename + '.txt')
if os.path.isfile(annotation_filename):
annotation_file_handle = open(annotation_filename, 'rt')
try:
annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
for annotation_file_row in annotation_file_reader:
writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
annotation_file_row[0], annotation_file_row[1],
annotation_file_row[2], 'm'))
finally:
annotation_file_handle.close()
finally:
meta_file_handle.close()
all_folds_found = True
        for fold in xrange(1, self.evaluation_folds + 1):
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
all_folds_found = False
if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
all_folds_found = False
if not all_folds_found:
            # Construct training and testing sets. Isolated sounds are used for training
            # and polyphonic mixtures for testing.
if not os.path.isdir(self.evaluation_setup_path):
os.makedirs(self.evaluation_setup_path)
files = []
for item in self.meta:
if item['file'] not in files:
files.append(item['file'])
files = numpy.array(files)
f = numpy.zeros(len(files))
sss = StratifiedShuffleSplit(y=f, n_iter=5, test_size=0.3, random_state=0)
fold = 1
for train_index, test_index in sss:
# print("TRAIN:", train_index, "TEST:", test_index)
train_files = files[train_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in train_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
test_files = files[test_index]
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
writer.writerow([os.path.join(relative_path, raw_filename)])
with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for file in test_files:
raw_path, raw_filename = os.path.split(file)
relative_path = raw_path.replace(self.local_path + os.path.sep, '')
for item in self.meta:
if item['file'] == file:
writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
item['event_onset'], item['event_offset'], item['event_label']])
fold += 1
| mit |
jtian0/project-platform | scripts/runtime_boxplot.py | 2 | 6642 | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
sns.set_context("paper")
sns.set_style("white")
mpl.rcParams["figure.figsize"] = (13, 3)
mpl.rcParams["lines.linewidth"] = 0.5
mpl.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
## for Palatino and other serif fonts use:
# rc('font',**{'family':'serif','serif':['Palatino']})
mpl.rc('text', usetex=True)
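# Note: usetex=True requires a working LaTeX installation on the system; set it to
# False to fall back to matplotlib's built-in mathtext rendering.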
# iter10 _part20 as constant, var interval
interval30_amp2 = [275.954, 280.177, 274.320, 274.743, 274.112, 364.728, 276.615, 274.091, 273.943, 604.349, 277.105,
274.040, 454.166, 277.019, 634.124, 276.679, 454.129, 276.563, 276.512, 514.058]
interval30_amp4 = [753.046, 395.139, 275.336, 333.899, 244.697, 272.489, 305.703, 724.983, 274.867, 272.489, 244.458,
272.264, 782.984, 545.207, 275.018, 602.861, 244.903, 242.275, 242.250, 242.554]
interval30_amp6 = [274.227, 245.370, 247.268, 576.829, 276.689, 334.630, 878.155, 308.197, 936.804, 637.392, 396.147,
667.525, 277.071, 454.492, 246.794, 304.515, 246.589, 274.221, 394.033, 246.293]
interval60_amp2 = [248.125, 243.553, 243.970, 244.187, 366.437, 248.305, 484.267, 366.576, 247.099, 244.225, 303.381,
243.680, 242.977, 243.444, 303.270, 303.174, 245.902, 243.084, 243.073, 304.008]
interval60_amp4 = [545.600, 427.342, 307.065, 304.720, 244.604, 244.308, 665.223, 607.450, 246.906, 424.974, 426.164,
306.752, 305.664, 304.131, 304.441, 785.453, 546.719, 246.498, 364.603, 426.376]
interval60_amp6 = [424.566, 307.057, 904.409, 306.635, 1084.677, 305.442, 244.700, 253.171, 364.849, 1027.450, 246.784,
244.335, 244.243, 244.818, 243.903, 244.228, 425.359, 306.190, 247.000, 244.252]
# iter10 _part10 as constant
iter10_part10_amp2 = [363.034, 453.569, 725.110, 364.962, 362.738, 452.759, 425.189, 486.064, 602.718, 364.927, 362.567,
362.965, 392.682, 790.790, 395.218, 574.775, 575.250, 634.965, 574.736, 487.021]
iter10_part10_amp4 = [393.818, 368.127, 366.250, 395.262, 364.339, 903.560, 366.226, 1055.606, 366.253, 363.802,
395.990, 603.701, 907.099, 1147.173, 365.873, 933.898, 367.646, 875.401, 636.088]
iter10_part10_amp6 = [1058.825, 494.107, 429.667, 653.132, 367.888, 697.416, 365.107, 368.537, 372.947, 850.616,
1043.743, 367.873, 395.362, 815.313, 367.576, 365.072, 364.758, 396.724, 367.339, 424.457]
# iter20 _part10 as constant
# iter20_part10_amp2 = [100, 200, 100, 200, 350, 199, 200]
# iter20_part10_amp4 = [1084.637, 604.134, 1775.890, 605.360, 1353.516, 696.253, 1234.321, 1114.661, 784.178, 1086.745, 604.087, 964.222, 786.007, 1084.233, 604.666, 1115.648, 1534.269, 1085.292, 633.834, 604.117]
# iter20_part10_amp6 = [100, 200, 100, 200, 350, 199, 200]
# iter10 _part20 as constant
iter10_part20_amp2 = [275.954, 280.177, 274.320, 274.743, 274.112, 364.728, 276.615, 274.091, 273.943, 604.349, 277.105,
274.040, 454.166, 277.019, 634.124, 276.679, 454.129, 276.563, 276.512, 514.058]
iter10_part20_amp4 = [753.046, 395.139, 275.336, 333.899, 244.697, 272.489, 305.703, 724.983, 274.867, 272.489, 244.458,
272.264, 782.984, 545.207, 275.018, 602.861, 244.903, 242.275, 242.250, 242.554]
iter10_part20_amp6 = [274.227, 245.370, 247.268, 576.829, 276.689, 334.630, 878.155, 308.197, 936.804, 637.392, 396.147,
667.525, 277.071, 454.492, 246.794, 304.515, 246.589, 274.221, 394.033, 246.293, 276.040, 350.617,
245.184, 247.868, 289.893, 816.280, 279.209, 965.133, 1058.186, 397.964]
# iter20 _part20
iter20_part20_amp2 = [996.507, 727.154, 577.260, 427.369, 431.658, 934.310, 426.683, 844.425, 396.849, 904.318, 966.987,
847.059, 786.707, 576.871, 426.017, 813.947, 936.268, 992.810, 395.078, 902.994]
iter20_part20_amp4 = [996.114, 426.214, 605.763, 1146.145, 995.298, 1235.016, 605.947, 395.353, 1143.801, 1176.241,
1174.518, 1267.214, 396.527, 395.063, 1174.503, 396.837, 1114.501, 396.649, 394.126, 394.754]
iter20_part20_amp6 = [664.679, 427.193, 814.606, 457.204, 396.702, 424.584, 427.767, 1354.478, 425.032, 546.192,
934.660, 606.753, 396.703, 394.381, 394.188, 1025.015, 394.468, 604.779, 398.245, 1054.312]
# interval30_vs_interval60
runtime_1 = [interval30_amp2, interval60_amp2, interval30_amp4, interval60_amp4, interval30_amp6, interval60_amp6]
# part10_vs_part20
runtime_2 = [iter10_part10_amp2, iter10_part20_amp2, iter10_part10_amp4, iter10_part20_amp4, iter10_part10_amp6,
iter10_part20_amp6]
# iter10_vs_iter20
runtime_3 = [iter10_part20_amp2, iter20_part20_amp2, iter10_part20_amp4, iter20_part20_amp4, iter10_part20_amp6,
iter20_part20_amp6]
def flattened_df(dataset, var1, var2, cols):
"""
    :param dataset: list of runtime-sample lists, one list per configuration
    :param var1: per-configuration constant, repeated to the length of each runtime list
    :param var2: ditto
    :param cols: column names of the resulting dataframe
    :return: long-form pandas dataframe with one row per runtime sample
"""
df = pd.DataFrame()
for i, line in enumerate(dataset):
zipped = list(zip(line, [var1[i]] * len(line), [var2[i]] * len(line)))
sec = pd.DataFrame(zipped, columns=cols)
df = pd.concat([df, sec], ignore_index=True)
return df
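# Minimal illustration with made-up numbers (not measurements):
#   flattened_df([[1.0, 2.0], [3.0]], [30, 60], [2, 2], ["runtime", "interval", "amplitude"])
# yields one row per runtime sample in long form, which is the layout sns.boxplot expects below.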
df_runtime_1 = flattened_df(runtime_1,
[30, 60, 30, 60, 30, 60],
[2, 2, 4, 4, 6, 6],
["runtime", "interval", "amplitude"])
df_runtime_2 = flattened_df(runtime_2,
[10, 20, 10, 20, 10, 20],
[2, 2, 4, 4, 6, 6],
["runtime", "part", "amplitude"])
df_runtime_3 = flattened_df(runtime_3,
[10, 20, 10, 20, 10, 20],
[2, 2, 4, 4, 6, 6],
["runtime", "iter", "amplitude"])
data = [df_runtime_1, df_runtime_2, df_runtime_3]
vars = ["interval", "part", "iter"]
titles = ["Running Time Comparison of" + i + " PageRank Application" for i in
["\n30- and 60-second-MTBA", "\n10- and 20-partition", "\n10- and 20-iteration"]]
fig, axes = plt.subplots(nrows=1, ncols=3, sharey=True)
for i, (ax, datum, var, title) in enumerate(zip(axes, data, hue_vars, titles)):
if i != 0:
ax.yaxis.label.set_visible(False)
ax.set_title(title)
sns.boxplot(x="amplitude", y="runtime", hue=var, width=0.5, data=datum, palette="Paired", ax=ax)
# plt.show()
plt.savefig("box.pdf", format="pdf", dpi=300, bbox_inches="tight")
| mit |
jpautom/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features/2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal((n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
Ziqi-Li/bknqgis | pandas/pandas/plotting/_misc.py | 2 | 18195 | # being a bit too dynamic
# pylint: disable=E1101
from __future__ import division
import numpy as np
from pandas.util._decorators import deprecate_kwarg
from pandas.core.dtypes.missing import notna
from pandas.compat import range, lrange, lmap, zip
from pandas.io.formats.printing import pprint_thing
from pandas.plotting._style import _get_standard_colors
from pandas.plotting._tools import _subplots, _set_ticks_props
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = notna(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
# GH 14855
kwds.setdefault('edgecolors', 'none')
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
if j != 0:
ax.yaxis.set_visible(False)
if i != n - 1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes
def _get_marker_compat(marker):
import matplotlib.lines as mlines
import matplotlib as mpl
if mpl.__version__ < '1.1.0' and marker == '.':
return 'o'
if marker not in mlines.lineMarkers:
return 'o'
return marker
def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""RadViz - a multivariate data visualization algorithm
Parameters:
-----------
frame: DataFrame
class_column: str
Column name containing class names
ax: Matplotlib axis object, optional
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib scatter plotting method
Returns:
--------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot = {}
colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
color_type='random', color=color)
for kls in classes:
to_plot[kls] = [[], []]
m = len(frame.columns) - 1
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m))
for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
label=pprint_thing(kls), **kwds)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax
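# Illustrative sketch (added, not part of the original module): radviz places
# one anchor per feature column on the unit circle and maps each row to the
# value-weighted average of those anchors (after per-column normalization).
# The tiny frame below is an arbitrary assumption for demonstration.
def _example_radviz_projection():
    import numpy as np
    from pandas import DataFrame
    df = DataFrame({'f1': [0.1, 0.9], 'f2': [0.8, 0.2],
                    'f3': [0.3, 0.7], 'cls': ['a', 'b']})
    m = 3  # number of feature columns
    anchors = np.array([(np.cos(t), np.sin(t))
                        for t in 2.0 * np.pi * np.arange(m) / m])
    row = df.drop('cls', axis=1).iloc[0].values
    # same weighted-average mapping as in radviz (which also normalizes first)
    point = (anchors * row[:, np.newaxis]).sum(axis=0) / row.sum()
    ax = radviz(df, 'cls')
    return point, ax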
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame')
def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Generates a matplotlib plot of Andrews curves, for visualising clusters of
multivariate data.
Andrews curves have the functional form:
f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +
x_4 sin(2t) + x_5 cos(2t) + ...
Where x coefficients correspond to the values of each dimension and t is
linearly spaced between -pi and +pi. Each row of frame then corresponds to
a single curve.
Parameters:
-----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color: list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds: keywords
Options to pass to matplotlib plotting method
Returns:
--------
ax: Matplotlib axis object
"""
from math import sqrt, pi
import matplotlib.pyplot as plt
def function(amplitudes):
def f(t):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
# Take the rest of the coefficients and resize them
# appropriately. Take a copy of amplitudes as otherwise numpy
# deletes the element from amplitudes itself.
coeffs = np.delete(np.copy(amplitudes), 0)
coeffs.resize(int((coeffs.size + 1) / 2), 2)
# Generate the harmonics and arguments for the sin and cos
# functions.
harmonics = np.arange(0, coeffs.shape[0]) + 1
trig_args = np.outer(harmonics, t)
result += np.sum(coeffs[:, 0, np.newaxis] * np.sin(trig_args) +
coeffs[:, 1, np.newaxis] * np.cos(trig_args),
axis=0)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
t = np.linspace(-pi, pi, samples)
used_legends = set([])
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = f(t)
kls = class_col.iat[i]
label = pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(t, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(t, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax
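# Illustrative sketch (added, not part of the original module): evaluating the
# Andrews functional form by hand for one 4-dimensional observation, mirroring
# the `function()` closure above. The coefficients and t are assumptions.
def _example_andrews_curve_value():
    import numpy as np
    x = np.array([1.0, 0.5, -0.5, 0.25])  # x_1 .. x_4
    t = 0.3
    # f(t) = x_1/sqrt(2) + x_2*sin(t) + x_3*cos(t) + x_4*sin(2t)
    return (x[0] / np.sqrt(2.0) + x[1] * np.sin(t) +
            x[2] * np.cos(t) + x[3] * np.sin(2 * t))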
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""Bootstrap plot.
Parameters:
-----------
series: Time series
fig: matplotlib figure object, optional
size: number of data points to consider during each sampling
samples: number of times the bootstrap procedure is performed
kwds: optional keyword arguments for plotting commands, must be accepted
by both hist and plot
Returns:
--------
fig: matplotlib figure
"""
import random
import matplotlib.pyplot as plt
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array([(min(sampling) + max(sampling)) * 0.5
for sampling in samplings])
if fig is None:
fig = plt.figure()
x = lrange(samples)
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig
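# Illustrative sketch (added, not part of the original module): a minimal call
# of bootstrap_plot on a synthetic series. The series length and the `size`
# and `samples` values are arbitrary assumptions.
def _example_bootstrap_plot():
    import numpy as np
    from pandas import Series
    s = Series(np.random.randn(300))
    # 500 resamples of 50 points each; the six panels show the sampling
    # distributions of the mean, median and midrange.
    return bootstrap_plot(s, size=50, samples=500)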
@deprecate_kwarg(old_arg_name='colors', new_arg_name='color')
@deprecate_kwarg(old_arg_name='data', new_arg_name='frame', stacklevel=3)
def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
axvlines=True, axvlines_kwds=None, sort_labels=False,
**kwds):
"""Parallel coordinates plotting.
Parameters
----------
frame: DataFrame
class_column: str
Column name containing class names
cols: list, optional
A list of column names to use
ax: matplotlib.axis, optional
matplotlib axis object
color: list or tuple, optional
Colors to use for the different classes
use_columns: bool, optional
If true, columns will be used as xticks
xticks: list or tuple, optional
A list of values to use for xticks
colormap: str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines: bool, optional
If true, vertical lines will be added at each xtick
axvlines_kwds: keywords, optional
Options to be passed to axvline method for vertical lines
sort_labels: bool, False
Sort class_column labels, useful when assigning colours
.. versionadded:: 0.20.0
kwds: keywords
Options to pass to matplotlib plotting method
Returns
-------
ax: matplotlib axis object
Examples
--------
>>> from pandas import read_csv
>>> from pandas.tools.plotting import parallel_coordinates
>>> from matplotlib import pyplot as plt
>>> df = read_csv('https://raw.github.com/pandas-dev/pandas/master'
'/pandas/tests/data/iris.csv')
>>> parallel_coordinates(df, 'Name', color=('#556270',
'#4ECDC4', '#C7F464'))
>>> plt.show()
"""
if axvlines_kwds is None:
axvlines_kwds = {'linewidth': 1, 'color': 'black'}
import matplotlib.pyplot as plt
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
if cols is None:
df = frame.drop(class_column, axis=1)
else:
df = frame[cols]
used_legends = set([])
ncols = len(df.columns)
# determine values to use for xticks
if use_columns is True:
if not np.all(np.isreal(list(df.columns))):
raise ValueError('Columns must be numeric to be used as xticks')
x = df.columns
elif xticks is not None:
if not np.all(np.isreal(xticks)):
raise ValueError('xticks specified must be numeric')
elif len(xticks) != ncols:
raise ValueError('Length of xticks must match number of columns')
x = xticks
else:
x = lrange(ncols)
if ax is None:
ax = plt.gca()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
if sort_labels:
classes = sorted(classes)
color_values = sorted(color_values)
colors = dict(zip(classes, color_values))
for i in range(n):
y = df.iloc[i].values
kls = class_col.iat[i]
label = pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
if axvlines:
for i in x:
ax.axvline(i, **axvlines_kwds)
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
ax.set_xlim(x[0], x[-1])
ax.legend(loc='upper right')
ax.grid()
return ax
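# Illustrative sketch (added, not part of the original module): parallel
# coordinates on a small synthetic frame, using the (numeric) column labels
# themselves as xticks. Column labels and class names are assumptions.
def _example_parallel_coordinates():
    import numpy as np
    from pandas import DataFrame
    df = DataFrame(np.random.rand(20, 3), columns=[1.0, 2.0, 3.0])
    df['cls'] = np.where(df[1.0] > 0.5, 'high', 'low')
    # use_columns=True requires numeric column labels (validated above)
    return parallel_coordinates(df, 'cls', use_columns=True)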
def lag_plot(series, lag=1, ax=None, **kwds):
"""Lag plot for time series.
Parameters:
-----------
series: Time series
lag: lag of the scatter plot, default 1
ax: Matplotlib axis object, optional
kwds: Matplotlib scatter method keyword arguments, optional
Returns:
--------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
    # workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
if ax is None:
ax = plt.gca()
ax.set_xlabel("y(t)")
ax.set_ylabel("y(t + %s)" % lag)
ax.scatter(y1, y2, **kwds)
return ax
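# Illustrative sketch (added, not part of the original module): a lag plot of
# an autocorrelated series should cluster around the diagonal, while white
# noise fills the plane. The synthetic series below is an assumption.
def _example_lag_plot():
    import numpy as np
    from pandas import Series
    noise = np.random.randn(500)
    data = 0.1 * np.cumsum(noise) + noise  # loosely autocorrelated series
    return lag_plot(Series(data), lag=1)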
def autocorrelation_plot(series, ax=None, **kwds):
"""Autocorrelation plot for time series.
Parameters:
-----------
series: Time series
ax: Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
    Returns:
    --------
ax: Matplotlib axis object
"""
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
if ax is None:
ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
return ((data[:n - h] - mean) *
(data[h:] - mean)).sum() / float(n) / c0
x = np.arange(n) + 1
y = lmap(r, x)
z95 = 1.959963984540054
z99 = 2.5758293035489004
ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
ax.axhline(y=z95 / np.sqrt(n), color='grey')
ax.axhline(y=0.0, color='black')
ax.axhline(y=-z95 / np.sqrt(n), color='grey')
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax
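# Illustrative sketch (added, not part of the original module): the sample
# autocorrelation r(h) computed directly for one lag, matching the r() helper
# above; the synthetic series and lag are assumptions.
def _example_sample_autocorrelation(h=1):
    import numpy as np
    data = np.sin(np.linspace(0, 8 * np.pi, 200)) + 0.1 * np.random.randn(200)
    n, mean = len(data), data.mean()
    c0 = ((data - mean) ** 2).sum() / float(n)
    r_h = ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0
    # values outside +/- z95/sqrt(n) (the solid grey lines in the plot) are
    # significantly non-zero at the 5% level
    return r_h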
| gpl-2.0 |
jmmease/pandas | pandas/tests/io/msgpack/test_case.py | 13 | 2740 | # coding: utf-8
from pandas.io.msgpack import packb, unpackb
def check(length, obj):
v = packb(obj)
assert len(v) == length, \
"%r length should be %r but get %r" % (obj, length, len(v))
assert unpackb(v, use_list=0) == obj
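# Illustrative sketch (added, not part of the original tests): the helper
# above asserts the encoded width of a value; e.g. a small positive int packs
# into one byte, while 128 needs a uint8 marker plus payload (two bytes).
def _example_check_widths():
    assert len(packb(1)) == 1
    assert len(packb(128)) == 2
    assert unpackb(packb(128), use_list=0) == 128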
def test_1():
for o in [None, True, False, 0, 1, (1 << 6), (1 << 7) - 1, -1,
-((1 << 5) - 1), -(1 << 5)]:
check(1, o)
def test_2():
for o in [1 << 7, (1 << 8) - 1, -((1 << 5) + 1), -(1 << 7)]:
check(2, o)
def test_3():
for o in [1 << 8, (1 << 16) - 1, -((1 << 7) + 1), -(1 << 15)]:
check(3, o)
def test_5():
for o in [1 << 16, (1 << 32) - 1, -((1 << 15) + 1), -(1 << 31)]:
check(5, o)
def test_9():
for o in [1 << 32, (1 << 64) - 1, -((1 << 31) + 1), -(1 << 63), 1.0, 0.1,
-0.1, -1.0]:
check(9, o)
def check_raw(overhead, num):
check(num + overhead, b" " * num)
def test_fixraw():
check_raw(1, 0)
check_raw(1, (1 << 5) - 1)
def test_raw16():
check_raw(3, 1 << 5)
check_raw(3, (1 << 16) - 1)
def test_raw32():
check_raw(5, 1 << 16)
def check_array(overhead, num):
check(num + overhead, (None, ) * num)
def test_fixarray():
check_array(1, 0)
check_array(1, (1 << 4) - 1)
def test_array16():
check_array(3, 1 << 4)
check_array(3, (1 << 16) - 1)
def test_array32():
check_array(5, (1 << 16))
def match(obj, buf):
assert packb(obj) == buf
assert unpackb(buf, use_list=0) == obj
def test_match():
cases = [
(None, b'\xc0'),
(False, b'\xc2'),
(True, b'\xc3'),
(0, b'\x00'),
(127, b'\x7f'),
(128, b'\xcc\x80'),
(256, b'\xcd\x01\x00'),
(-1, b'\xff'),
(-33, b'\xd0\xdf'),
(-129, b'\xd1\xff\x7f'),
({1: 1}, b'\x81\x01\x01'),
(1.0, b"\xcb\x3f\xf0\x00\x00\x00\x00\x00\x00"),
((), b'\x90'),
(tuple(range(15)), (b"\x9f\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09"
b"\x0a\x0b\x0c\x0d\x0e")),
(tuple(range(16)), (b"\xdc\x00\x10\x00\x01\x02\x03\x04\x05\x06\x07"
b"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f")),
({}, b'\x80'),
(dict([(x, x) for x in range(15)]),
(b'\x8f\x00\x00\x01\x01\x02\x02\x03\x03\x04\x04\x05\x05\x06\x06\x07'
b'\x07\x08\x08\t\t\n\n\x0b\x0b\x0c\x0c\r\r\x0e\x0e')),
(dict([(x, x) for x in range(16)]),
(b'\xde\x00\x10\x00\x00\x01\x01\x02\x02\x03\x03\x04\x04\x05\x05\x06'
b'\x06\x07\x07\x08\x08\t\t\n\n\x0b\x0b\x0c\x0c\r\r\x0e\x0e'
b'\x0f\x0f')),
]
for v, p in cases:
match(v, p)
def test_unicode():
assert unpackb(packb('foobar'), use_list=1) == b'foobar'
| bsd-3-clause |
huobaowangxi/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
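# Illustrative follow-up sketch (added, not part of the original example): the
# importance map can also be queried directly, e.g. to locate the k hottest
# pixels; `k` is an arbitrary assumption.
def _top_k_pixels(importance_map, k=10):
    import numpy as np
    idx = np.argsort(importance_map.ravel())[::-1][:k]
    # (row, col) coordinates of the k most important pixels
    return np.column_stack(np.unravel_index(idx, importance_map.shape))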
| bsd-3-clause |
Fireblend/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
lpsinger/astropy | astropy/visualization/wcsaxes/transforms.py | 8 | 5762 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# Note: This file includes code derived from pywcsgrid2
#
# This file contains Matplotlib transformation objects (e.g. from pixel to world
# coordinates, but also world-to-world).
import abc
import numpy as np
from matplotlib.path import Path
from matplotlib.transforms import Transform
from astropy import units as u
from astropy.coordinates import (SkyCoord, frame_transform_graph,
UnitSphericalRepresentation,
BaseCoordinateFrame)
__all__ = ['CurvedTransform', 'CoordinateTransform',
'World2PixelTransform', 'Pixel2WorldTransform']
class CurvedTransform(Transform, metaclass=abc.ABCMeta):
"""
Abstract base class for non-affine curved transforms
"""
input_dims = 2
output_dims = 2
is_separable = False
def transform_path(self, path):
"""
Transform a Matplotlib Path
Parameters
----------
path : :class:`~matplotlib.path.Path`
The path to transform
Returns
-------
path : :class:`~matplotlib.path.Path`
The resulting path
"""
return Path(self.transform(path.vertices), path.codes)
transform_path_non_affine = transform_path
def transform(self, input):
raise NotImplementedError("")
def inverted(self):
raise NotImplementedError("")
class CoordinateTransform(CurvedTransform):
has_inverse = True
def __init__(self, input_system, output_system):
super().__init__()
self._input_system_name = input_system
self._output_system_name = output_system
if isinstance(self._input_system_name, str):
frame_cls = frame_transform_graph.lookup_name(self._input_system_name)
if frame_cls is None:
raise ValueError(f"Frame {self._input_system_name} not found")
else:
self.input_system = frame_cls()
elif isinstance(self._input_system_name, BaseCoordinateFrame):
self.input_system = self._input_system_name
else:
raise TypeError("input_system should be a WCS instance, string, or a coordinate frame instance")
if isinstance(self._output_system_name, str):
frame_cls = frame_transform_graph.lookup_name(self._output_system_name)
if frame_cls is None:
raise ValueError(f"Frame {self._output_system_name} not found")
else:
self.output_system = frame_cls()
elif isinstance(self._output_system_name, BaseCoordinateFrame):
self.output_system = self._output_system_name
else:
raise TypeError("output_system should be a WCS instance, string, or a coordinate frame instance")
if self.output_system == self.input_system:
self.same_frames = True
else:
self.same_frames = False
@property
def same_frames(self):
return self._same_frames
@same_frames.setter
def same_frames(self, same_frames):
self._same_frames = same_frames
def transform(self, input_coords):
"""
Transform one set of coordinates to another
"""
if self.same_frames:
return input_coords
input_coords = input_coords*u.deg
x_in, y_in = input_coords[:, 0], input_coords[:, 1]
c_in = SkyCoord(UnitSphericalRepresentation(x_in, y_in),
frame=self.input_system)
# We often need to transform arrays that contain NaN values, and filtering
# out the NaN values would have a performance hit, so instead we just pass
# on all values and just ignore Numpy warnings
with np.errstate(all='ignore'):
c_out = c_in.transform_to(self.output_system)
lon = c_out.spherical.lon.deg
lat = c_out.spherical.lat.deg
return np.concatenate((lon[:, np.newaxis], lat[:, np.newaxis]), axis=1)
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return CoordinateTransform(self._output_system_name, self._input_system_name)
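# Illustrative sketch (added, not part of the original module):
# CoordinateTransform maps an Nx2 array of (lon, lat) values in degrees from
# one frame to another; the 'fk5'/'galactic' frame names and the coordinates
# below are example assumptions.
def _example_coordinate_transform():
    import numpy as np
    trans = CoordinateTransform('fk5', 'galactic')
    world_in = np.array([[10.68, 41.27],   # roughly M31 in FK5 degrees
                         [83.82, -5.39]])  # roughly the Orion Nebula
    return trans.transform(world_in)       # Nx2 array in galactic degrees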
class World2PixelTransform(CurvedTransform, metaclass=abc.ABCMeta):
"""
Base transformation from world to pixel coordinates
"""
has_inverse = True
frame_in = None
@property
@abc.abstractmethod
def input_dims(self):
"""
The number of input world dimensions
"""
@abc.abstractmethod
def transform(self, world):
"""
Transform world to pixel coordinates. You should pass in a NxM array
where N is the number of points to transform, and M is the number of
dimensions. This then returns the (x, y) pixel coordinates
as a Nx2 array.
"""
@abc.abstractmethod
def inverted(self):
"""
Return the inverse of the transform
"""
class Pixel2WorldTransform(CurvedTransform, metaclass=abc.ABCMeta):
"""
Base transformation from pixel to world coordinates
"""
has_inverse = True
frame_out = None
@property
@abc.abstractmethod
def output_dims(self):
"""
The number of output world dimensions
"""
@abc.abstractmethod
def transform(self, pixel):
"""
Transform pixel to world coordinates. You should pass in a Nx2 array
of (x, y) pixel coordinates to transform to world coordinates. This
will then return an NxM array where M is the number of dimensions.
"""
@abc.abstractmethod
def inverted(self):
"""
Return the inverse of the transform
"""
| bsd-3-clause |
molebot/vnpy | vn.datayes/api.py | 10 | 45360 | #encoding: UTF-8
import os
import json
import time
import requests
import pymongo
import pandas as pd
from datetime import datetime, timedelta
from Queue import Queue, Empty
from threading import Thread, Timer
from pymongo import MongoClient
from requests.exceptions import ConnectionError
from errors import (VNPAST_ConfigError, VNPAST_RequestError,
VNPAST_DataConstructorError)
class Config(object):
"""
Json-like config object.
The Config contains all kinds of settings and user info that
could be useful in the implementation of Api wrapper.
privates
--------
* head: string; the name of config file.
* token: string; user's token.
* body: dictionary; the main content of config.
- domain: string, api domain.
- ssl: boolean, specifes http or https usage.
- version: string, version of the api. Currently 'v1'.
- header: dictionary; the request header which contains
        authorization information.
"""
head = 'my config'
toke_ = '44ebc0f058981f85382595f9f15f967' + \
'0c7eaf2695de30dd752e8f33e9022baa0'
token = '575593eb7696aec7339224c0fac2313780d8645f68b77369dcb35f8bcb419a0b'
body = {
'ssl': False,
'domain': 'api.wmcloud.com/data',
'version': 'v1',
'header': {
'Connection' : 'keep-alive',
'Authorization': 'Bearer ' + token
}
}
def __init__(self, head=None, token=None, body=None):
"""
Reloaded constructor.
parameters
----------
* head: string; the name of config file. Default is None.
* token: string; user's token.
* body: dictionary; the main content of config
"""
if head:
self.head = head
if token:
self.token = token
if body:
self.body = body
def view(self):
""" Prettify printing method. """
config_view = {
'config_head' : self.head,
'config_body' : self.body,
'user_token' : self.token
}
print json.dumps(config_view,
indent=4,
sort_keys=True)
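# Illustrative sketch (added, not part of the original module): building a
# Config with a user-supplied token. The token string is a placeholder
# assumption, not a real credential.
def _example_build_config():
    my_token = '<your-datayes-token>'
    cfg = Config(token=my_token)
    # Config.body is a class-level dict, so also refresh the request header
    # that PyApi will actually send.
    cfg.body['header']['Authorization'] = 'Bearer ' + my_token
    cfg.view()  # pretty-print the resulting configuration
    return cfg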
#----------------------------------------------------------------------
# Data containers.
class BaseDataContainer(object):
"""
Basic data container. The fundamental of all other data
container objects defined within this module.
privates
--------
* head: string; the head(type) of data container.
* body: dictionary; data content. Among all sub-classes that inherit
BaseDataContainer, type(body) varies according to the financial meaning
that the child data container stands for.
- History:
- Bar
"""
head = 'ABSTRACT_DATA'
body = dict()
pass
class History(BaseDataContainer):
"""
Historical data container. The foundation of all other pandas
DataFrame-like two dimensional data containers for this module.
privates
--------
* head: string; the head(type) of data container.
* body: pd.DataFrame object; contains data contents.
"""
head = 'HISTORY'
body = pd.DataFrame()
def __init__(self, data):
"""
Reloaded constructor.
parameters
----------
* data: dictionary; usually a Json-like response from
web based api. For our purposes, data is exactly resp.json()
where resp is the response from datayes developer api.
- example: {'data': [
{
'closePrice': 15.88,
'date': 20150701, ...
},
{
'closePrice': 15.99,
'date': 20150702, ...
}, ...],
'retCode': 1,
'retMsg': 'Success'}.
So the body of data is actually in data['data'], which is
our target when constructing the container.
"""
try:
assert 'data' in data
self.body = pd.DataFrame(data['data'])
except AssertionError:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + 'input is not a dataframe.'
raise VNPAST_DataConstructorError(msg)
except Exception,e:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + str(e)
raise VNPAST_DataConstructorError(msg)
class Bar(History):
"""
Historical Bar data container. Inherits from History()
DataFrame-like two dimensional data containers for Bar data.
privates
--------
* head: string; the head(type) of data container.
* body: pd.DataFrame object; contains data contents.
"""
head = 'HISTORY_BAR'
body = pd.DataFrame()
def __init__(self, data):
"""
Reloaded constructor.
parameters
----------
* data: dictionary; usually a Json-like response from
web based api. For our purposes, data is exactly resp.json()
where resp is the response from datayes developer api.
- example: {'data': [{
'exchangeCD': 'XSHG',
'utcOffset': '+08:00',
'unit': 1,
'currencyCD': 'CNY',
'barBodys': [
{
'closePrice': 15.88,
'date': 20150701, ...
},
{
'closePrice': 15.99,
'date': 20150702, ...
}, ... ],
'ticker': '000001',
'shortNM': u'\u4e0a\u8bc1\u6307\u6570'
}, ...(other tickers) ],
'retCode': 1,
'retMsg': 'Success'}.
        When requesting 1 ticker, the json['data'] layer has only one element;
        the layout anticipates data collection for multiple tickers, which is
        currently not supported.
So we want resp.json()['data'][0]['barBodys'] for Bar data contents,
and that is what we go into when constructing Bar.
"""
try:
assert 'data' in data
assert 'barBodys' in data['data'][0]
self.body = pd.DataFrame(data['data'][0]['barBodys'])
except AssertionError:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + 'input is not a dataframe.'
raise VNPAST_DataConstructorError(msg)
except Exception,e:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + str(e)
raise VNPAST_DataConstructorError(msg)
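# Illustrative sketch (added, not part of the original module): constructing a
# container from a hand-made dictionary shaped like the Json responses
# documented above; the field names and numbers are assumptions.
def _example_build_history():
    resp_like = {
        'retCode': 1,
        'retMsg': 'Success',
        'data': [{'tradeDate': '2015-07-01', 'closePrice': 15.88},
                 {'tradeDate': '2015-07-02', 'closePrice': 15.99}]
    }
    hist = History(resp_like)
    return hist.body.shape  # -> (2, 2) pandas DataFrame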
#----------------------------------------------------------------------
# Datayes Api class
class PyApi(object):
"""
Python based Datayes Api object.
PyApi should be initialized with a Config json. The config must be complete,
in that once constructed, the private variables like request headers,
tokens, etc. become constant values (inherited from config), and will be
    consistently referred to whenever making requests.
privates
--------
* _config: Config object; a container of all useful settings when making
requests.
* _ssl, _domain, _domain_stream, _version, _header, _account_id:
boolean, string, string, string, dictionary, integer;
just private references to the items in Config. See the docs of Config().
* _session: requests.session object.
examples
--------
"""
_config = Config()
# request stuffs
_ssl = False
_domain = ''
_version = 'v1'
_header = dict()
_token = None
_session = requests.session()
def __init__(self, config):
"""
Constructor.
parameters
----------
* config: Config object; specifies user and connection configs.
"""
if config.body:
try:
self._config = config
self._ssl = config.body['ssl']
self._domain = config.body['domain']
self._version = config.body['version']
self._header = config.body['header']
except KeyError:
msg = '[API]: Unable to configure api; ' + \
'config file is incomplete.'
raise VNPAST_ConfigError(msg)
except Exception,e:
msg = '[API]: Unable to configure api; ' + str(e)
raise VNPAST_ConfigError(msg)
# configure protocol
if self._ssl:
self._domain = 'https://' + self._domain
else:
self._domain = 'http://' + self._domain
def __access(self, url, params, method='GET'):
"""
request specific data from given url with parameters.
parameters
----------
* url: string.
* params: dictionary.
* method: string; 'GET' or 'POST', request method.
"""
try:
assert type(url) == str
assert type(params) == dict
        except AssertionError:
            raise VNPAST_RequestError('[API]: Invalid url or parameter input.')
if not self._session:
s = requests.session()
else: s = self._session
# prepare and send the request.
try:
req = requests.Request(method,
url = url,
headers = self._header,
params = params)
prepped = s.prepare_request(req) # prepare the request
resp = s.send(prepped, stream=False, verify=True)
if method == 'GET':
assert resp.status_code == 200
elif method == 'POST':
assert resp.status_code == 201
return resp
except AssertionError:
msg = '[API]: Bad request, unexpected response status: ' + \
str(resp.status_code)
raise VNPAST_RequestError(msg)
pass
except Exception,e:
msg = '[API]: Bad request.' + str(e)
raise VNPAST_RequestError(msg)
#----------------------------------------------------------------------
# directly get methods - Market data
def get_equity_M1_one(self,
start='', end='', secID='000001.XSHG'):
"""
Get 1-minute intraday bar data of one security.
parameters
----------
* start, end: string; Time mark formatted in 'HH:MM'. Specifies the
start/end point of bar. Note that the requested date is the
latest trading day (only one day), and the default start/end time is
'09:30' and min(now, '15:00'). Effective minute bars range from
09:30 - 11:30 in the morning and 13:01 - 15:00 in the afternoon.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange
"""
url = '{}/{}/api/market/getBarRTIntraDay.json'.format(
self._domain, self._version)
params = {
'startTime': start,
'endTime': end,
'securityID': secID,
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
print resp.json()
data = Bar(resp.json())
return data
except AssertionError: return 0
def get_equity_M1(self, field='', start='20130701', end='20130730',
secID='000001.XSHG', output='df'):
"""
1-minute bar in a month, currently unavailable.
parameters
----------
* field: string; variables that are to be requested.
* start, end: string; Time mark formatted in 'YYYYMMDD'.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange
* output: enumeration of strings; the format of output that will be
returned. default is 'df', optionals are:
- 'df': returns History object,
where ret.body is a dataframe.
- 'list': returns a list of dictionaries.
"""
url = '{}/{}/api/market/getBarHistDateRange.json'.format(
self._domain, self._version)
params = {
'field': field,
'startDate': start,
'endDate': end,
'securityID': secID,
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = Bar(resp.json())
elif output == 'list':
data = resp.json()['data'][0]['barBodys']
return data
except AssertionError: return 0
def get_equity_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one security.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for securities)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- actPreClosePrice* double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- dealAmount* integer.
- turnoverRate double.
- accumAdjFactor* double.
- negMarketValue* double.
- marketValue* double.
- PE* double.
- PE1* double.
- PB* double.
Field is an optional parameter, default setting returns all fields.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of bar. Start and end are optional parameters. If
start, end and ticker are all specified, default 'one' value will be
abandoned.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange.
* ticker: string; the trading code in the form of '000001'.
* one: string; Date mark formatted in 'YYYYMMDD'.
Specifies one date on which data of all tickers are to be requested.
Note that to get effective json data response, at least one parameter
in {secID, ticker, tradeDate} should be entered.
* output: enumeration of strings; the format of output that will be
returned. default is 'df', optionals are:
- 'df': returns History object,
where ret.body is a dataframe.
- 'list': returns a list of dictionaries.
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktEqud.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
#return resp
except AssertionError: return 0
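    # Illustrative usage sketch (added note, not part of the original class).
    # A typical call of the daily-bar getter above needs a valid token and
    # network access, so it is shown only as a hedged, commented example:
    #
    #     api = PyApi(Config())
    #     bars = api.get_equity_D1(start='20150101', end='20150301',
    #                              ticker='000001', output='df')
    #     if bars:                     # 0 is returned on an empty response
    #         print bars.body.head()   # pandas DataFrame of daily bars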
def get_block_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513):
"""
"""
pass
def get_repo_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513):
"""
"""
pass
def get_bond_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one bond instrument.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for bonds)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- turnoverRate double.
- dealAmount* integer.
- accrInterest* double.
- YTM(yieldToMaturity)* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktBondd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_future_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one future contract.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for future contracts)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- contractObject* string.
- contractMark* string.
- preSettlePrice* double.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- settlePrice* double.
- turnoverVol integer.
- turnoverValue integer.
- openInt* integer.
- CHG* double.
- CHG1* double.
- CHGPct* double.
- mainCon* integer (0/1 flag).
- smainCon* integer (0/1 flag).
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktFutd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_future_main_D1(self, field='', start='', end='', mark='',
obj='', main=1, one=20150513):
"""
"""
pass
def get_fund_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one mutual fund.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for funds)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- CHG* double.
- CHGPct* double.
- discount* double.
- discountRatio* double.
- circulationShares* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktFundd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_index_D1(self, field='', start='', end='', indexID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one stock index.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for indices)
- indexID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- porgFullName* string.
- exchangeCD string.
- preCloseIndex double.
- openIndex double.
- highestIndex double.
- lowestIndex double.
- closeIndex double.
- turnoverVol double.
- turnoverValue double.
- CHG* double.
- CHGPct* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktIdxd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'indexID': indexID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_option_D1(self, field='', start='', end='', secID='',
optID='' ,ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one option contact.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for options)
- secID string.
- optID* string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- settlePrice* double.
- turnoverVol double.
- turnoverValue double.
- openInt* integer.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktOptd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'optID': optID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_stockFactor_D1(self, field='', secID='',
ticker='000001', start=20130701, end=20130801):
"""
Get 1-day interday factor data for stocks.
parameters
----------
* field: string; variables that are to be requested.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
url = '{}/{}/api/market/getStockFactorsDateRange.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
#----------------------------------------------------------------------
# directly get methods - Fundamental Data
def get_balanceSheet(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtBS.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
def get_balanceSheet_bnk(self):
"""
"""
pass
def get_balanceSheet_sec(self):
"""
"""
pass
def get_balanceSheet_ins(self):
"""
"""
pass
def get_balanceSheet_ind(self):
"""
"""
pass
def get_cashFlow(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtCF.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
def get_cashFlow_bnk(self):
"""
"""
pass
def get_cashFlow_sec(self):
"""
"""
pass
def get_cashFlow_ins(self):
"""
"""
pass
def get_cashFlow_ind(self):
"""
"""
pass
def get_incomeStatement(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtIS.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
def get_incomeStatement_bnk(self):
"""
"""
pass
def get_incomeStatement_sec(self):
"""
"""
pass
def get_incomeStatement_ins(self):
"""
"""
pass
def get_incomeStatement_ind(self):
"""
"""
pass
#----------------------------------------------------------------------
# multi-threading download for database storage.
def __drudgery(self, id, db, indexType,
start, end, tasks, target):
"""
basic drudgery function.
This method loops over a list of tasks(tickers) and get data using
target api.get_# method for all those tickers.
        A new feature 'date' or 'dateTime' (for intraday) will be automatically
        added into every json-like document, and specifies the datetime.
datetime() formatted date(time) mark. With the setting of MongoDB
in this module, this feature should be the unique index for all
collections.
        By programmatically creating and assigning tasks to drudgery
        functions, multi-threaded downloading of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
* indexType: string(enum): 'date' or 'datetime', specifies what
is the collection index formatted.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* tasks: list of strings; the tickers that this drudgery function
loops over.
* target: method; the api.get_# method that is to be called by
drudgery function.
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
if indexType == 'date':
todt = lambda str_dt: datetime.strptime(str_dt,'%Y-%m-%d')
update_dt = lambda d: d.update({'date':todt(d['tradeDate'])})
elif indexType == 'datetime':
todt = lambda str_d, str_t: datetime.strptime(
str_d + ' ' + str_t,'%Y-%m-%d %H:%M')
update_dt = lambda d: d.update(
{'dateTime':todt(d['dataDate'], d['barTime'])})
else:
raise ValueError
# loop over all tickers in task list.
k, n = 1, len(tasks)
for ticker in tasks:
try:
data = target(start = start,
end = end,
ticker = ticker,
output = 'list')
assert len(data) >= 1
map(update_dt, data) # add datetime feature to docs.
coll = db[ticker]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
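    # Illustrative sketch (added note, not part of the original class): the
    # datetime feature that __drudgery attaches to every document, shown as a
    # commented example on assumed field values:
    #
    #     daily = {'tradeDate': '2015-07-01', 'closePrice': 15.88}
    #     daily['date'] = datetime.strptime(daily['tradeDate'], '%Y-%m-%d')
    #     intraday = {'dataDate': '2015-07-01', 'barTime': '09:31'}
    #     intraday['dateTime'] = datetime.strptime(
    #         intraday['dataDate'] + ' ' + intraday['barTime'],
    #         '%Y-%m-%d %H:%M')
    #
    # With the MongoDB setup assumed by this module, 'date'/'dateTime' then
    # serves as the unique index of each collection.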
def get_equity_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_equity_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_equity_D1)
def get_future_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_future_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_future_D1)
def get_index_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_index_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_index_D1)
def get_bond_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_bond_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_bond_D1)
def get_fund_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_fund_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_fund_D1)
def get_option_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_option_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_option_D1)
#----------------------------------------------------------------------
def __overlord(self, db, start, end, dName,
target1, target2, sessionNum):
"""
Basic controller of multithreading request.
Generates a list of all tickers, creates threads and distribute
tasks to individual #_drudgery() functions.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* dName: string; the path of file where all tickers' infomation
are stored in.
* target1: method; targetting api method that overlord calls
to get tasks list.
* target2: method; the corresponding drudgery function.
        * sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
if os.path.isfile(dName):
# if directory exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = target1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = target2,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
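    # Illustrative sketch (added note, not part of the original class): how
    # __overlord splits the full ticker list into per-thread chunks, shown as
    # a commented example with an assumed ticker list and sessionNum=3:
    #
    #     allTickers = ['000001', '000002', '000004',
    #                   '000005', '000006', '000007']
    #     chunkSize = len(allTickers) / 3        # Py2 integer division -> 2
    #     taskLists = [allTickers[k:k + chunkSize]
    #                  for k in range(0, len(allTickers), chunkSize)]
    #     # -> three chunks of two tickers, one chunk per Thread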
def get_equity_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get equity D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/equTicker.json',
target1 = self.get_equity_D1,
target2 = self.get_equity_D1_drudgery,
sessionNum = sessionNum)
def get_future_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get future D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/futTicker.json',
target1 = self.get_future_D1,
target2 = self.get_future_D1_drudgery,
sessionNum = sessionNum)
def get_index_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get index D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/idxTicker.json',
target1 = self.get_index_D1,
target2 = self.get_index_D1_drudgery,
sessionNum = sessionNum)
def get_bond_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get bond D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/bndTicker.json',
target1 = self.get_bond_D1,
target2 = self.get_bond_D1_drudgery,
sessionNum = sessionNum)
def get_fund_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get fund D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/fudTicker.json',
target1 = self.get_fund_D1,
target2 = self.get_fund_D1_drudgery,
sessionNum = sessionNum)
def get_option_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get option D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/optTicker.json',
target1 = self.get_option_D1,
target2 = self.get_option_D1_drudgery,
sessionNum = sessionNum)
def get_equity_D1_mongod_(self, db, start, end, sessionNum=30):
"""
Outer controller of get equity D1 method.
Generates a list of all tickers, creates threads and distribute
tasks to individual get_equity_D1_drudgery() functions.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
        * sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
# initialize task list.
dName = 'names/equTicker.json'
if os.path.isfile(dName):
# if directory exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = self.get_equity_D1_drudgery,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
#----------------------------------------------------------------------#
# to be deprecated
def get_equity_D1_drudgery_(self, id, db,
start, end, tasks=[]):
"""
Drudgery function of getting equity_D1 bars.
This method loops over a list of tasks(tickers) and get D1 bar
for all these tickers. A new feature 'date' will be automatically
        added into every json-like document, and specifies the datetime.
datetime() formatted date mark. With the default setting of MongoDB
in this module, this feature should be the unique index for all
collections.
        By programmatically creating and assigning tasks to drudgery
        functions, multi-threaded downloading of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* tasks: list of strings; the tickers that this drudgery function
loops over.
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
todt = lambda str_dt: datetime.strptime(str_dt,'%Y-%m-%d')
update_dt = lambda d: d.update({'date':todt(d['tradeDate'])})
# loop over all tickers in task list.
k, n = 1, len(tasks)
for ticker in tasks:
try:
data = self.get_equity_D1(start = start,
end = end,
ticker = ticker,
output = 'list')
assert len(data) >= 1
map(update_dt, data) # add datetime feature to docs.
coll = db[ticker]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except ConnectionError:
                # If the connection chokes, stand by for 1 sec and invoke again.
time.sleep(1)
self.get_equity_D1_drudgery(
id, db, start, end, tasks)
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
def get_equity_D1_mongod_(self, db, start, end, sessionNum=30):
"""
Outer controller of get equity D1 method.
        Generates a list of all tickers, creates threads and distributes
        tasks to individual get_equity_D1_drudgery() functions.
        parameters
        ----------
        * db: pymongo.db object; the database which collections of bars will
        go into. Note that this database will be passed to every
        drudgery function created by the controller.
        * start, end: string; date mark formatted as 'YYYYMMDD'. Specifies the
        start/end point of collections of bars.
        * sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
# initialize task list.
dName = 'names/equTicker.json'
if os.path.isfile(dName):
            # if the file exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = self.get_equity_D1_drudgery,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
#----------------------------------------------------------------------#
def get_equity_M1_drudgery(self, id, db,
start, end, tasks=[]):
"""
        Drudgery function of getting equity_M1 bars.
        This method loops over a list of tasks (tickers) and gets the M1 bar
        for each of these tickers. A new field 'dateTime', combining the Y-m-d
        formatted date part and the H:M time part, is automatically added to
        every json-like document. It is a datetime.datetime() timestamp
        object. In this module, this field should be the unique index for all
        collections.
        By programmatically creating and assigning tasks to drudgery
        functions, multi-threaded download of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars. Note that to ensure the
success of every requests, the range amid start and end had better be
no more than one month.
* tasks: list of strings; the tickers that this drudgery function
loops over.
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
todt = lambda str_d, str_t: datetime.strptime(
str_d + ' ' + str_t,'%Y-%m-%d %H:%M')
update_dt = lambda d: d.update(
{'dateTime':todt(d['dataDate'], d['barTime'])})
k, n = 1, len(tasks)
for secID in tasks:
try:
data = self.get_equity_M1(start = start,
end = end,
secID = secID,
output = 'list')
map(update_dt, data) # add datetime feature to docs.
coll = db[secID]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except ConnectionError:
                # If the connection chokes, stand by for 1 sec and invoke again.
time.sleep(1)
                self.get_equity_M1_drudgery(
id, db, start, end, tasks)
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
def get_equity_M1_interMonth(self, db, id,
startYr=datetime.now().year-2,
endYr=datetime.now().year,
tasks=[]):
"""
Mid-level wrapper of get equity M1 method.
Get 1-minute bar between specified start year and ending year for
more than one tickers in tasks list.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* id: integer; the ID of wrapper session.
        * startYr, endYr: integer; the start and end year between which the
        1-minute bar data is fetched, one month at a time, using the
        get_equity_M1_drudgery() function.
        Default values are two years before now and this year.
        The complete time range will be sub-divided into months, and threads
        are deployed for each of these months.
- example
-------
        Suppose .now() is August 15th 2015 (20150815) and
        startYr, endYr = 2014, 2015.
        Then two lists of strings will be generated:
ymdStringStart = ['20140102','20140202', ... '20150802']
ymdStringEnd = ['20140101','20140201', ... '20150801']
the sub-timeRanges passed to drudgeries will be:
(start, end): (20140102, 20140201), (20140202, 20140301),
..., (20150702, 20150801).
So the actual time range is 20140102 - 20150801.
        * tasks: list of strings; the secIDs that each monthly drudgery
        thread loops over.
"""
# Construct yyyymmdd strings.(as ymdStrings list)
now = datetime.now()
years = [str(y) for y in range(startYr, endYr+1)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'02' for k in range(1,13)]
ymdStringStart = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'01' for k in range(1,13)]
ymdStringEnd = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
k = 0
for t in range(len(ymdStringEnd)-1):
start = ymdStringStart[t]
end = ymdStringEnd[t+1]
subID = str(id) + '_' + str(k)
thrd = Thread(target = self.get_equity_M1_drudgery,
args = (subID, db, start, end, tasks))
thrd.start()
k += 1
def get_equity_M1_all(self, db,
startYr=datetime.now().year-2,
endYr=datetime.now().year,
                          splitNum=10):
        """
        Controller of get equity M1 method for all tickers. Currently a no-op:
        the draft implementation below is kept disabled inside a string
        literal.
        """
"""
# initialize task list.
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
exchangeCDs = list(data.body['exchangeCD'])
allSecIds = [allTickers[k]+'.'+exchangeCDs[k] for k in range(
len(allTickers))]
chunkSize = len(allSecIds)/splitNum
taskLists = [allSecIds[k:k+chunkSize] for k in range(
0, len(allSecIds), chunkSize)]
# Construct yyyymmdd strings.(as ymdStrings list)
now = datetime.now()
years = [str(y) for y in range(startYr, endYr+1)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'01' for k in range(1,13)]
ymdStrings = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
print taskLists[0]
print ymdStrings
k = 0
for t in range(len(ymdStrings)-1):
start = ymdStrings[t]
end = ymdStrings[t+1]
thrd = Thread(target = self.get_equity_M1_drudgery,
args = (k, db, start, end, taskLists[0]))
thrd.start()
k += 1
return 1
"""
pass
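# Hedged illustration (not part of the original module): a standalone sketch of
# the month-boundary 'YYYYMMDD' strings that get_equity_M1_interMonth() builds
# for its drudgery threads, matching the docstring example above. The helper
# name is hypothetical and assumes the module-level `datetime` import used by
# the methods above.
def _example_month_boundaries(startYr, endYr):
    """Return (starts, ends): 'YYYYMM02' start marks and 'YYYYMM01' end marks."""
    now = datetime.now()
    years = [str(y) for y in range(startYr, endYr + 1)]
    starts = [y + str(m).zfill(2) + '02' for y in years for m in range(1, 13)
              if datetime.strptime(y + str(m).zfill(2) + '02', '%Y%m%d') <= now]
    ends = [y + str(m).zfill(2) + '01' for y in years for m in range(1, 13)
            if datetime.strptime(y + str(m).zfill(2) + '01', '%Y%m%d') <= now]
    return starts, ends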
| mit |
marcharper/stationary | export_to_cpp.py | 1 | 5326 | import os
import pickle
import subprocess
from matplotlib import pyplot as plt
from scipy.misc import comb
import ternary
from stationary.utils.edges import enumerate_states_from_edges
from stationary.processes.incentives import linear_fitness_landscape, fermi
from stationary.processes.incentive_process import (
multivariate_transitions_gen)
def num_states(N, n=3):
"""
Returns the number of states in the discretization of the simplex.
"""
return comb(N+n-1, n-1, exact=True)
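# Hedged example (not part of the original script): a tiny, purely illustrative
# check of the formula above. A 3-type simplex discretized with N=10 has
# C(12, 2) = 66 lattice states.
def _example_num_states():
    return num_states(10, n=3)  # == 66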
def pickle_inv_enumeration(inv_enum, pickle_filename="inv_enum.pickle"):
"""
Pickle the inverse enumeration of states, needed to import the exported
stationary calculation.
"""
with open(pickle_filename, 'wb') as output_file:
pickle.dump(inv_enum, output_file)
def output_enumerated_edges(N, n, edges, filename="enumerated_edges.csv"):
"""
    Writes the graph underlying the Markov process to disk. This is used to
export the computation to a C++ implementation if the number of nodes is
very large.
"""
edges = list(edges)
# Collect all the states from the list of edges
all_states, enum, inv_enum = enumerate_states_from_edges(edges, inverse=True)
# Output enumerated_edges
with open(filename, 'w') as outfile:
outfile.write(str(num_states(N, n)) + "\n")
outfile.write(str(n) + "\n")
for (source, target, weight) in list(edges):
row = [str(enum[source]), str(enum[target]), str.format('%.50f' % weight)]
outfile.write(",".join(row) + "\n")
return inv_enum
def load_pickled_inv_enum(filename="inv_enum.pickle"):
"""
    Load the pickled inverse enumeration to translate the stationary states
from the exported calculation.
"""
with open(filename, 'rb') as input_file:
inv_enum = pickle.load(input_file)
return inv_enum
def load_stationary_gen(filename="enumerated_stationary.txt"):
"""
Loads the computed stationary distribution from the exported calculation.
The states are still enumerated.
"""
with open(filename) as input_file:
for line in input_file:
line = line.strip()
state, value = line.split(',')
yield (int(state), float(value))
def stationary_gen(filename="enumerated_stationary.txt",
pickle_filename="inv_enum.pickle"):
"""
Loads the stationary distribution computed by the C++ implementation and
reverses the enumeration.
"""
inv_enum = load_pickled_inv_enum(filename=pickle_filename)
gen = load_stationary_gen(filename=filename)
for enum_state, value in gen:
state = inv_enum[enum_state]
yield (state, value)
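# Hedged usage sketch (not part of the original script): materialize the
# streamed stationary distribution into a dict keyed by simplex state. The
# helper name is hypothetical.
def _example_stationary_as_dict(filename="enumerated_stationary.txt",
                                pickle_filename="inv_enum.pickle"):
    return dict(stationary_gen(filename=filename,
                               pickle_filename=pickle_filename))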
def remove_boundary(s):
"""Removes the boundary, which improves some stationary plots visually."""
s1 = dict()
for k, v in s.items():
a, b, c = k
if a * b * c != 0:
s1[k] = v
return s1
def render_stationary(s):
"""
Renders a stationary distribution.
"""
# Put the stationary distribution into a dictionary
d = dict()
for state, value in s:
d[state] = value
N = sum(list(d.keys())[0])
# Plot it
figure, tax = ternary.figure(scale=N)
tax.heatmap(remove_boundary(d), scientific=True, style='triangular',
cmap="jet")
return tax
def stationary_max_min(filename="enumerated_stationary.txt"):
min_ = 1.
max_ = 0.
gen = load_stationary_gen(filename=filename)
for enum_state, value in gen:
if value > max_:
max_ = value
if value < min_:
min_ = value
return max_, min_
def full_example(N, m, mu, beta=1., pickle_filename="inv_enum.pickle",
filename="enumerated_edges.csv"):
"""
Full example of exporting the stationary calculation to C++.
"""
print("Computing graph of the Markov process.")
if not mu:
mu = 3. / 2 * 1. / N
if m is None:
m = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
iterations = 200 * N
num_types = len(m[0])
fitness_landscape = linear_fitness_landscape(m)
incentive = fermi(fitness_landscape, beta=beta)
edges_gen = multivariate_transitions_gen(
N, incentive, num_types=num_types, mu=mu)
print("Outputting graph to %s" % filename)
inv_enum = output_enumerated_edges(
N, num_types, edges_gen, filename=filename)
print("Saving inverse enumeration to %s" % pickle_filename)
pickle_inv_enumeration(inv_enum, pickle_filename="inv_enum.pickle")
print("Running C++ Calculation")
cwd = os.getcwd()
executable = os.path.join(cwd, "a.out")
subprocess.call([executable, filename, str(iterations)])
print("Rendering stationary to SVG")
vmax, vmin = stationary_max_min()
s = list(stationary_gen(
filename="enumerated_stationary.txt",
pickle_filename="inv_enum.pickle"))
ternary.svg_heatmap(s, N, "stationary.svg", vmax=vmax, vmin=vmin, style='h')
print("Rendering stationary")
tax = render_stationary(s)
tax.ticks(axis='lbr', linewidth=1, multiple=N//3, offset=0.015)
tax.clear_matplotlib_ticks()
plt.show()
if __name__ == '__main__':
N = 180
mu = 1. / N
m = [[0, 1, -1], [-1, 0, 1], [1, -1, 0]]
full_example(N=N, m=m, mu=mu, beta=1.5)
| mit |
luosaiyu/itp2d | scripts/analyze_spectrum.py | 1 | 2979 | #!/usr/bin/env python2
# vim: set fileencoding=utf8
from __future__ import division, with_statement
import sys, bisect, math, h5py, numpy
from scipy import *
from scipy.fftpack import fftfreq
from scipy.integrate import trapz, quad, romberg
from numpy.linalg import lstsq
from matplotlib.pyplot import *
# A helper script for computing and plotting nearest-neighbour level spectra,
# spectral rigidity and other statistical measures used in quantum chaos studies.
# The spectral rigidity. Please see, e.g., H.-J. Stöckmann, Quantum chaos: an
# introduction (2006), page 112.
def rigidity(L, e, n):
if L == 0:
return 0
ds = []
for E in linspace(min(e)+L/2, max(e)-L/2, num=500):
es = linspace(E-L/2, E+L/2, num=50)
b, a = polyfit(es, [n(t) for t in es], 1)
x2s = array([ (n(es[t]) - a - b*es[t])**2 for t in range(len(es)) ])
d = trapz(x2s, es)/L
ds.append(d)
return average(ds)
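# Hedged usage sketch (not part of the original script): the spectral rigidity
# of an ideal picket-fence spectrum, which should sit near the 1/12 reference
# line plotted in main(). The helper name is hypothetical.
def _example_picket_fence_rigidity(L=10, levels=1000):
    e = range(levels)
    n = lambda t: bisect.bisect(e, t)
    return rigidity(L, e, n)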
def main():
filename = (sys.argv[-1] if sys.argv[-1][-3:] == '.h5' else "data/itp2d.h5")
file = h5py.File(filename, 'r')
# read energies
num_converged = file.attrs["num_converged"]
energies = array(file["/final_energies"])[:num_converged]
#energies = range(1000)
#energies.sort()
# calculate NND spectrum
nnd = array([ energies[i] - energies[i-1] for i in range(1, len(energies)) ])
# normalize average distance to one
d = mean(nnd)
nnd /= d
energies /= d
# the normalized spectral staircase
n = lambda t: bisect.bisect(energies, t)
figure()
title("Normalized spectral staircase")
xlabel("$\epsilon$")
ylabel("$N(\epsilon)$")
e = linspace(0,max(energies),num=1000)
plot(e, [ n(t) for t in e ])
xlim(0, e.max())
figure()
title("Spectral rigidity")
xlabel("$L$")
ylabel("$\Delta_3(L)$")
L = linspace(0, 20, num=50)
D = array([ rigidity(l, e, n) for l in L])
# Compute rigidity for the picket-fence spectrum
pftest = array([ rigidity(l, range(1000), lambda t: bisect.bisect(range(1000), t)) for l in L ])
# Compute rigidity for the Poisson spectrum
podata = numpy.random.random_sample(size=1000)
podata.sort()
ponnd = array([ podata[i] - podata[i-1] for i in range(1, len(podata)) ])
podata /= mean(ponnd)
potest = array([ rigidity(l, podata, lambda t: bisect.bisect(podata, t)) for l in L ])
plot(L, D, label="%d energies from data" % len(energies))
plot(L, pftest, label="picket-fence (calculated)")
plot(L, potest, label="Poisson (calculated)")
plot(L, L/15, ':', label="Poisson")
plot(L, log(L)/pi**2 - 0.007, ':', label="GOE, $L>>1$")
plot(L, [1/12]*len(L), ':', label="picket-fence")
legend(loc="center right")
xlim(0, max(L))
ylim(0, 0.4)
figure()
title("Nearest neighbour level spacing spectrum")
xlabel('NNLS')
ylabel('Frequency')
n, bins, patches = hist(nnd[nnd <= 6], 300)
show()
if __name__=="__main__":
main()
| gpl-3.0 |
quiltdata/quilt | lambdas/preview/test/test_index.py | 1 | 27419 | """
Test functions for preview endpoint
"""
import json
import math
import os
import re
from pathlib import Path
from unittest.mock import ANY, patch
import pyarrow.parquet as pq
import responses
from t4_lambda_shared.utils import read_body
from .. import index
MOCK_ORIGIN = 'http://localhost:3000'
BASE_DIR = Path(__file__).parent / 'data'
# pylint: disable=no-member,invalid-sequence-index
class TestIndex():
"""Class to test various inputs to the main indexing function"""
FILE_URL = 'https://quilt-example.s3.amazonaws.com/file.ext'
# pylint: disable=too-many-function-args
# pylint hates on index.lambda_handler(event, None), even though, without the
# second arg we would get TypeError: wrapper() missing 1 required positional argument: '_'
@classmethod
def _make_event(cls, query, headers=None):
return {
'httpMethod': 'POST',
'path': '/foo',
'pathParameters': {},
'queryStringParameters': query or None,
'headers': headers or None,
'body': None,
'isBase64Encoded': False,
}
@responses.activate
def test_403(self):
"""test 403 cases, such as Glacier"""
url = self.FILE_URL
responses.add(
responses.GET,
url=url,
status=403,
)
event = self._make_event({'url': url, 'input': 'txt'})
response = index.lambda_handler(event, None)
assert response["statusCode"] == 403
body = json.loads(response["body"])
assert "text" in body
assert "error" in body
@responses.activate
def test_fcs(self):
"""test fcs extraction
for extended testing you can download FCS files here
https://flowrepository.org/experiments/4/download_ziped_files,
copy to data/fcs/ and run this unit test
"""
parent = BASE_DIR / "fcs"
fcs_files = list(parent.glob("*.fcs"))
extended = False
if (
set(os.path.split(f)[1] for f in fcs_files)
!= set(['accuri-ao1.fcs', 'bad.fcs', '3215apc 100004.fcs'])
):
extended = True
first = True
for fcs in fcs_files:
_, name = os.path.split(fcs)
file_bytes = fcs.read_bytes()
if first:
responses.add(
responses.GET,
self.FILE_URL,
body=file_bytes,
status=200,
)
first = False
else:
responses.replace(
responses.GET,
self.FILE_URL,
body=file_bytes,
status=200,
)
event = self._make_event({'url': self.FILE_URL, 'input': 'fcs'})
resp = index.lambda_handler(event, None)
assert resp['statusCode'] == 200, f'Expected 200, got {resp["statusCode"]}'
body = json.loads(read_body(resp))
assert 'info' in body
if 'warnings' not in body['info']:
if not extended:
assert name == 'accuri-ao1.fcs'
assert body['html'].startswith('<div>')
assert body['html'].endswith('</div>')
assert body['info']['metadata'].keys()
else:
assert not body['html']
if 'metadata' not in body['info']:
assert body['info']['warnings'].startswith('Unable')
if not extended:
assert name == 'bad.fcs'
else:
if not extended:
assert name == '3215apc 100004.fcs'
def test_bad(self):
"""send a known bad event (no input query parameter)"""
event = self._make_event({'url': self.FILE_URL}, {'origin': MOCK_ORIGIN})
resp = index.lambda_handler(event, None)
assert resp['statusCode'] == 400, 'Expected 400 on event without "input" query param'
assert resp['body'], 'Expected explanation for 400'
assert resp['headers']['access-control-allow-origin'] == '*'
def test_bad_hostname(self):
bad_url = 'https://example.com/foo'
event = self._make_event({'url': bad_url, 'input': 'txt'}, {'origin': MOCK_ORIGIN})
resp = index.lambda_handler(event, None)
assert resp['statusCode'] == 400, 'Expected 400 on event with a non-S3 URL'
body = json.loads(read_body(resp))
assert 'S3' in body['title'], 'Expected 400 explanation'
def test_bad_line_count(self):
"""send a known bad line_count parameter"""
garbage = '-1'
event = self._make_event({
'url': self.FILE_URL,
'input': 'txt',
'line_count': garbage}, {'origin': MOCK_ORIGIN})
resp = index.lambda_handler(event, None)
assert resp['statusCode'] == 400, f'Expected 400 on event with line_count of {garbage}'
body = json.loads(read_body(resp))
assert 'Unexpected line_count=' in body['title'], 'Expected 400 explanation'
assert 'out of range' in body['detail'], 'Expected 400 explanation'
garbage = '123notint'
event = self._make_event({
'url': self.FILE_URL,
'input': 'txt',
'line_count': garbage}, {'origin': MOCK_ORIGIN})
resp = index.lambda_handler(event, None)
assert resp['statusCode'] == 400, 'Expected 400 on event with line_count of 123notint'
body = json.loads(read_body(resp))
assert 'Unexpected line_count=' in body['title'], 'Expected 400 explanation'
assert 'invalid literal' in body['detail'], 'Expected 400 explanation'
def test_bad_max_bytes(self):
"""send a known bad max_bytes parameter"""
garbage = 'gfgfgf'
event = self._make_event({
'url': self.FILE_URL,
'input': 'txt',
'max_bytes': garbage}, {'origin': MOCK_ORIGIN})
resp = index.lambda_handler(event, None)
assert resp['statusCode'] == 400, f'Expected 400 on event with line_count of {garbage}'
body = json.loads(read_body(resp))
assert 'Unexpected max_bytes=' in body['title'], 'Expected 400 explanation'
@responses.activate
def test_csv(self):
"""test returning HTML previews of CSV (via pandas)"""
csv = BASE_DIR / 'sample.csv'
responses.add(
responses.GET,
self.FILE_URL,
body=csv.read_bytes(),
status=200)
event = self._make_event({'url': self.FILE_URL, 'input': 'csv'})
resp = index.lambda_handler(event, None)
body = json.loads(read_body(resp))
assert resp['statusCode'] == 200, 'preview failed on sample.csv'
body_html = body['html']
assert body_html.count('<table') == 1, 'expected one HTML table'
assert body_html.count('</table>') == 1, 'expected one HTML table'
assert body_html.count('<p>') == body_html.count('</p>'), 'malformed HTML'
assert not re.match(r'\d+ rows × \d+ columns', body_html), \
'table dimensions should be removed'
with open(BASE_DIR / 'csv_html_response_head.txt') as expected:
head = expected.read()
assert head in body_html, 'unexpected first columns'
@responses.activate
def test_excel(self):
"""test parsing excel files in S3"""
workbook = BASE_DIR / 'sample.xlsx'
responses.add(
responses.GET,
self.FILE_URL,
body=workbook.read_bytes(),
status=200)
event = self._make_event({'url': self.FILE_URL, 'input': 'excel'})
resp = index.lambda_handler(event, None)
body = json.loads(read_body(resp))
assert resp['statusCode'] == 200, 'preview failed on sample.xlsx'
body_html = body['html']
assert body_html.count('Germany') == 13, 'unexpected data contents'
assert body_html.count('Enterprise') == 7, 'unexpected data contents'
assert body_html.count('Midmarket') == 13, 'unexpected data contents'
assert body_html.count('Canada') == 9, 'unexpected data contents'
@responses.activate
def test_ipynb(self):
"""test sending ipynb bytes"""
notebook = BASE_DIR / 'nb_1200727.ipynb'
responses.add(
responses.GET,
self.FILE_URL,
body=notebook.read_bytes(),
status=200)
event = self._make_event({'url': self.FILE_URL, 'input': 'ipynb'})
resp = index.lambda_handler(event, None)
body = json.loads(read_body(resp))
assert resp['statusCode'] == 200, 'preview failed on nb_1200727.ipynb'
body_html = body['html']
# neither lxml, nor py_w3c.validators.html.validator works to validate
# these fragments; reasons include base64 encoded images, html entities, etc.
# so we are going to trust nbconvert and just do some basic sanity checks
# it is also the case that we (often) need to update nbconvert, and
# HTML output changes version over version, so checking for exact HTML
# is fragile
assert body_html.count('<div') > 0, 'expected divs in ipynb HTML'
assert body_html.count('<div') == body_html.count('</div>')
assert body_html.count('<span') > 0, 'expected spans in ipynb HTML'
assert body_html.count('<span') == body_html.count('</span>')
# check for some strings we know should be in there
assert 'SVD of Minute-Market-Data' in body_html, 'missing expected contents'
assert 'Preprocessing' in body_html, 'missing expected contents'
        assert "<pre>['SEE', 'SE', 'SHW', 'SIG'," in body_html, \
'Cell 3 output seems off'
assert (
'<span class="n">batch_size</span><span class="o">=</span><span class="mi">100</span>'
'<span class="p">'
) in body_html, 'Last cell output missing'
@patch(__name__ + '.index.LAMBDA_MAX_OUT', 89_322)
@responses.activate
def test_ipynb_chop(self):
"""test that we eliminate output cells when we're in danger of breaking
Lambda's invocation limit"""
notebook = BASE_DIR / 'nb_1200727.ipynb'
responses.add(
responses.GET,
self.FILE_URL,
body=notebook.read_bytes(),
status=200)
event = self._make_event({'url': self.FILE_URL, 'input': 'ipynb'})
resp = index.lambda_handler(event, None)
body = json.loads(read_body(resp))
assert resp['statusCode'] == 200, 'preview failed on nb_1200727.ipynb'
body_html = body['html']
# isclose bc string sizes differ, e.g. on Linux
assert math.isclose(len(body_html), 18084, abs_tol=200), "Hmm, didn't chop nb_1200727.ipynb"
@responses.activate
def test_ipynb_exclude(self):
"""test sending ipynb bytes"""
notebook = BASE_DIR / 'nb_1200727.ipynb'
responses.add(
responses.GET,
self.FILE_URL,
body=notebook.read_bytes(),
status=200)
event = self._make_event({'url': self.FILE_URL, 'input': 'ipynb', 'exclude_output': 'true'})
resp = index.lambda_handler(event, None)
body = json.loads(read_body(resp))
assert resp['statusCode'] == 200, 'preview failed on nb_1200727.ipynb'
body_html = body['html']
# neither lxml, nor py_w3c.validators.html.validator works to validate
# these fragments; reasons include base64 encoded images, html entities, etc.
# so we are going to trust nbconvert and just do some basic sanity checks
# it is also the case that we (often) need to update nbconvert, and
# HTML output changes version over version, so checking for exact HTML
# is fragile
assert body_html.count('<div') > 0, 'expected divs in ipynb HTML'
assert body_html.count('<div') == body_html.count('</div>')
assert body_html.count('<span') > 0, 'expected spans in ipynb HTML'
assert body_html.count('<span') == body_html.count('</span>')
# check for some strings we know should be in there
assert 'SVD of Minute-Market-Data' in body_html, 'missing expected contents'
assert 'Preprocessing' in body_html, 'missing expected contents'
        assert "<pre>['SEE', 'SE', 'SHW', 'SIG'," not in body_html, \
'Unexpected output cell; exclude_output:true was given'
assert (
'<span class="n">batch_size</span><span class="o">=</span><span class="mi">100</span>'
'<span class="p">'
) in body_html, 'Last cell output missing'
assert len(body_html.encode()) < 19_000, \
'Preview larger than expected; exclude_output:true was given'
@responses.activate
def test_parquet(self):
"""test sending parquet bytes"""
parquet = BASE_DIR / 'atlantic_storms.parquet'
responses.add(
responses.GET,
self.FILE_URL,
body=parquet.read_bytes(),
status=200)
event = self._make_event({'url': self.FILE_URL, 'input': 'parquet'})
resp = index.lambda_handler(event, None)
assert resp['statusCode'] == 200, f'Expected 200, got {resp["statusCode"]}'
body = json.loads(read_body(resp))
# open file and check body return against parquet metadata
pf = pq.ParquetFile(parquet)
assert all(f'<th>{col}</th>' in body['html'] for col in pf.schema.names), \
'missing a column header in the preview'
assert body['html'].count('<') > 0, 'expected tags in HTML'
assert body['html'].count('<') == body['html'].count('>'), \
'unmatched HTML tags'
assert set(pf.schema.names) == set(body['info']['schema']['names']), \
'unexpected difference of columns'
@responses.activate
def test_parquet_empty(self):
"""test a parquet file with columns but no rows"""
parquet = BASE_DIR / 'onlycolumns-c000'
responses.add(
responses.GET,
self.FILE_URL,
body=parquet.read_bytes(),
status=200)
event = self._make_event({'url': self.FILE_URL, 'input': 'parquet'})
resp = index.lambda_handler(event, None)
assert resp['statusCode'] == 200, f'Expected 200, got {resp["statusCode"]}'
body = json.loads(read_body(resp))
assert '<th>column_a</th>' in body['html'], 'Missing column_a'
assert '<th>column_k</th>' in body['html'], 'Missing column_k'
assert '<th>column_z</th>' in body['html'], 'Missing column_z'
@responses.activate
def test_parquet_no_pandas(self):
"""test sending parquet bytes, but with a different metadata format"""
parquet = BASE_DIR / 'parquet_no_pandas.snappy.parquet'
responses.add(
responses.GET,
self.FILE_URL,
body=parquet.read_bytes(),
status=200)
event = self._make_event({'url': self.FILE_URL, 'input': 'parquet'})
resp = index.lambda_handler(event, None)
assert resp['statusCode'] == 200, f'Expected 200, got {resp["statusCode"]}'
body = json.loads(read_body(resp))
# open file and check body return against parquet metadata
pf = pq.ParquetFile(parquet)
assert all(f'<th>{col}</th>' in body['html'] for col in pf.schema.names), \
'missing a column header in the preview'
assert body['html'].count('<') > 0, 'expected tags in HTML'
assert body['html'].count('<') == body['html'].count('>'), \
'unmatched HTML tags'
assert set(pf.schema.names) == set(body['info']['schema']['names']), \
'unexpected difference of columns'
@responses.activate
def test_tsv(self):
"""test returning HTML previews of TSV (via pandas)"""
csv = BASE_DIR / 'avengers.tsv'
responses.add(
responses.GET,
self.FILE_URL,
body=csv.read_bytes(),
status=200)
event = self._make_event({'url': self.FILE_URL, 'input': 'csv', 'sep': '\t'})
resp = index.lambda_handler(event, None)
body = json.loads(read_body(resp))
assert resp['statusCode'] == 200, f'preview failed on {csv}'
body_html = body['html']
assert body_html.count('<table') == 1, 'expected one HTML table'
assert body_html.count('</table>') == 1, 'expected one HTML table'
assert body_html.count('<thead>') == 1, 'expected one HTML table head'
assert body_html.count('</thead>') == 1, 'expected one HTML table head'
assert body_html.count('<p>') == body_html.count('</p>'), 'malformed HTML'
assert '<td>Nicholas Fury, Jr., Marcus Johnson</td>' in body_html, \
'Expected Nick to be an Avenger'
assert not re.match(r'\d+ rows × \d+ columns', body_html), \
'table dimensions should be removed'
with open(BASE_DIR / 'tsv_html_response_head.txt') as expected:
head = expected.read()
assert head in body_html, 'unexpected first columns'
@responses.activate
def test_tsv_quote(self):
"""test TSV from the glue NLP dataset"""
csv = BASE_DIR / 'dev.tsv'
responses.add(
responses.GET,
self.FILE_URL,
body=csv.read_bytes(),
status=200)
event = self._make_event({'url': self.FILE_URL, 'input': 'csv', 'sep': '\t'})
resp = index.lambda_handler(event, None)
body = json.loads(read_body(resp))
assert resp['statusCode'] == 200, f'preview failed on {csv}'
body_html = body['html']
assert "<td>While dioxin levels in the environment were up" in body_html,\
"missing expected cell"
assert "<td>In Soviet times the Beatles ' music \" was cons...</td>" in body_html,\
"missing expected cell"
warnings = body['info']['warnings']
assert warnings, f"expected warnings when parsing {csv}"
assert warnings.count("Skipping line") == 43, "expected to skip 43 lines"
@responses.activate
def test_tsv_as_csv(self):
"""test returning HTML previews of mislabeled or problematic CSVs (via pandas)"""
csv = BASE_DIR / 'tsv_mixed_types.csv'
responses.add(
responses.GET,
self.FILE_URL,
body=csv.read_bytes(),
status=200)
event = self._make_event({'url': self.FILE_URL, 'input': 'csv'})
resp = index.lambda_handler(event, None)
body = json.loads(read_body(resp))
assert resp['statusCode'] == 200, f'preview failed on {csv}'
body_html = body['html']
assert body_html.count('<table') == 1, 'expected one HTML table'
assert body_html.count('</table>') == 1, 'expected one HTML table'
assert body_html.count('<thead>') == 1, 'expected one HTML table head'
assert body_html.count('</thead>') == 1, 'expected one HTML table head'
assert body_html.count('<p>') == body_html.count('</p>'), 'malformed HTML'
assert '<td>Taiwan Strait, Taiwan (general), Taiwan</td>' in body_html, \
'Missing a cell on the Taiwan Strait'
assert not re.match(r'\d+ rows × \d+ columns', body_html), \
'table dimensions should be removed'
with open(BASE_DIR / 'tsv_mixed_types_html_response_head.txt') as expected:
head = expected.read()
assert head in body_html, 'unexpected first columns'
@responses.activate
def test_no_meta_parquet(self):
"""test a parquet file with no meta.metadata"""
no_meta_parquet = BASE_DIR / 'no_meta.parquet'
responses.add(
responses.GET,
self.FILE_URL,
body=no_meta_parquet.read_bytes(),
status=200)
event = self._make_event({'url': self.FILE_URL, 'input': 'parquet'})
resp = index.lambda_handler(event, None)
assert resp['statusCode'] == 200, f'Expected 200, got {resp["statusCode"]}'
@responses.activate
@patch(__name__ + '.index.get_preview_lines')
def test_txt_max_count(self, get_preview_lines):
"""test truncation to line_count"""
responses.add(
responses.GET,
self.FILE_URL,
body='foo',
status=200)
for count in (1, 44, 19):
get_preview_lines.reset_mock()
get_preview_lines.return_value = []
event = self._make_event({'url': self.FILE_URL, 'input': 'txt', 'line_count': str(count)})
resp = index.lambda_handler(event, None)
assert resp['statusCode'] == 200, 'preview lambda failed'
get_preview_lines.assert_called_with(ANY, None, count, index.CATALOG_LIMIT_BYTES)
@responses.activate
@patch(__name__ + '.index.get_preview_lines')
def test_txt_count_gz(self, get_preview_lines):
"""test truncation to line_count for a zipped file"""
responses.add(
responses.GET,
self.FILE_URL,
body='foo',
status=200)
for count in (9, 232, 308):
get_preview_lines.reset_mock()
get_preview_lines.return_value = []
event = self._make_event({
'url': self.FILE_URL,
'input': 'txt', 'line_count': str(count),
'compression': 'gz'})
resp = index.lambda_handler(event, None)
assert resp['statusCode'] == 200, 'preview lambda failed'
get_preview_lines.assert_called_with(ANY, 'gz', count, index.CATALOG_LIMIT_BYTES)
@responses.activate
def test_txt_short(self):
"""test sending txt bytes"""
txt = BASE_DIR / 'short.txt'
responses.add(
responses.GET,
self.FILE_URL,
body=txt.read_bytes(),
status=200)
event = self._make_event({'url': self.FILE_URL, 'input': 'txt'})
resp = index.lambda_handler(event, None)
body = json.loads(read_body(resp))
assert resp['statusCode'] == 200, 'preview lambda failed on short.txt'
headlist = body['info']['data']['head']
assert len(headlist) == 98, 'unexpected number of lines head'
assert headlist[0] == 'Line 1', 'unexpected first line in head'
assert headlist[97] == 'Line 98', 'unexpected last line in head'
taillist = body['info']['data']['tail']
assert not taillist, 'expected empty tail'
@patch(__name__ + '.index.CHUNK', 4)
@responses.activate
def test_max_bytes(self):
"""test max bytes"""
txt = BASE_DIR / 'short.txt'
responses.add(
responses.GET,
self.FILE_URL,
body=txt.read_bytes(),
status=200)
event = self._make_event({'url': self.FILE_URL, 'input': 'txt', 'max_bytes': '3'})
resp = index.lambda_handler(event, None)
body = json.loads(read_body(resp))
assert resp['statusCode'] == 200, 'preview lambda failed on short.txt'
headlist = body['info']['data']['head']
assert len(headlist) == 1, 'unexpected number of lines head'
assert headlist[0] == 'Line', 'unexpected first line in head'
@responses.activate
def test_vcf(self):
"""test sending vcf bytes"""
vcf = BASE_DIR / 'example.vcf'
responses.add(
responses.GET,
self.FILE_URL,
body=vcf.read_bytes(),
status=200)
event = self._make_event({'url': self.FILE_URL, 'input': 'vcf'})
resp = index.lambda_handler(event, None)
assert resp['statusCode'] == 200, 'preview failed on example.vcf'
_check_vcf(read_body(resp))
@responses.activate
def test_vcf_gz(self):
"""test sending vcf bytes (zipped)"""
vcf = BASE_DIR / 'example.vcf.gz'
responses.add(
responses.GET,
self.FILE_URL,
body=vcf.read_bytes(),
status=200)
event = self._make_event(
{'url': self.FILE_URL, 'input': 'vcf', 'compression': 'gz'})
resp = index.lambda_handler(event, None)
assert resp['statusCode'] == 200, 'preview failed on example.vcf.gz'
_check_vcf(read_body(resp))
# 513 = 128*4 + 1 => ensure there's a partial chunk in play
@patch(__name__ + '.index.CATALOG_LIMIT_BYTES', 513)
@patch(__name__ + '.index.CHUNK', 128)
@responses.activate
def test_vcf_gz_partial(self):
"""test previewing part of a gzipped file
we _should_ read 4 whole chunks and one partial one;
and the preview endpoint should truncate to the last whole line
"""
vcf = BASE_DIR / 'example.vcf.gz'
assert os.path.getsize(vcf) > 128*5, 'not testing partial file decode'
responses.add(
responses.GET,
self.FILE_URL,
body=vcf.read_bytes(),
status=200)
event = self._make_event(
{'url': self.FILE_URL, 'input': 'vcf', 'compression': 'gz'})
# test partial decode
resp = index.lambda_handler(event, None)
body = json.loads(read_body(resp))
assert resp['statusCode'] == 200, 'preview failed on example.vcf.gz, partial decode'
data = body['info']['data']
assert not data['data'], 'partial decode; did not expect any data'
assert not data['header'], 'partial decode; did not expect a header'
assert data['meta'][0] == '##fileformat=VCFv4.0', 'bad first meta line'
assert data['meta'][-1].startswith('##FILTER=<'), 'bad last meta line'
assert data['meta'][-1].endswith('samples have data">'), 'bad last meta line'
meta = body['info']['metadata']
assert meta['variant_count'] == 0, 'expected no variants'
assert not body['info']['metadata']['variants'], 'expected no variants'
def _check_vcf(resp):
"""common logic for checking vcf files, e.g. across compression settings"""
body = json.loads(resp)
assert body['info']['metadata']['variant_count'] == 3, 'expected 3 variants'
data = body['info']['data']
assert data['meta'][0] == '##fileformat=VCFv4.0', 'unexpected meta first line'
assert data['meta'][5].startswith('##INFO=<ID=NS,Number=1,Type=Integer,Desc'), \
'unexpected meta fifth line'
assert data['meta'][5].endswith('"Number of Samples With Data">'), \
'unexpected meta fifth line'
assert data['header'] == ['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT'], \
'unexpected header'
assert len(data['header']) == index.MIN_VCF_COLS + 1, 'unexpected number of columns'
assert body['info']['metadata']['variants'] == ['NA00001', 'NA00002', 'NA00003'], \
'unexpected variants'
assert len(data['data'][0]) == index.MIN_VCF_COLS + 1, 'unexpected number of columns'
assert data['data'][0] == [
'20', '14370', 'rs6054257', 'G', 'A', '29', 'PASS', 'NS=3;DP=14;AF=0.5;DB;H2', 'GT:GQ:DP:HQ'
], 'unexpected first data line'
assert data['data'][-1] == [
'20', '1234567', 'microsat1', 'GTCT', 'G,GTACT', '50', 'PASS', 'NS=3;DP=9;AA=G', 'GT:GQ:DP'
    ], 'unexpected last data line'
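# Hedged illustration (not part of the original test suite): the API
# Gateway-style proxy event that TestIndex._make_event() hands to the preview
# Lambda. The helper name is hypothetical.
def _example_preview_event():
    return TestIndex._make_event(
        {'url': TestIndex.FILE_URL, 'input': 'csv', 'sep': '\t'},
        {'origin': MOCK_ORIGIN})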
| apache-2.0 |
yyjiang/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 40 | 23697 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
# When validating this against glmnet notice that glmnet divides it
    # by nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
# Actually, the parameters alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
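# Hedged illustration (not part of the original test suite): the default
# problem built above is deliberately ill-posed, with 200 features for only 50
# samples and just 10 informative coefficients. The helper name is
# hypothetical.
def _example_build_dataset_shapes():
    X, y, X_test, y_test = build_dataset()
    return X.shape, y.shape, X_test.shape, y_test.shape  # (50, 200), (50,), ...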
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
    # to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprection_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same output for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
| bsd-3-clause |
benanne/kaggle-galaxies | extract_pysex_params_extra.py | 8 | 3883 | import load_data
import pysex
import numpy as np
import multiprocessing as mp
import cPickle as pickle
"""
Extract a bunch of extra info to get a better idea of the size of objects
"""
SUBSETS = ['train', 'test']
TARGET_PATTERN = "data/pysex_params_gen2_%s.npy.gz"
SIGMA2 = 5000 # 5000 # std of the centrality weighting (Gaussian)
DETECT_THRESH = 10.0 # detection threshold for sextractor
NUM_PROCESSES = 8
def estimate_params(img):
img_green = img[..., 1] # supposedly using the green channel is a good idea. alternatively we could use luma.
# this seems to work well enough.
out = pysex.run(img_green, params=[
'X_IMAGE', 'Y_IMAGE', # barycenter
# 'XMIN_IMAGE', 'XMAX_IMAGE', 'YMIN_IMAGE', 'YMAX_IMAGE', # enclosing rectangle
# 'XPEAK_IMAGE', 'YPEAK_IMAGE', # location of maximal intensity
'A_IMAGE', 'B_IMAGE', 'THETA_IMAGE', # ellipse parameters
# 'PETRO_RADIUS',
'KRON_RADIUS', 'PETRO_RADIUS', 'FLUX_RADIUS', 'FWHM_IMAGE', # various radii
], conf_args={ 'DETECT_THRESH': DETECT_THRESH })
# x and y are flipped for some reason.
# theta should be 90 - theta.
# we convert these here so we can plot stuff with matplotlib easily.
try:
ys = out['X_IMAGE'].tonumpy()
xs = out['Y_IMAGE'].tonumpy()
as_ = out['A_IMAGE'].tonumpy()
bs = out['B_IMAGE'].tonumpy()
thetas = 90 - out['THETA_IMAGE'].tonumpy()
# kron_radii = out['KRON_RADIUS'].tonumpy()
petro_radii = out['PETRO_RADIUS'].tonumpy()
# flux_radii = out['FLUX_RADIUS'].tonumpy()
# fwhms = out['FWHM_IMAGE'].tonumpy()
# detect the most salient galaxy
# take into account size and centrality
surface_areas = np.pi * (as_ * bs)
centralities = np.exp(-((xs - 211.5)**2 + (ys - 211.5)**2)/SIGMA2) # 211.5, 211.5 is the center of the image
# salience is proportional to surface area, with a gaussian prior on the distance to the center.
saliences = surface_areas * centralities
most_salient_idx = np.argmax(saliences)
x = xs[most_salient_idx]
y = ys[most_salient_idx]
a = as_[most_salient_idx]
b = bs[most_salient_idx]
theta = thetas[most_salient_idx]
# kron_radius = kron_radii[most_salient_idx]
petro_radius = petro_radii[most_salient_idx]
# flux_radius = flux_radii[most_salient_idx]
# fwhm = fwhms[most_salient_idx]
except TypeError: # sometimes these are empty (no objects found), use defaults in that case
x = 211.5
y = 211.5
a = np.nan # dunno what this has to be, deal with it later
b = np.nan # same
theta = np.nan # same
# kron_radius = np.nan
petro_radius = np.nan
# flux_radius = np.nan
# fwhm = np.nan
# return (x, y, a, b, theta, flux_radius, kron_radius, petro_radius, fwhm)
return (x, y, a, b, theta, petro_radius)
for subset in SUBSETS:
print "SUBSET: %s" % subset
print
if subset == 'train':
num_images = load_data.num_train
ids = load_data.train_ids
elif subset == 'test':
num_images = load_data.num_test
ids = load_data.test_ids
def process(k):
print "image %d/%d (%s)" % (k + 1, num_images, subset)
img_id = ids[k]
img = load_data.load_image(img_id, from_ram=True, subset=subset)
return estimate_params(img)
pool = mp.Pool(NUM_PROCESSES)
estimated_params = pool.map(process, xrange(num_images), chunksize=100)
pool.close()
pool.join()
# estimated_params = map(process, xrange(num_images)) # no mp for debugging
params_array = np.array(estimated_params)
target_path = TARGET_PATTERN % subset
print "Saving to %s..." % target_path
load_data.save_gz(target_path, params_array)
| bsd-3-clause |
chianwei123/visualizer | context_switch.py | 1 | 2487 | #!/usr/bin/env python
import matplotlib.pyplot as plt
log = open('log', 'r')
lines = log.readlines()
# prepare for plotting
fig, ax = plt.subplots()
bar = 5
label = []
label_axes = []
context_switch = []
tasks = {}
for line in lines:
line = line.strip()
inst, args = line.split(' ', 1)
if inst == 'task':
id, priority, name = args.split(' ', 2)
task = {}
task['no'] = str(len(tasks) + 1) # index of task
task['priority'] = int(priority)
task['name'] = name.strip()
task['created'] = True
task['round'] = 0 # round of execution of this task
tasks[id] = task # we can query task by id in tasks later
elif inst == 'switch':
out_task, in_task, tick, tick_reload, out_minitick, in_minitick = args.split(' ')
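# The two lines below convert the SysTick state into a time stamp: 'tick' is the
# number of completed ticks and (tick_reload - minitick) / tick_reload is the
# elapsed fraction of the current tick (SysTick counts down from tick_reload).
# The '/ 100 * 1000' scaling appears to assume a 100 Hz tick rate, which would
# make the result milliseconds; this is an inference from the formula, not
# something documented in the log format.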
out_time = (float(tick) + (float(tick_reload) - float(out_minitick)) / float(tick_reload)) / 100 * 1000;
in_time = (float(tick) + (float(tick_reload) - float(in_minitick)) / float(tick_reload)) / 100 * 1000;
overhead = {}
overhead['out'] = out_task
overhead['in'] = in_task
overhead['duration'] = in_time - out_time
context_switch.append(overhead)
out_round = tasks[out_task]['round']
in_round = tasks[in_task]['round']
tasks[out_task]['round'] += 1
tasks[out_task][str(out_round) + 'out'] = out_time # record out time of each round of the task
tasks[in_task][str(in_round) + 'in'] = in_time
log.close()
cost = open('cost', 'w')
# grasp = open('sched.grasp', 'w') # maybe grasp could display context switch cost; not yet studied
for overhead in context_switch:
cost.write('switch from %s to %s cost %f microseconds\n' % (overhead['out'], overhead['in'], overhead['duration']))
cost.close()
times = open('times', 'w')
for id in tasks:
serial = []
r = 0
try:
while r < tasks[id]['round']:
#times.write('on %f %s in\n' % (tasks[id][str(r) +'in'], tasks[id]['name']))
#times.write('on %f %s out\n' % (tasks[id][str(r) + 'out'], tasks[id]['name']))
tasks[id][str(r) + 'elapse'] = tasks[id][str(r) + 'out'] - tasks[id][str(r) + 'in']
#times.write('elapse %f\n' % (tasks[id][str(r) + 'elapse']))
serial.append((tasks[id][str(r) + 'in'], tasks[id][str(r) + 'elapse']))
r += 1
ax.broken_barh(serial, (bar, 5), facecolors='blue')
label.append((tasks[id]['name']))
label_axes.append((float(bar) + 2.5))
bar += 10
except:
pass
times.close()
ax.set_ylim(0, 100)
ax.set_xlim(0, 4000)
ax.set_xlabel('time elapse')
ax.set_yticks(label_axes)
ax.set_yticklabels(label)
ax.grid(False)
plt.show() | bsd-2-clause |
iulian787/spack | var/spack/repos/builtin/packages/py-hatchet/package.py | 5 | 1026 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyHatchet(PythonPackage):
"""Hatchet is an analysis tool for structured tree or graph performance data
using an indexed Pandas dataframe."""
homepage = "https://github.com/LLNL/hatchet"
url = "https://github.com/LLNL/hatchet/archive/v1.0.0.tar.gz"
maintainers = ["slabasan", "bhatele", "tgamblin"]
version('1.0.0', sha256='efd218bc9152abde0a8006489a2c432742f00283a114c1eeb6d25abc10f5862d')
depends_on('python@2.7,3:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-pandas', type=('build', 'run'))
depends_on('py-pydot', type=('build', 'run'))
depends_on('py-pyyaml', type=('build', 'run'))
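# Once this recipe is available in a Spack repository, the package can be
# installed in the usual way (a sketch, assuming a working Spack setup):
#
#   spack install py-hatchet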
| lgpl-2.1 |
treesnail/tushare | setup.py | 21 | 2592 | from setuptools import setup, find_packages
import codecs
import os
import tushare
def read(fname):
return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()
long_desc = """
TuShare
===============
.. image:: https://api.travis-ci.org/waditu/tushare.png?branch=master
:target: https://travis-ci.org/waditu/tushare
.. image:: https://badge.fury.io/py/tushare.png
:target: http://badge.fury.io/py/tushare
* easy to use as most of the data returned are pandas DataFrame objects
* can be easily saved as csv, excel or json files
* can be inserted into MySQL or Mongodb
Target Users
--------------
* financial market analyst of China
* learners of financial data analysis with pandas/NumPy
* people who are interested in China financial data
Installation
--------------
pip install tushare
Upgrade
---------------
pip install tushare --upgrade
Quick Start
--------------
::
import tushare as ts
ts.get_hist_data('600848')
return::
open high close low volume p_change ma5 \
date
2012-01-11 6.880 7.380 7.060 6.880 14129.96 2.62 7.060
2012-01-12 7.050 7.100 6.980 6.900 7895.19 -1.13 7.020
2012-01-13 6.950 7.000 6.700 6.690 6611.87 -4.01 6.913
2012-01-16 6.680 6.750 6.510 6.480 2941.63 -2.84 6.813
2012-01-17 6.660 6.880 6.860 6.460 8642.57 5.38 6.822
2012-01-18 7.000 7.300 6.890 6.880 13075.40 0.44 6.788
2012-01-19 6.690 6.950 6.890 6.680 6117.32 0.00 6.770
2012-01-20 6.870 7.080 7.010 6.870 6813.09 1.74 6.832
"""
setup(
name='tushare',
version=tushare.__version__,
description='A utility for crawling historical and Real-time Quotes data of China stocks',
# long_description=read("READM.rst"),
long_description = long_desc,
author='Jimmy Liu',
author_email='[email protected]',
license='BSD',
url='http://tushare.org',
keywords='china stock data',
classifiers=['Development Status :: 4 - Beta',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: BSD License'],
packages=['tushare','tushare.stock','tushare.data','tushare.util'],
package_data={'': ['*.csv']},
) | bsd-3-clause |
vinhqdang/measure_distance_image | vinh.py | 1 | 6987 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 17 14:26:04 2014
@author: williams savero torres
Modifications by Piotr:
- adding image histogram equalization for better noise ration detection
- resolving system paths problem -> using op.path.join
- sorting input files by names
- adjusting (boosting) plot properties
Updated by Vinh Dang on 18 - Mar - 2015:
- fix the bug when the number of maxima is less than 4; the number of maxima is now guaranteed to be 4
- smooth the curve for better detection
- support to process 1 single file
"""
from __future__ import division
import numpy as np
from numpy import*
import scipy.ndimage as ndimage
#import matplotlib.pyplot as plt
import pylab
#import scipy.misc
import scipy
from pylab import *
from skimage.segmentation import random_walker
from scipy import ndimage
#for image adjustments:
from skimage.exposure import equalize_hist
def BATCH(kerr):
#reading image
img = ndimage.imread(kerr)
#equalizing histogram - added by Piotr
img = equalize_hist(img)
# Note the 0 sigma for the last axis, we don't want to blur the color planes together!
img = ndimage.gaussian_filter(img, sigma=(2, 2), mode='nearest', order=0)
# Make a line with "num" points...
# Modify x0, x1, y0, y1 if needed to make sure the line cuts the two circles
x0, y0 = 1080, 438
x1, y1 = 1080, 1388
num = 10000
x, y = np.linspace(x0, x1, num), np.linspace(y0, y1, num)
# Extract the values along the line, using cubic interpolation
zi = scipy.ndimage.map_coordinates(np.transpose(img), np.vstack((x,y)), order=5)
#print zi[0]
# Store the original value of zi
z0 = zi
# Modify if needed to adjust the smooth function
smooth_range = 50
d1 = []
for i in range (len (zi)):
sum = 0.0
count = 0
for j in range (i - smooth_range, i + smooth_range + 1):
if (j >= 0 and j < len (zi)):
sum += zi [j]
count += 1
d1.append (sum / count)
zi = d1
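# Note: the loop above is a centered moving average with a window of
# 2*smooth_range + 1 samples and partial windows at the edges. A vectorized
# sketch of the same operation with numpy (not used here, kept only as a
# reference) would be:
#   w = 2 * smooth_range + 1
#   zi = np.convolve(z0, np.ones(w), 'same') / np.convolve(np.ones(len(z0)), np.ones(w), 'same')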
# Ma = x[zi == max(zi)]
d=diff(zi, n=1, axis=-1)
mean_d=mean(abs(d))
#print d
#!--------------peaks definition--------!#
def peakdet(v, delta, x = None):
maxtab = []
mintab = []
if x is None:
x = arange(len(v))
v = asarray(v)
if len(v) != len(x):
sys.exit('Input vectors v and x must have same length')
if not isscalar(delta):
sys.exit('Input argument delta must be a scalar')
if delta <= 0:
sys.exit('Input argument delta must be positive')
mn, mx = Inf, -Inf
mnpos, mxpos = NaN, NaN
lookformax = True
for i in arange(len(v)):
this = v[i]
if this > mx:
mx = this
mxpos = x[i]
if this < mn:
mn = this
mnpos = x[i]
if lookformax:
if this < mx-delta:
maxtab.append((mxpos, mx))
mn = this
mnpos = x[i]
lookformax = False
else:
if this > mn+delta:
mintab.append((mnpos, mn))
mx = this
mxpos = x[i]
lookformax = True
return maxtab, mintab
#!---------------------------------------------------------------
#! setting up threshold for detection
treshold=3*mean_d
det=peakdet(abs(d), treshold, x = None)
print "detection =",det
for i in range(len(det[0])):
print "%i peak position = %.3f"%(i,(y[det[0][i][0]]))
#print "number of maxmas found =",len(det[0])
#print "number of minimas found =",len(det[1])
#! testing number of maxima
#!---------------------------------------------------------------
while len(det[0])!= 4:
if (len(det[0]) > 4):
print "not sufficient threshold was used..."
print "using 2% higher threshold.."
treshold=treshold + 0.02*treshold
det=peakdet(abs(d), treshold, x = None)
print "new number of maxmas found with higher threshold =",len(det[0])
else:
print "not enough threshold was used..."
print "using 5% lower threshold.."
treshold=treshold - 0.05*treshold
det=peakdet(abs(d), treshold, x = None)
print "new number of maxmas found with lower threshold =",len(det[0])
#=======================================Plotting
figure(figsize=(16,8))
subplot(311,xticklabels=[],yticklabels=[])
pylab.gray()
imshow(img,aspect='equal')
plot(x, y, 'r-', linewidth=1)
subplot(312,xticklabels=[])
plot(y,zi,'b-',lw=3)
plot (y, z0, 'y-', lw = 3)
#if len(det[0])==4:
d1=((y[det[0][1][0]]-y[det[0][0][0]]))
d2=((y[det[0][3][0]]-y[det[0][2][0]]))
figtext(0.7,0.95,"$d_{up}[\mu m]=%.6f$"%float(d1),size=20)
figtext(0.7,0.85,"$d_{down}[\mu m]=%.6f$"%float(d2),size=20)
dt1_list.append(d1)
dt2_list.append(d2)
#else:
# pass
summary.write( '%s \t %i \t %.3f \t %.3f \n'%(str(os.path.basename(kerr[:-4])),count,d1,d2))
summary.flush()
for i in range(len(det[0])):
axvline(x=y[det[0][i][0]],color='r',lw=3)
subplot(313,yticklabels=[])
plot(y[:-1],d,'g-',lw=3)
axhline(y=(treshold),color='r',label='$treshold$',lw=3)
axhline(y=(-treshold),color='r',label='$treshold$',lw=3)
fill_between(y[:-1], 0, abs(d) ,alpha=0.3,facecolor='g', interpolate=True)
tight_layout()
savefig(os.path.join(wdir,'analyses','%s.png'%(str(os.path.basename(kerr[:-4])))))
clf()
close()
#figure.close()
#!_________________________PROGRAMS LOOP___________________
#!---------------------------------------------------------------
print "starting program.."
import os
import glob
import sys
#! defining working directory
wdir=str(os.getcwd())
#! checking existance of the analyses directory
if not os.path.exists(os.path.join(wdir,'analyses')):
os.makedirs(os.path.join(wdir,'analyses'))
print "analyses directory created"
#! creating summary file with header
summary = open(os.path.join(wdir,"analyses","SUMMARY.data"), "w")
summary.write( '#filename \t count \t d1[um] \t d2[um]\n')
summary.flush()
#! creating empty d times list
dt1_list=[]
dt2_list=[]
#! iterating on all files in the directory
if (len(sys.argv) == 1):
count=1
for infile in sorted(glob.glob(os.path.join(wdir,'*.png') )):
print "\n current file is: " + infile
BATCH(infile)
count+=1
print "no more files have been found"
elif (len(sys.argv) == 2): #process a particular file
BATCH (sys.argv[1])
summary.close()
print "\n ALL DONE \n"
| gpl-2.0 |
gertingold/scipy | scipy/spatial/tests/test__plotutils.py | 11 | 2156 | from __future__ import division, print_function, absolute_import
import pytest
from numpy.testing import assert_, assert_array_equal
from scipy._lib._numpy_compat import suppress_warnings
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib import MatplotlibDeprecationWarning
has_matplotlib = True
except Exception:
has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
@pytest.mark.skipif(not has_matplotlib, reason="Matplotlib not available")
class TestPlotting:
points = [(0,0), (0,1), (1,0), (1,1)]
def test_delaunay(self):
# Smoke test
fig = plt.figure()
obj = Delaunay(self.points)
s_before = obj.simplices.copy()
with suppress_warnings() as sup:
# filter can be removed when matplotlib 1.x is dropped
sup.filter(message="The ishold function was deprecated in version")
r = delaunay_plot_2d(obj, ax=fig.gca())
assert_array_equal(obj.simplices, s_before) # shouldn't modify
assert_(r is fig)
delaunay_plot_2d(obj, ax=fig.gca())
def test_voronoi(self):
# Smoke test
fig = plt.figure()
obj = Voronoi(self.points)
with suppress_warnings() as sup:
# filter can be removed when matplotlib 1.x is dropped
sup.filter(message="The ishold function was deprecated in version")
r = voronoi_plot_2d(obj, ax=fig.gca())
assert_(r is fig)
voronoi_plot_2d(obj)
voronoi_plot_2d(obj, show_vertices=False)
def test_convex_hull(self):
# Smoke test
fig = plt.figure()
tri = ConvexHull(self.points)
with suppress_warnings() as sup:
# filter can be removed when matplotlib 1.x is dropped
sup.filter(message="The ishold function was deprecated in version")
r = convex_hull_plot_2d(tri, ax=fig.gca())
assert_(r is fig)
convex_hull_plot_2d(tri)
| bsd-3-clause |
jamlamberti/bogo_probe | learner/svr.py | 1 | 1827 | """An SVR implementation"""
import numpy as np
import sklearn.svm
from .learner import Learner
# Currently just a wrapper around SKLearn
# I think we might want to look into
# online variants (e.g. passive-aggressive algos)
# A generic implementation with multiple kernels
# would be nice!!
class SVR(Learner):
"""SVM for Regression Wrapper"""
def __init__(
self,
kernel='linear',
degree=3,
epsilon=0.01,
# gamma='auto',
coef0=0.0,
):
super(SVR, self).__init__()
self.classifier = sklearn.svm.SVR(
kernel=kernel,
epsilon=epsilon,
degree=degree,
# gamma=gamma,
coef0=coef0,
)
self.log.debug("Initialized an SVR classifier with:")
self.log.debug(
' kernel=%s, degree=%d, coef0=%0.3f',
kernel,
degree,
# gamma,
coef0)
def train(self, train_x, train_y):
"""
Train the SVM classifier
"""
self.log.info("Training SVR classifier")
train_y = np.array(train_y)
if len(train_y.shape) == 2:
self.classifier.fit(train_x, np.asarray(train_y).reshape(-1))
else:
self.classifier.fit(train_x, train_y)
self.log.info("Done training SVR classifier")
def predict(self, test_x):
"""
Return predicted class labels
"""
self.log.info("Computing SVR predictions")
return self.classifier.predict(test_x)
def predict_proba(self, test_x):
"""
Return predicted probabilities from SVR classifier
"""
# Should we wrap this and return [predict 1-predict]
raise NotImplementedError("SVR does not support probabilities")
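# Example usage (a rough sketch; the toy data below is made up and the
# surrounding 'learner' package must be importable for this to run):
#
#   model = SVR(kernel='rbf', epsilon=0.1)
#   model.train([[0.0], [1.0], [2.0], [3.0]], [0.1, 0.9, 2.1, 2.9])
#   predictions = model.predict([[1.5], [2.5]])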
| gpl-3.0 |
blondegeek/pymatgen | pymatgen/electronic_structure/plotter.py | 1 | 183028 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import logging
import math
import itertools
import warnings
from collections import OrderedDict
import numpy as np
from monty.json import jsanitize
from pymatgen.core.periodic_table import Element
from pymatgen.electronic_structure.core import Spin, Orbital, OrbitalType
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine
from pymatgen.util.plotting import pretty_plot, \
add_fig_kwargs, get_ax3d_fig_plt
from collections import Counter
import copy
from pymatgen.electronic_structure.boltztrap import BoltztrapError
from pymatgen.symmetry.bandstructure import HighSymmKpath
"""
This module implements plotter for DOS and band structure.
"""
__author__ = "Shyue Ping Ong, Geoffroy Hautier, Anubhav Jain"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "May 1, 2012"
logger = logging.getLogger(__name__)
class DosPlotter:
"""
Class for plotting DOSs. Note that the interface is extremely flexible
given that there are many different ways in which people want to view
DOS. The typical usage is::
# Initializes plotter with some optional args. Defaults are usually
# fine,
plotter = DosPlotter()
# Adds a DOS with a label.
plotter.add_dos("Total DOS", dos)
# Alternatively, you can add a dict of DOSs. This is the typical
# form returned by CompleteDos.get_spd/element/others_dos().
plotter.add_dos_dict({"dos1": dos1, "dos2": dos2})
plotter.add_dos_dict(complete_dos.get_spd_dos())
Args:
zero_at_efermi: Whether to shift all Dos to have zero energy at the
fermi energy. Defaults to True.
stack: Whether to plot the DOS as a stacked area graph
key_sort_func: function used to sort the dos_dict keys.
sigma: A float specifying a standard deviation for Gaussian smearing
the DOS for nicer looking plots. Defaults to None for no
smearing.
"""
def __init__(self, zero_at_efermi=True, stack=False, sigma=None):
self.zero_at_efermi = zero_at_efermi
self.stack = stack
self.sigma = sigma
self._doses = OrderedDict()
def add_dos(self, label, dos):
"""
Adds a dos for plotting.
Args:
label:
label for the DOS. Must be unique.
dos:
Dos object
"""
energies = dos.energies - dos.efermi if self.zero_at_efermi \
else dos.energies
densities = dos.get_smeared_densities(self.sigma) if self.sigma \
else dos.densities
efermi = dos.efermi
self._doses[label] = {'energies': energies, 'densities': densities,
'efermi': efermi}
def add_dos_dict(self, dos_dict, key_sort_func=None):
"""
Add a dictionary of doses, with an optional sorting function for the
keys.
Args:
dos_dict: dict of {label: Dos}
key_sort_func: function used to sort the dos_dict keys.
"""
if key_sort_func:
keys = sorted(dos_dict.keys(), key=key_sort_func)
else:
keys = dos_dict.keys()
for label in keys:
self.add_dos(label, dos_dict[label])
def get_dos_dict(self):
"""
Returns the added doses as a json-serializable dict. Note that if you
have specified smearing for the DOS plot, the densities returned will
be the smeared densities, not the original densities.
Returns:
dict: Dict of dos data. Generally of the form
{label: {'energies':..., 'densities': {'up':...}, 'efermi':efermi}}
"""
return jsanitize(self._doses)
def get_plot(self, xlim=None, ylim=None):
"""
Get a matplotlib plot showing the DOS.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
ncolors = max(3, len(self._doses))
ncolors = min(9, ncolors)
import palettable
colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors
y = None
alldensities = []
allenergies = []
plt = pretty_plot(12, 8)
# Note that this complicated processing of energies is to allow for
# stacked plots in matplotlib.
for key, dos in self._doses.items():
energies = dos['energies']
densities = dos['densities']
if not y:
y = {Spin.up: np.zeros(energies.shape),
Spin.down: np.zeros(energies.shape)}
newdens = {}
for spin in [Spin.up, Spin.down]:
if spin in densities:
if self.stack:
y[spin] += densities[spin]
newdens[spin] = y[spin].copy()
else:
newdens[spin] = densities[spin]
allenergies.append(energies)
alldensities.append(newdens)
keys = list(self._doses.keys())
keys.reverse()
alldensities.reverse()
allenergies.reverse()
allpts = []
for i, key in enumerate(keys):
x = []
y = []
for spin in [Spin.up, Spin.down]:
if spin in alldensities[i]:
densities = list(int(spin) * alldensities[i][spin])
energies = list(allenergies[i])
if spin == Spin.down:
energies.reverse()
densities.reverse()
x.extend(energies)
y.extend(densities)
allpts.extend(list(zip(x, y)))
if self.stack:
plt.fill(x, y, color=colors[i % ncolors],
label=str(key))
else:
plt.plot(x, y, color=colors[i % ncolors],
label=str(key), linewidth=3)
if not self.zero_at_efermi:
ylim = plt.ylim()
plt.plot([self._doses[key]['efermi'],
self._doses[key]['efermi']], ylim,
color=colors[i % ncolors],
linestyle='--', linewidth=2)
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
else:
xlim = plt.xlim()
relevanty = [p[1] for p in allpts
if xlim[0] < p[0] < xlim[1]]
plt.ylim((min(relevanty), max(relevanty)))
if self.zero_at_efermi:
ylim = plt.ylim()
plt.plot([0, 0], ylim, 'k--', linewidth=2)
plt.xlabel('Energies (eV)')
plt.ylabel('Density of states')
plt.legend()
leg = plt.gca().get_legend()
ltext = leg.get_texts() # all the text.Text instance in the legend
plt.setp(ltext, fontsize=30)
plt.tight_layout()
return plt
def save_plot(self, filename, img_format="eps", xlim=None, ylim=None):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(xlim, ylim)
plt.savefig(filename, format=img_format)
def show(self, xlim=None, ylim=None):
"""
Show the plot using matplotlib.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(xlim, ylim)
plt.show()
class BSPlotter:
"""
Class to plot or get data to facilitate the plot of band structure objects.
Args:
bs: A BandStructureSymmLine object.
"""
def __init__(self, bs):
if not isinstance(bs, BandStructureSymmLine):
raise ValueError(
"BSPlotter only works with BandStructureSymmLine objects. "
"A BandStructure object (on a uniform grid for instance and "
"not along symmetry lines won't work)")
self._bs = bs
# TODO: come up with an intelligent way to cut the highest unconverged
# bands
self._nb_bands = self._bs.nb_bands
def _maketicks(self, plt):
"""
utility private method to add ticks to a band structure
"""
ticks = self.get_ticks()
# Sanitize: only plot the unique values
uniq_d = []
uniq_l = []
temp_ticks = list(zip(ticks['distance'], ticks['label']))
for i in range(len(temp_ticks)):
if i == 0:
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Adding label {l} at {d}".format(
l=temp_ticks[i][1], d=temp_ticks[i][0]))
else:
if temp_ticks[i][1] == temp_ticks[i - 1][1]:
logger.debug("Skipping label {i}".format(
i=temp_ticks[i][1]))
else:
logger.debug("Adding label {l} at {d}".format(
l=temp_ticks[i][1], d=temp_ticks[i][0]))
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Unique labels are %s" % list(zip(uniq_d, uniq_l)))
plt.gca().set_xticks(uniq_d)
plt.gca().set_xticklabels(uniq_l)
for i in range(len(ticks['label'])):
if ticks['label'][i] is not None:
# don't print the same label twice
if i != 0:
if ticks['label'][i] == ticks['label'][i - 1]:
logger.debug("already print label... "
"skipping label {i}".format(
i=ticks['label'][i]))
else:
logger.debug("Adding a line at {d}"
" for label {l}".format(
d=ticks['distance'][i], l=ticks['label'][i]))
plt.axvline(ticks['distance'][i], color='k')
else:
logger.debug("Adding a line at {d} for label {l}".format(
d=ticks['distance'][i], l=ticks['label'][i]))
plt.axvline(ticks['distance'][i], color='k')
return plt
def bs_plot_data(self, zero_to_efermi=True):
"""
Get the data nicely formatted for a plot
Args:
zero_to_efermi: Automatically subtract off the Fermi energy from the
eigenvalues and plot.
Returns:
dict: A dictionary of the following format:
ticks: A dict with the 'distances' at which there is a kpoint (the
x axis) and the labels (None if no label).
energy: A dict storing bands for spin up and spin down data
[{Spin:[band_index][k_point_index]}] as a list (one element
for each branch) of energy for each kpoint. The data is
stored by branch to facilitate the plotting.
vbm: A list of tuples (distance, energy) marking the vbms. The
energies are shifted with respect to the fermi level if the
option has been selected.
cbm: A list of tuples (distance, energy) marking the cbms. The
energies are shifted with respect to the fermi level if the
option has been selected.
lattice: The reciprocal lattice.
zero_energy: This is the energy used as zero for the plot.
band_gap: A string indicating the band gap and its nature (empty if
it's a metal).
is_metal: True if the band structure is metallic (i.e., there is at
least one band crossing the fermi level).
"""
distance = []
energy = []
if self._bs.is_metal():
zero_energy = self._bs.efermi
else:
zero_energy = self._bs.get_vbm()['energy']
if not zero_to_efermi:
zero_energy = 0.0
for b in self._bs.branches:
if self._bs.is_spin_polarized:
energy.append({str(Spin.up): [], str(Spin.down): []})
else:
energy.append({str(Spin.up): []})
distance.append([self._bs.distance[j]
for j in range(b['start_index'],
b['end_index'] + 1)])
ticks = self.get_ticks()
for i in range(self._nb_bands):
energy[-1][str(Spin.up)].append(
[self._bs.bands[Spin.up][i][j] - zero_energy
for j in range(b['start_index'], b['end_index'] + 1)])
if self._bs.is_spin_polarized:
for i in range(self._nb_bands):
energy[-1][str(Spin.down)].append(
[self._bs.bands[Spin.down][i][j] - zero_energy
for j in range(b['start_index'], b['end_index'] + 1)])
vbm = self._bs.get_vbm()
cbm = self._bs.get_cbm()
vbm_plot = []
cbm_plot = []
for index in cbm['kpoint_index']:
cbm_plot.append((self._bs.distance[index],
cbm['energy'] - zero_energy if zero_to_efermi
else cbm['energy']))
for index in vbm['kpoint_index']:
vbm_plot.append((self._bs.distance[index],
vbm['energy'] - zero_energy if zero_to_efermi
else vbm['energy']))
bg = self._bs.get_band_gap()
direct = "Indirect"
if bg['direct']:
direct = "Direct"
return {'ticks': ticks, 'distances': distance, 'energy': energy,
'vbm': vbm_plot, 'cbm': cbm_plot,
'lattice': self._bs.lattice_rec.as_dict(),
'zero_energy': zero_energy, 'is_metal': self._bs.is_metal(),
'band_gap': "{} {} bandgap = {}".format(direct,
bg['transition'],
bg['energy'])
if not self._bs.is_metal() else ""}
def get_plot(self, zero_to_efermi=True, ylim=None, smooth=False,
vbm_cbm_marker=False, smooth_tol=None):
"""
Get a matplotlib object for the bandstructure plot.
Blue lines are up spin, red lines are down
spin.
Args:
zero_to_efermi: Automatically subtract off the Fermi energy from
the eigenvalues and plot (E-Ef).
ylim: Specify the y-axis (energy) limits; by default None lets
the code choose: vbm-4 to cbm+4 for an insulator,
efermi-10 to efermi+10 for a metal.
smooth: interpolates the bands with a cubic spline
smooth_tol (float) : tolerance for fitting spline to band data.
Default is None such that no tolerance will be used.
"""
plt = pretty_plot(12, 8)
from matplotlib import rc
import scipy.interpolate as scint
# main internal config options
e_min = -4
e_max = 4
if self._bs.is_metal():
e_min = -10
e_max = 10
# band_linewidth = 3
band_linewidth = 1
data = self.bs_plot_data(zero_to_efermi)
if not smooth:
for d in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][d],
[data['energy'][d][str(Spin.up)][i][j]
for j in range(len(data['distances'][d]))], 'b-',
linewidth=band_linewidth)
if self._bs.is_spin_polarized:
plt.plot(data['distances'][d],
[data['energy'][d][str(Spin.down)][i][j]
for j in range(len(data['distances'][d]))],
'r--', linewidth=band_linewidth)
else:
# Interpolation failure can be caused by trying to fit an entire
# band with one spline rather than fitting with piecewise splines
# (splines are ill-suited to fit discontinuities).
#
# The number of splines used to fit a band is determined by the
# number of branches (high symmetry lines) defined in the
# BandStructureSymmLine object (see BandStructureSymmLine._branches).
warning = "WARNING! Distance / branch {d}, band {i} cannot be " + \
"interpolated.\n" + \
"See full warning in source.\n" + \
"If this is not a mistake, try increasing " + \
"smooth_tol.\nCurrent smooth_tol is {s}."
for d in range(len(data['distances'])):
for i in range(self._nb_bands):
tck = scint.splrep(
data['distances'][d],
[data['energy'][d][str(Spin.up)][i][j]
for j in range(len(data['distances'][d]))],
s=smooth_tol)
step = (data['distances'][d][-1]
- data['distances'][d][0]) / 1000
xs = [x * step + data['distances'][d][0]
for x in range(1000)]
ys = [scint.splev(x * step + data['distances'][d][0],
tck, der=0)
for x in range(1000)]
for y in ys:
if np.isnan(y):
print(warning.format(d=str(d), i=str(i),
s=str(smooth_tol)))
break
plt.plot(xs, ys, 'b-', linewidth=band_linewidth)
if self._bs.is_spin_polarized:
tck = scint.splrep(
data['distances'][d],
[data['energy'][d][str(Spin.down)][i][j]
for j in range(len(data['distances'][d]))],
s=smooth_tol)
step = (data['distances'][d][-1]
- data['distances'][d][0]) / 1000
xs = [x * step + data['distances'][d][0]
for x in range(1000)]
ys = [scint.splev(
x * step + data['distances'][d][0],
tck, der=0)
for x in range(1000)]
for y in ys:
if np.isnan(y):
print(warning.format(d=str(d), i=str(i),
s=str(smooth_tol)))
break
plt.plot(xs, ys, 'r--', linewidth=band_linewidth)
self._maketicks(plt)
# Main X and Y Labels
plt.xlabel(r'$\mathrm{Wave\ Vector}$', fontsize=30)
ylabel = r'$\mathrm{E\ -\ E_f\ (eV)}$' if zero_to_efermi \
else r'$\mathrm{Energy\ (eV)}$'
plt.ylabel(ylabel, fontsize=30)
# Draw Fermi energy, only if not the zero
if not zero_to_efermi:
ef = self._bs.efermi
plt.axhline(ef, linewidth=2, color='k')
# X range (K)
# last distance point
x_max = data['distances'][-1][-1]
plt.xlim(0, x_max)
if ylim is None:
if self._bs.is_metal():
# Plot A Metal
if zero_to_efermi:
plt.ylim(e_min, e_max)
else:
plt.ylim(self._bs.efermi + e_min, self._bs.efermi + e_max)
else:
if vbm_cbm_marker:
for cbm in data['cbm']:
plt.scatter(cbm[0], cbm[1], color='r', marker='o',
s=100)
for vbm in data['vbm']:
plt.scatter(vbm[0], vbm[1], color='g', marker='o',
s=100)
plt.ylim(data['vbm'][0][1] + e_min,
data['cbm'][0][1] + e_max)
else:
plt.ylim(ylim)
if not self._bs.is_metal() and vbm_cbm_marker:
for cbm in data['cbm']:
plt.scatter(cbm[0], cbm[1], color='r', marker='o',
s=100)
for vbm in data['vbm']:
plt.scatter(vbm[0], vbm[1], color='g', marker='o',
s=100)
plt.tight_layout()
return plt
def show(self, zero_to_efermi=True, ylim=None, smooth=False,
smooth_tol=None):
"""
Show the plot using matplotlib.
Args:
zero_to_efermi: Automatically subtract off the Fermi energy from
the eigenvalues and plot (E-Ef).
ylim: Specify the y-axis (energy) limits; by default None lets
the code choose: vbm-4 to cbm+4 for an insulator,
efermi-10 to efermi+10 for a metal.
smooth: interpolates the bands with a cubic spline
smooth_tol (float) : tolerance for fitting spline to band data.
Default is None such that no tolerance will be used.
"""
plt = self.get_plot(zero_to_efermi, ylim, smooth)
plt.show()
def save_plot(self, filename, img_format="eps", ylim=None,
zero_to_efermi=True, smooth=False):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(ylim=ylim, zero_to_efermi=zero_to_efermi,
smooth=smooth)
plt.savefig(filename, format=img_format)
plt.close()
def get_ticks(self):
"""
Get all ticks and labels for a band structure plot.
Returns:
dict: A dictionary with 'distance': a list of distance at which
ticks should be set and 'label': a list of label for each of those
ticks.
"""
tick_distance = []
tick_labels = []
previous_label = self._bs.kpoints[0].label
previous_branch = self._bs.branches[0]['name']
for i, c in enumerate(self._bs.kpoints):
if c.label is not None:
tick_distance.append(self._bs.distance[i])
this_branch = None
for b in self._bs.branches:
if b['start_index'] <= i <= b['end_index']:
this_branch = b['name']
break
if c.label != previous_label \
and previous_branch != this_branch:
label1 = c.label
if label1.startswith("\\") or label1.find("_") != -1:
label1 = "$" + label1 + "$"
label0 = previous_label
if label0.startswith("\\") or label0.find("_") != -1:
label0 = "$" + label0 + "$"
tick_labels.pop()
tick_distance.pop()
tick_labels.append(label0 + "$\\mid$" + label1)
else:
if c.label.startswith("\\") or c.label.find("_") != -1:
tick_labels.append("$" + c.label + "$")
else:
tick_labels.append(c.label)
previous_label = c.label
previous_branch = this_branch
return {'distance': tick_distance, 'label': tick_labels}
def plot_compare(self, other_plotter, legend=True):
"""
Plot two band structures for comparison. One is in red, the other in blue
(no difference in spins). The two band structures need to be defined
on the same symmetry lines, and the distance between symmetry lines is
the one of the band structure used to build the BSPlotter.
Args:
other_plotter: another BSPlotter object defined along the same symmetry lines
legend: if True, add a legend distinguishing the two band structures
Returns:
a matplotlib object with both band structures
"""
# TODO: add exception if the band structures are not compatible
import matplotlib.lines as mlines
plt = self.get_plot()
data_orig = self.bs_plot_data()
data = other_plotter.bs_plot_data()
band_linewidth = 1
for i in range(other_plotter._nb_bands):
for d in range(len(data_orig['distances'])):
plt.plot(data_orig['distances'][d],
[e[str(Spin.up)][i] for e in data['energy']][d],
'c-', linewidth=band_linewidth)
if other_plotter._bs.is_spin_polarized:
plt.plot(data_orig['distances'][d],
[e[str(Spin.down)][i] for e in data['energy']][d],
'm--', linewidth=band_linewidth)
if legend:
handles = [mlines.Line2D([], [], linewidth=2,
color='b', label='bs 1 up'),
mlines.Line2D([], [], linewidth=2,
color='r', label='bs 1 down',
linestyle="--"),
mlines.Line2D([], [], linewidth=2,
color='c', label='bs 2 up'),
mlines.Line2D([], [], linewidth=2,
color='m', linestyle="--",
label='bs 2 down')]
plt.legend(handles=handles)
return plt
def plot_brillouin(self):
"""
plot the Brillouin zone
"""
# get labels and lines
labels = {}
for k in self._bs.kpoints:
if k.label:
labels[k.label] = k.frac_coords
lines = []
for b in self._bs.branches:
lines.append([self._bs.kpoints[b['start_index']].frac_coords,
self._bs.kpoints[b['end_index']].frac_coords])
plot_brillouin_zone(self._bs.lattice_rec, lines=lines, labels=labels)
class BSPlotterProjected(BSPlotter):
"""
Class to plot or get data to facilitate the plot of band structure objects
projected along orbitals, elements or sites.
Args:
bs: A BandStructureSymmLine object with projections.
"""
def __init__(self, bs):
if len(bs.projections) == 0:
raise ValueError("try to plot projections"
" on a band structure without any")
super().__init__(bs)
def _get_projections_by_branches(self, dictio):
proj = self._bs.get_projections_on_elements_and_orbitals(dictio)
proj_br = []
for b in self._bs.branches:
if self._bs.is_spin_polarized:
proj_br.append(
{str(Spin.up): [[] for l in range(self._nb_bands)],
str(Spin.down): [[] for l in range(self._nb_bands)]})
else:
proj_br.append(
{str(Spin.up): [[] for l in range(self._nb_bands)]})
for i in range(self._nb_bands):
for j in range(b['start_index'], b['end_index'] + 1):
proj_br[-1][str(Spin.up)][i].append(
{e: {o: proj[Spin.up][i][j][e][o]
for o in proj[Spin.up][i][j][e]}
for e in proj[Spin.up][i][j]})
if self._bs.is_spin_polarized:
for b in self._bs.branches:
for i in range(self._nb_bands):
for j in range(b['start_index'], b['end_index'] + 1):
proj_br[-1][str(Spin.down)][i].append(
{e: {o: proj[Spin.down][i][j][e][o]
for o in proj[Spin.down][i][j][e]}
for e in proj[Spin.down][i][j]})
return proj_br
def get_projected_plots_dots(self, dictio, zero_to_efermi=True, ylim=None,
vbm_cbm_marker=False):
"""
Method returning a plot composed of subplots along different elements
and orbitals.
Args:
dictio: The element and orbitals you want a projection on. The
format is {Element: [Orbitals]}; for instance,
{'Cu': ['d', 's'], 'O': ['p']} will give projections for Cu on the
d and s orbitals and for O on the p orbitals.
Returns:
a pylab object with different subfigures for each projection
The blue and red colors are for spin up and spin down.
The bigger the red or blue dot in the band structure the higher
character for the corresponding element and orbital.
"""
band_linewidth = 1.0
fig_number = sum([len(v) for v in dictio.values()])
proj = self._get_projections_by_branches(dictio)
data = self.bs_plot_data(zero_to_efermi)
plt = pretty_plot(12, 8)
e_min = -4
e_max = 4
if self._bs.is_metal():
e_min = -10
e_max = 10
count = 1
for el in dictio:
for o in dictio[el]:
plt.subplot(100 * math.ceil(fig_number / 2) + 20 + count)
self._maketicks(plt)
for b in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][b],
[data['energy'][b][str(Spin.up)][i][j]
for j in range(len(data['distances'][b]))],
'b-',
linewidth=band_linewidth)
if self._bs.is_spin_polarized:
plt.plot(data['distances'][b],
[data['energy'][b][str(Spin.down)][i][j]
for j in
range(len(data['distances'][b]))],
'r--', linewidth=band_linewidth)
for j in range(
len(data['energy'][b][str(Spin.up)][i])):
plt.plot(data['distances'][b][j],
data['energy'][b][str(Spin.down)][i][
j], 'ro',
markersize=
proj[b][str(Spin.down)][i][j][str(el)][
o] * 15.0)
for j in range(len(data['energy'][b][str(Spin.up)][i])):
plt.plot(data['distances'][b][j],
data['energy'][b][str(Spin.up)][i][j],
'bo',
markersize=
proj[b][str(Spin.up)][i][j][str(el)][
o] * 15.0)
if ylim is None:
if self._bs.is_metal():
if zero_to_efermi:
plt.ylim(e_min, e_max)
else:
plt.ylim(self._bs.efermi + e_min, self._bs.efermi
+ e_max)
else:
if vbm_cbm_marker:
for cbm in data['cbm']:
plt.scatter(cbm[0], cbm[1], color='r',
marker='o',
s=100)
for vbm in data['vbm']:
plt.scatter(vbm[0], vbm[1], color='g',
marker='o',
s=100)
plt.ylim(data['vbm'][0][1] + e_min, data['cbm'][0][1]
+ e_max)
else:
plt.ylim(ylim)
plt.title(str(el) + " " + str(o))
count += 1
return plt
def get_elt_projected_plots(self, zero_to_efermi=True, ylim=None,
vbm_cbm_marker=False):
"""
Method returning a plot composed of subplots along different elements
Returns:
a pylab object with different subfigures for each projection
The blue and red colors are for spin up and spin down
The bigger the red or blue dot in the band structure the higher
character for the corresponding element and orbital
"""
band_linewidth = 1.0
proj = self._get_projections_by_branches({e.symbol: ['s', 'p', 'd']
for e in
self._bs.structure.composition.elements})
data = self.bs_plot_data(zero_to_efermi)
plt = pretty_plot(12, 8)
e_min = -4
e_max = 4
if self._bs.is_metal():
e_min = -10
e_max = 10
count = 1
for el in self._bs.structure.composition.elements:
plt.subplot(220 + count)
self._maketicks(plt)
for b in range(len(data['distances'])):
for i in range(self._nb_bands):
plt.plot(data['distances'][b],
[data['energy'][b][str(Spin.up)][i][j]
for j in range(len(data['distances'][b]))], '-',
color=[192 / 255, 192 / 255, 192 / 255],
linewidth=band_linewidth)
if self._bs.is_spin_polarized:
plt.plot(data['distances'][b],
[data['energy'][b][str(Spin.down)][i][j]
for j in range(len(data['distances'][b]))],
'--', color=[128 / 255, 128 / 255, 128 / 255],
linewidth=band_linewidth)
for j in range(len(data['energy'][b][str(Spin.up)][i])):
markerscale = sum([proj[b][str(Spin.down)][i][
j][str(el)][o] for o in
proj[b]
[str(Spin.down)][i][j][
str(el)]])
plt.plot(data['distances'][b][j],
data['energy'][b][str(Spin.down)][i][j],
'bo',
markersize=markerscale * 15.0,
color=[markerscale, 0.3 * markerscale,
0.4 * markerscale])
for j in range(len(data['energy'][b][str(Spin.up)][i])):
markerscale = sum(
[proj[b][str(Spin.up)][i][j][str(el)][o]
for o in proj[b]
[str(Spin.up)][i][j][str(el)]])
plt.plot(data['distances'][b][j],
data['energy'][b][str(Spin.up)][i][j], 'o',
markersize=markerscale * 15.0,
color=[markerscale, 0.3 * markerscale,
0.4 * markerscale])
if ylim is None:
if self._bs.is_metal():
if zero_to_efermi:
plt.ylim(e_min, e_max)
else:
plt.ylim(self._bs.efermi + e_min, self._bs.efermi
+ e_max)
else:
if vbm_cbm_marker:
for cbm in data['cbm']:
plt.scatter(cbm[0], cbm[1], color='r', marker='o',
s=100)
for vbm in data['vbm']:
plt.scatter(vbm[0], vbm[1], color='g', marker='o',
s=100)
plt.ylim(data['vbm'][0][1] + e_min, data['cbm'][0][1]
+ e_max)
else:
plt.ylim(ylim)
plt.title(str(el))
count += 1
return plt
def get_elt_projected_plots_color(self, zero_to_efermi=True,
elt_ordered=None):
"""
returns a pylab plot object with one plot where the band structure
line color depends on the character of the band (along different
elements). Each element is associated with red, green or blue
and the corresponding rgb color depending on the character of the band
is used. The method can only deal with binary and ternary compounds.
Spin up and spin down are differentiated by a '-' and a '--' line.
Args:
elt_ordered: A list of Element ordered. The first one is red,
second green, last blue
Returns:
a pylab object
"""
band_linewidth = 3.0
if len(self._bs.structure.composition.elements) > 3:
raise ValueError("this method can only deal with binary and ternary compounds (at most 3 elements)")
if elt_ordered is None:
elt_ordered = self._bs.structure.composition.elements
proj = self._get_projections_by_branches(
{e.symbol: ['s', 'p', 'd']
for e in self._bs.structure.composition.elements})
data = self.bs_plot_data(zero_to_efermi)
plt = pretty_plot(12, 8)
spins = [Spin.up]
if self._bs.is_spin_polarized:
spins = [Spin.up, Spin.down]
self._maketicks(plt)
for s in spins:
for b in range(len(data['distances'])):
for i in range(self._nb_bands):
for j in range(len(data['energy'][b][str(s)][i]) - 1):
sum_e = 0.0
for el in elt_ordered:
sum_e = sum_e + \
sum([proj[b][str(s)][i][j][str(el)][o]
for o
in proj[b][str(s)][i][j][str(el)]])
if sum_e == 0.0:
color = [0.0] * len(elt_ordered)
else:
color = [sum([proj[b][str(s)][i][j][str(el)][o]
for o
in proj[b][str(s)][i][j][str(el)]])
/ sum_e
for el in elt_ordered]
if len(color) == 2:
color.append(0.0)
color[2] = color[1]
color[1] = 0.0
sign = '-'
if s == Spin.down:
sign = '--'
plt.plot([data['distances'][b][j],
data['distances'][b][j + 1]],
[data['energy'][b][str(s)][i][j],
data['energy'][b][str(s)][i][j + 1]], sign,
color=color, linewidth=band_linewidth)
if self._bs.is_metal():
if zero_to_efermi:
e_min = -10
e_max = 10
plt.ylim(e_min, e_max)
plt.ylim(self._bs.efermi + e_min, self._bs.efermi + e_max)
else:
plt.ylim(data['vbm'][0][1] - 4.0, data['cbm'][0][1] + 2.0)
return plt
def _get_projections_by_branches_patom_pmorb(self, dictio, dictpa,
sum_atoms, sum_morbs,
selected_branches):
import copy
setos = {'s': 0, 'py': 1, 'pz': 2, 'px': 3, 'dxy': 4, 'dyz': 5,
'dz2': 6, 'dxz': 7,
'dx2': 8, 'f_3': 9, 'f_2': 10, 'f_1': 11, 'f0': 12, 'f1': 13,
'f2': 14, 'f3': 15}
num_branches = len(self._bs.branches)
if selected_branches is not None:
indices = []
if not isinstance(selected_branches, list):
raise TypeError(
"You do not give a correct type of 'selected_branches'. It should be 'list' type.")
elif len(selected_branches) == 0:
raise ValueError(
"The 'selected_branches' is empty. We cannot do anything.")
else:
for index in selected_branches:
if not isinstance(index, int):
raise ValueError(
"You do not give a correct type of index of symmetry lines. It should be "
"'int' type")
elif index > num_branches or index < 1:
raise ValueError(
"You give a incorrect index of symmetry lines: %s. The index should be in "
"range of [1, %s]." % (
str(index), str(num_branches)))
else:
indices.append(index - 1)
else:
indices = range(0, num_branches)
proj = self._bs.projections
proj_br = []
for index in indices:
b = self._bs.branches[index]
print(b)
if self._bs.is_spin_polarized:
proj_br.append(
{str(Spin.up): [[] for l in range(self._nb_bands)],
str(Spin.down): [[] for l in range(self._nb_bands)]})
else:
proj_br.append(
{str(Spin.up): [[] for l in range(self._nb_bands)]})
for i in range(self._nb_bands):
for j in range(b['start_index'], b['end_index'] + 1):
edict = {}
for elt in dictpa:
for anum in dictpa[elt]:
edict[elt + str(anum)] = {}
for morb in dictio[elt]:
edict[elt + str(anum)][morb] = \
proj[Spin.up][i][j][setos[morb]][anum - 1]
proj_br[-1][str(Spin.up)][i].append(edict)
if self._bs.is_spin_polarized:
for i in range(self._nb_bands):
for j in range(b['start_index'], b['end_index'] + 1):
edict = {}
for elt in dictpa:
for anum in dictpa[elt]:
edict[elt + str(anum)] = {}
for morb in dictio[elt]:
edict[elt + str(anum)][morb] = \
proj[Spin.down][i][j][setos[morb]][anum - 1]
proj_br[-1][str(Spin.down)][i].append(edict)
# Adjusting projections for plot
dictio_d, dictpa_d = self._summarize_keys_for_plot(dictio, dictpa,
sum_atoms, sum_morbs)
print('dictio_d: %s' % str(dictio_d))
print('dictpa_d: %s' % str(dictpa_d))
if (sum_atoms is None) and (sum_morbs is None):
proj_br_d = copy.deepcopy(proj_br)
else:
proj_br_d = []
branch = -1
for index in indices:
branch += 1
br = self._bs.branches[index]
if self._bs.is_spin_polarized:
proj_br_d.append(
{str(Spin.up): [[] for l in range(self._nb_bands)],
str(Spin.down): [[] for l in range(self._nb_bands)]})
else:
proj_br_d.append(
{str(Spin.up): [[] for l in range(self._nb_bands)]})
if (sum_atoms is not None) and (sum_morbs is None):
for i in range(self._nb_bands):
for j in range(br['end_index'] - br['start_index'] + 1):
atoms_morbs = copy.deepcopy(
proj_br[branch][str(Spin.up)][i][j])
edict = {}
for elt in dictpa:
if elt in sum_atoms:
for anum in dictpa_d[elt][:-1]:
edict[elt + anum] = copy.deepcopy(
atoms_morbs[elt + anum])
edict[elt + dictpa_d[elt][-1]] = {}
for morb in dictio[elt]:
sprojection = 0.0
for anum in sum_atoms[elt]:
sprojection += \
atoms_morbs[elt + str(anum)][morb]
edict[elt + dictpa_d[elt][-1]][
morb] = sprojection
else:
for anum in dictpa_d[elt]:
edict[elt + anum] = copy.deepcopy(
atoms_morbs[elt + anum])
proj_br_d[-1][str(Spin.up)][i].append(edict)
if self._bs.is_spin_polarized:
for i in range(self._nb_bands):
for j in range(br['end_index'] - br[
'start_index'] + 1):
atoms_morbs = copy.deepcopy(
proj_br[branch][str(Spin.down)][i][j])
edict = {}
for elt in dictpa:
if elt in sum_atoms:
for anum in dictpa_d[elt][:-1]:
edict[elt + anum] = copy.deepcopy(
atoms_morbs[elt + anum])
edict[elt + dictpa_d[elt][-1]] = {}
for morb in dictio[elt]:
sprojection = 0.0
for anum in sum_atoms[elt]:
sprojection += \
atoms_morbs[elt + str(anum)][
morb]
edict[elt + dictpa_d[elt][-1]][
morb] = sprojection
else:
for anum in dictpa_d[elt]:
edict[elt + anum] = copy.deepcopy(
atoms_morbs[elt + anum])
proj_br_d[-1][str(Spin.down)][i].append(edict)
elif (sum_atoms is None) and (sum_morbs is not None):
for i in range(self._nb_bands):
for j in range(br['end_index'] - br['start_index'] + 1):
atoms_morbs = copy.deepcopy(
proj_br[branch][str(Spin.up)][i][j])
edict = {}
for elt in dictpa:
if elt in sum_morbs:
for anum in dictpa_d[elt]:
edict[elt + anum] = {}
for morb in dictio_d[elt][:-1]:
edict[elt + anum][morb] = \
atoms_morbs[elt + anum][morb]
sprojection = 0.0
for morb in sum_morbs[elt]:
sprojection += \
atoms_morbs[elt + anum][morb]
edict[elt + anum][
dictio_d[elt][-1]] = sprojection
else:
for anum in dictpa_d[elt]:
edict[elt + anum] = copy.deepcopy(
atoms_morbs[elt + anum])
proj_br_d[-1][str(Spin.up)][i].append(edict)
if self._bs.is_spin_polarized:
for i in range(self._nb_bands):
for j in range(br['end_index'] - br[
'start_index'] + 1):
atoms_morbs = copy.deepcopy(
proj_br[branch][str(Spin.down)][i][j])
edict = {}
for elt in dictpa:
if elt in sum_morbs:
for anum in dictpa_d[elt]:
edict[elt + anum] = {}
for morb in dictio_d[elt][:-1]:
edict[elt + anum][morb] = \
atoms_morbs[elt + anum][morb]
sprojection = 0.0
for morb in sum_morbs[elt]:
sprojection += \
atoms_morbs[elt + anum][morb]
edict[elt + anum][
dictio_d[elt][-1]] = sprojection
else:
for anum in dictpa_d[elt]:
edict[elt + anum] = copy.deepcopy(
atoms_morbs[elt + anum])
proj_br_d[-1][str(Spin.down)][i].append(edict)
else:
for i in range(self._nb_bands):
for j in range(br['end_index'] - br['start_index'] + 1):
atoms_morbs = copy.deepcopy(
proj_br[branch][str(Spin.up)][i][j])
edict = {}
for elt in dictpa:
if (elt in sum_atoms) and (elt in sum_morbs):
for anum in dictpa_d[elt][:-1]:
edict[elt + anum] = {}
for morb in dictio_d[elt][:-1]:
edict[elt + anum][morb] = \
atoms_morbs[elt + anum][morb]
sprojection = 0.0
for morb in sum_morbs[elt]:
sprojection += \
atoms_morbs[elt + anum][morb]
edict[elt + anum][
dictio_d[elt][-1]] = sprojection
edict[elt + dictpa_d[elt][-1]] = {}
for morb in dictio_d[elt][:-1]:
sprojection = 0.0
for anum in sum_atoms[elt]:
sprojection += \
atoms_morbs[elt + str(anum)][morb]
edict[elt + dictpa_d[elt][-1]][
morb] = sprojection
sprojection = 0.0
for anum in sum_atoms[elt]:
for morb in sum_morbs[elt]:
sprojection += \
atoms_morbs[elt + str(anum)][morb]
edict[elt + dictpa_d[elt][-1]][
dictio_d[elt][-1]] = sprojection
elif (elt in sum_atoms) and (
elt not in sum_morbs):
for anum in dictpa_d[elt][:-1]:
edict[elt + anum] = copy.deepcopy(
atoms_morbs[elt + anum])
edict[elt + dictpa_d[elt][-1]] = {}
for morb in dictio[elt]:
sprojection = 0.0
for anum in sum_atoms[elt]:
sprojection += \
atoms_morbs[elt + str(anum)][morb]
edict[elt + dictpa_d[elt][-1]][
morb] = sprojection
elif (elt not in sum_atoms) and (
elt in sum_morbs):
for anum in dictpa_d[elt]:
edict[elt + anum] = {}
for morb in dictio_d[elt][:-1]:
edict[elt + anum][morb] = \
atoms_morbs[elt + anum][morb]
sprojection = 0.0
for morb in sum_morbs[elt]:
sprojection += \
atoms_morbs[elt + anum][morb]
edict[elt + anum][
dictio_d[elt][-1]] = sprojection
else:
for anum in dictpa_d[elt]:
edict[elt + anum] = {}
for morb in dictio_d[elt]:
edict[elt + anum][morb] = \
atoms_morbs[elt + anum][morb]
proj_br_d[-1][str(Spin.up)][i].append(edict)
if self._bs.is_spin_polarized:
for i in range(self._nb_bands):
for j in range(br['end_index'] - br[
'start_index'] + 1):
atoms_morbs = copy.deepcopy(
proj_br[branch][str(Spin.down)][i][j])
edict = {}
for elt in dictpa:
if (elt in sum_atoms) and (
elt in sum_morbs):
for anum in dictpa_d[elt][:-1]:
edict[elt + anum] = {}
for morb in dictio_d[elt][:-1]:
edict[elt + anum][morb] = \
atoms_morbs[elt + anum][morb]
sprojection = 0.0
for morb in sum_morbs[elt]:
sprojection += \
atoms_morbs[elt + anum][morb]
edict[elt + anum][
dictio_d[elt][-1]] = sprojection
edict[elt + dictpa_d[elt][-1]] = {}
for morb in dictio_d[elt][:-1]:
sprojection = 0.0
for anum in sum_atoms[elt]:
sprojection += \
atoms_morbs[elt + str(anum)][
morb]
edict[elt + dictpa_d[elt][-1]][
morb] = sprojection
sprojection = 0.0
for anum in sum_atoms[elt]:
for morb in sum_morbs[elt]:
sprojection += \
atoms_morbs[elt + str(anum)][
morb]
edict[elt + dictpa_d[elt][-1]][
dictio_d[elt][-1]] = sprojection
elif (elt in sum_atoms) and (
elt not in sum_morbs):
for anum in dictpa_d[elt][:-1]:
edict[elt + anum] = copy.deepcopy(
atoms_morbs[elt + anum])
edict[elt + dictpa_d[elt][-1]] = {}
for morb in dictio[elt]:
sprojection = 0.0
for anum in sum_atoms[elt]:
sprojection += \
atoms_morbs[elt + str(anum)][
morb]
edict[elt + dictpa_d[elt][-1]][
morb] = sprojection
elif (elt not in sum_atoms) and (
elt in sum_morbs):
for anum in dictpa_d[elt]:
edict[elt + anum] = {}
for morb in dictio_d[elt][:-1]:
edict[elt + anum][morb] = \
atoms_morbs[elt + anum][morb]
sprojection = 0.0
for morb in sum_morbs[elt]:
sprojection += \
atoms_morbs[elt + anum][morb]
edict[elt + anum][
dictio_d[elt][-1]] = sprojection
else:
for anum in dictpa_d[elt]:
edict[elt + anum] = {}
for morb in dictio_d[elt]:
edict[elt + anum][morb] = \
atoms_morbs[elt + anum][morb]
proj_br_d[-1][str(Spin.down)][i].append(edict)
return proj_br_d, dictio_d, dictpa_d, indices
def get_projected_plots_dots_patom_pmorb(self, dictio, dictpa,
sum_atoms=None, sum_morbs=None,
zero_to_efermi=True, ylim=None,
vbm_cbm_marker=False,
selected_branches=None,
w_h_size=(12, 8), num_column=None):
"""
Method returns a plot composed of subplots for different atoms and
orbitals (subshell orbitals such as 's', 'p', 'd' and 'f' defined by
azimuthal quantum numbers l = 0, 1, 2 and 3, respectively or
individual orbitals like 'px', 'py' and 'pz' defined by magnetic
quantum numbers m = -1, 1 and 0, respectively).
This is an extension of "get_projected_plots_dots" method.
Args:
dictio: The elements and the orbitals you need to project on. The
format is {Element:[Orbitals]}, for instance:
{'Cu':['dxy','s','px'],'O':['px','py','pz']} will give
projections for Cu on orbitals dxy, s, px and
for O on orbitals px, py, pz. If you want to sum over all
individual orbitals of subshell orbitals,
for example, 'px', 'py' and 'pz' of O, just simply set
{'Cu':['dxy','s','px'],'O':['p']} and set sum_morbs (see
explanations below) as {'O':[p],...}.
Otherwise, you will get an error.
dictpa: The elements and their sites (defined by site numbers) you
need to project on. The format is
{Element: [Site numbers]}, for instance: {'Cu':[1,5],'O':[3,4]}
will give projections for Cu on site-1
and on site-5, O on site-3 and on site-4 in the cell.
Attention:
The site numbers of atoms must be consistent with the
structure that was actually computed. Normally
the structure is the same as in the POSCAR file,
but sometimes VASP can rotate or
translate the cell. It is therefore safer to use the Vasprun
class to get the final_structure and, from it, the
correct site indices of the atoms.
sum_atoms: Sum projection of the similar atoms together (e.g.: Cu
on site-1 and Cu on site-5). The format is
{Element: [Site numbers]}, for instance:
{'Cu': [1,5], 'O': [3,4]} means summing projections over Cu on
site-1 and Cu on site-5 and O on site-3
                and on site-4. If you do not want this behaviour, turn it off
                by setting sum_atoms = None.
sum_morbs: Sum projections of individual orbitals of similar atoms
together (e.g.: 'dxy' and 'dxz'). The
format is {Element: [individual orbitals]}, for instance:
{'Cu': ['dxy', 'dxz'], 'O': ['px', 'py']} means summing
projections over 'dxy' and 'dxz' of Cu and 'px'
                and 'py' of O. If you do not want this behaviour, turn it off
                by setting sum_morbs = None.
            selected_branches: The indices of the symmetry lines you choose to
                plot. This is useful when the number of symmetry lines (in the
                KPOINTS file) is large and you only want to show certain ones.
                The format is a list of line indices, for instance: [1, 3, 4]
                means the projection is done only along lines number 1, 3 and
                4, while line number 2 and the rest are neglected. By default,
                this is None and all symmetry lines are plotted.
            w_h_size: Controls the width and height of the figure. By default,
                width = 12 and height = 8 (inches). The width/height ratio is
                kept the same for the subfigures, and the size of each depends
                on how many subfigures are plotted.
            num_column: Controls how the subfigures are arranged by setting the
                number of columns of subfigures. The value should be an int.
                For example, num_column = 3 plots the subfigures in 3 columns.
                By default, num_column = None and the subfigures are arranged
                in 2 columns.
Returns:
            A pylab object with a subfigure for each projection. The blue and
            red lines are the spin-up and spin-down bands, and the green and
            cyan dots are the corresponding projections. The bigger a green or
            cyan dot in a projected band structure, the stronger the character
            of the corresponding element and orbital at that point. List of
            individual orbitals and their indices (set up by VASP, no special
            meaning):
s = 0; py = 1 pz = 2 px = 3; dxy = 4 dyz = 5 dz2 = 6 dxz = 7 dx2 = 8;
f_3 = 9 f_2 = 10 f_1 = 11 f0 = 12 f1 = 13 f2 = 14 f3 = 15
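        Example (an illustrative sketch; the vasprun file name and the use of
        the projected plotter class BSPlotterProjected are assumptions based
        on the usual pymatgen workflow):
            from pymatgen.io.vasp import Vasprun
            run = Vasprun("vasprun.xml", parse_projected_eigen=True)
            plotter = BSPlotterProjected(run.get_band_structure(line_mode=True))
            plt = plotter.get_projected_plots_dots_patom_pmorb(
                {'Cu': ['dxy', 's', 'px'], 'O': ['p']},
                {'Cu': [1, 5], 'O': [3, 4]},
                sum_atoms={'Cu': [1, 5]}, sum_morbs={'O': ['p']})
            plt.show()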
"""
dictio, sum_morbs = self._Orbitals_SumOrbitals(dictio, sum_morbs)
dictpa, sum_atoms, number_figs = self._number_of_subfigures(dictio,
dictpa,
sum_atoms,
sum_morbs)
print('Number of subfigures: %s' % str(number_figs))
if number_figs > 9:
print(
"The number of sub-figures %s might be too manny and the implementation might take a long time.\n"
"A smaller number or a plot with selected symmetry lines (selected_branches) might be better.\n"
% str(number_figs))
import math
from pymatgen.util.plotting import pretty_plot
band_linewidth = 0.5
plt = pretty_plot(w_h_size[0], w_h_size[1])
proj_br_d, dictio_d, dictpa_d, branches = self._get_projections_by_branches_patom_pmorb(
dictio, dictpa,
sum_atoms, sum_morbs, selected_branches)
data = self.bs_plot_data(zero_to_efermi)
e_min = -4
e_max = 4
if self._bs.is_metal():
e_min = -10
e_max = 10
count = 0
for elt in dictpa_d:
for numa in dictpa_d[elt]:
for o in dictio_d[elt]:
count += 1
if num_column is None:
if number_figs == 1:
plt.subplot(1, 1, 1)
else:
                            row = number_figs // 2  # integer division so subplot() gets an int
if number_figs % 2 == 0:
plt.subplot(row, 2, count)
else:
plt.subplot(row + 1, 2, count)
elif isinstance(num_column, int):
                        row = number_figs // num_column  # integer division so subplot() gets an int
if number_figs % num_column == 0:
plt.subplot(row, num_column, count)
else:
plt.subplot(row + 1, num_column, count)
else:
raise ValueError(
"The invalid 'num_column' is assigned. It should be an integer.")
plt, shift = self._maketicks_selected(plt, branches)
br = -1
for b in branches:
br += 1
for i in range(self._nb_bands):
plt.plot(list(map(lambda x: x - shift[br], data['distances'][b])),
[data['energy'][b][str(Spin.up)][i][j]
for j in
range(len(data['distances'][b]))],
'b-', linewidth=band_linewidth)
if self._bs.is_spin_polarized:
plt.plot(list(map(lambda x: x - shift[br], data['distances'][b])),
[data['energy'][b][str(Spin.down)][i][
j]
for j in
range(len(data['distances'][b]))],
'r--', linewidth=band_linewidth)
for j in range(len(
data['energy'][b][str(Spin.up)][i])):
plt.plot(
data['distances'][b][j] - shift[br],
data['energy'][b][str(Spin.down)][i][j],
'co', markersize= \
proj_br_d[br][str(Spin.down)][i][j][
elt + numa][o] * 15.0)
for j in range(
len(data['energy'][b][str(Spin.up)][i])):
plt.plot(data['distances'][b][j] - shift[br],
data['energy'][b][str(Spin.up)][i][j],
'go', markersize= \
proj_br_d[br][str(Spin.up)][i][j][
elt + numa][o] * 15.0)
if ylim is None:
if self._bs.is_metal():
if zero_to_efermi:
plt.ylim(e_min, e_max)
else:
plt.ylim(self._bs.efermi + e_min,
                                     self._bs.efermi
+ e_max)
else:
if vbm_cbm_marker:
for cbm in data['cbm']:
plt.scatter(cbm[0], cbm[1], color='r',
marker='o',
s=100)
for vbm in data['vbm']:
plt.scatter(vbm[0], vbm[1], color='g',
marker='o',
s=100)
plt.ylim(data['vbm'][0][1] + e_min,
data['cbm'][0][1]
+ e_max)
else:
plt.ylim(ylim)
plt.title(elt + " " + numa + " " + str(o))
return plt
def _Orbitals_SumOrbitals(self, dictio, sum_morbs):
all_orbitals = ['s', 'p', 'd', 'f', 'px', 'py', 'pz', 'dxy', 'dyz',
'dxz', 'dx2', 'dz2',
'f_3', 'f_2', 'f_1', 'f0', 'f1', 'f2', 'f3']
individual_orbs = {'p': ['px', 'py', 'pz'],
'd': ['dxy', 'dyz', 'dxz', 'dx2', 'dz2'],
'f': ['f_3', 'f_2', 'f_1', 'f0', 'f1', 'f2', 'f3']}
if not isinstance(dictio, dict):
raise TypeError(
"The invalid type of 'dictio' was bound. It should be dict type.")
elif len(dictio.keys()) == 0:
raise KeyError("The 'dictio' is empty. We cannot do anything.")
else:
for elt in dictio:
if Element.is_valid_symbol(elt):
if isinstance(dictio[elt], list):
if len(dictio[elt]) == 0:
raise ValueError(
"The dictio[%s] is empty. We cannot do anything" % elt)
for orb in dictio[elt]:
if not isinstance(orb, str):
raise ValueError(
"The invalid format of orbitals is in 'dictio[%s]': %s. "
"They should be string." % (elt, str(orb)))
elif orb not in all_orbitals:
raise ValueError(
"The invalid name of orbital is given in 'dictio[%s]'." % elt)
else:
if orb in individual_orbs.keys():
if len(set(dictio[elt]).intersection(
individual_orbs[orb])) != 0:
raise ValueError(
"The 'dictio[%s]' contains orbitals repeated." % elt)
nelems = Counter(dictio[elt]).values()
if sum(nelems) > len(nelems):
raise ValueError(
"You put in at least two similar orbitals in dictio[%s]." % elt)
else:
raise TypeError(
"The invalid type of value was put into 'dictio[%s]'. It should be list "
"type." % elt)
else:
raise KeyError(
"The invalid element was put into 'dictio' as a key: %s" % elt)
if sum_morbs is None:
print("You do not want to sum projection over orbitals.")
elif not isinstance(sum_morbs, dict):
raise TypeError(
"The invalid type of 'sum_orbs' was bound. It should be dict or 'None' type.")
elif len(sum_morbs.keys()) == 0:
raise KeyError("The 'sum_morbs' is empty. We cannot do anything")
else:
for elt in sum_morbs:
if Element.is_valid_symbol(elt):
if isinstance(sum_morbs[elt], list):
for orb in sum_morbs[elt]:
if not isinstance(orb, str):
raise TypeError(
"The invalid format of orbitals is in 'sum_morbs[%s]': %s. "
"They should be string." % (elt, str(orb)))
elif orb not in all_orbitals:
raise ValueError(
"The invalid name of orbital in 'sum_morbs[%s]' is given." % elt)
else:
if orb in individual_orbs.keys():
if len(set(sum_morbs[elt]).intersection(
individual_orbs[orb])) != 0:
raise ValueError(
"The 'sum_morbs[%s]' contains orbitals repeated." % elt)
nelems = Counter(sum_morbs[elt]).values()
if sum(nelems) > len(nelems):
raise ValueError(
"You put in at least two similar orbitals in sum_morbs[%s]." % elt)
else:
raise TypeError(
"The invalid type of value was put into 'sum_morbs[%s]'. It should be list "
"type." % elt)
if elt not in dictio.keys():
raise ValueError(
"You cannot sum projection over orbitals of atoms '%s' because they are not "
"mentioned in 'dictio'." % elt)
else:
raise KeyError(
"The invalid element was put into 'sum_morbs' as a key: %s" % elt)
for elt in dictio:
if len(dictio[elt]) == 1:
if len(dictio[elt][0]) > 1:
if elt in sum_morbs.keys():
raise ValueError(
"You cannot sum projection over one individual orbital '%s' of '%s'." %
(dictio[elt][0], elt))
else:
if sum_morbs is None:
pass
elif elt not in sum_morbs.keys():
print(
"You do not want to sum projection over orbitals of element: %s" % elt)
else:
if len(sum_morbs[elt]) == 0:
raise ValueError(
"The empty list is an invalid value for sum_morbs[%s]." % elt)
elif len(sum_morbs[elt]) > 1:
for orb in sum_morbs[elt]:
if dictio[elt][0] not in orb:
raise ValueError(
"The invalid orbital '%s' was put into 'sum_morbs[%s]'." %
(orb, elt))
else:
if orb == 's' or len(orb) > 1:
raise ValueError(
"The invalid orbital '%s' was put into sum_orbs['%s']." % (
orb, elt))
else:
sum_morbs[elt] = individual_orbs[dictio[elt][0]]
dictio[elt] = individual_orbs[dictio[elt][0]]
else:
duplicate = copy.deepcopy(dictio[elt])
for orb in dictio[elt]:
if orb in individual_orbs.keys():
duplicate.remove(orb)
for o in individual_orbs[orb]:
duplicate.append(o)
dictio[elt] = copy.deepcopy(duplicate)
if sum_morbs is None:
pass
elif elt not in sum_morbs.keys():
print(
"You do not want to sum projection over orbitals of element: %s" % elt)
else:
if len(sum_morbs[elt]) == 0:
raise ValueError(
"The empty list is an invalid value for sum_morbs[%s]." % elt)
elif len(sum_morbs[elt]) == 1:
orb = sum_morbs[elt][0]
if orb == 's':
raise ValueError(
"We do not sum projection over only 's' orbital of the same "
"type of element.")
elif orb in individual_orbs.keys():
sum_morbs[elt].pop(0)
for o in individual_orbs[orb]:
sum_morbs[elt].append(o)
else:
raise ValueError(
"You never sum projection over one orbital in sum_morbs[%s]" % elt)
else:
duplicate = copy.deepcopy(sum_morbs[elt])
for orb in sum_morbs[elt]:
if orb in individual_orbs.keys():
duplicate.remove(orb)
for o in individual_orbs[orb]:
duplicate.append(o)
sum_morbs[elt] = copy.deepcopy(duplicate)
for orb in sum_morbs[elt]:
if orb not in dictio[elt]:
raise ValueError(
"The orbitals of sum_morbs[%s] conflict with those of dictio[%s]." %
(elt, elt))
return dictio, sum_morbs
def _number_of_subfigures(self, dictio, dictpa, sum_atoms, sum_morbs):
from pymatgen.core.periodic_table import Element
from collections import Counter
        if not isinstance(dictpa, dict):
            raise TypeError(
                "Invalid type for 'dictpa'. It should be a dict.")
elif len(dictpa.keys()) == 0:
raise KeyError("The 'dictpa' is empty. We cannot do anything.")
else:
for elt in dictpa:
if Element.is_valid_symbol(elt):
if isinstance(dictpa[elt], list):
if len(dictpa[elt]) == 0:
raise ValueError(
"The dictpa[%s] is empty. We cannot do anything" % elt)
_sites = self._bs.structure.sites
indices = []
for i in range(0, len(_sites)):
if list(_sites[i]._species.keys())[0].__eq__(
Element(elt)):
indices.append(i + 1)
for number in dictpa[elt]:
if isinstance(number, str):
if 'all' == number.lower():
dictpa[elt] = indices
print(
"You want to consider all '%s' atoms." % elt)
break
else:
raise ValueError(
"You put wrong site numbers in 'dictpa[%s]': %s." %
(elt, str(number)))
elif isinstance(number, int):
if number not in indices:
raise ValueError(
"You put wrong site numbers in 'dictpa[%s]': %s." %
(elt, str(number)))
else:
raise ValueError(
"You put wrong site numbers in 'dictpa[%s]': %s." % (
elt, str(number)))
nelems = Counter(dictpa[elt]).values()
if sum(nelems) > len(nelems):
raise ValueError(
"You put at least two similar site numbers into 'dictpa[%s]'." % elt)
else:
raise TypeError(
"The invalid type of value was put into 'dictpa[%s]'. It should be list "
"type." % elt)
else:
raise KeyError(
"The invalid element was put into 'dictpa' as a key: %s" % elt)
if len(list(dictio.keys())) != len(list(dictpa.keys())):
raise KeyError(
"The number of keys in 'dictio' and 'dictpa' are not the same.")
else:
for elt in dictio.keys():
if elt not in dictpa.keys(): raise KeyError(
"The element '%s' is not in both dictpa and dictio." % elt)
for elt in dictpa.keys():
if elt not in dictio.keys(): raise KeyError(
"The element '%s' in not in both dictpa and dictio." % elt)
if sum_atoms is None:
print("You do not want to sum projection over atoms.")
        elif not isinstance(sum_atoms, dict):
            raise TypeError(
                "Invalid type for 'sum_atoms'. It should be a dict.")
elif len(sum_atoms.keys()) == 0:
raise KeyError("The 'sum_atoms' is empty. We cannot do anything.")
else:
for elt in sum_atoms:
if Element.is_valid_symbol(elt):
if isinstance(sum_atoms[elt], list):
if len(sum_atoms[elt]) == 0:
raise ValueError(
"The sum_atoms[%s] is empty. We cannot do anything" % elt)
_sites = self._bs.structure.sites
indices = []
for i in range(0, len(_sites)):
if list(_sites[i]._species.keys())[0].__eq__(
Element(elt)):
indices.append(i + 1)
for number in sum_atoms[elt]:
if isinstance(number, str):
if 'all' == number.lower():
sum_atoms[elt] = indices
print(
"You want to sum projection over all '%s' atoms." % elt)
break
else:
raise ValueError(
"You put wrong site numbers in 'sum_atoms[%s]'." % elt)
elif isinstance(number, int):
if number not in indices:
raise ValueError(
"You put wrong site numbers in 'sum_atoms[%s]'." % elt)
elif number not in dictpa[elt]:
raise ValueError(
"You cannot sum projection with atom number '%s' because it is not "
"metioned in dicpta[%s]" % (
str(number), elt))
else:
raise ValueError(
"You put wrong site numbers in 'sum_atoms[%s]'." % elt)
nelems = Counter(sum_atoms[elt]).values()
if sum(nelems) > len(nelems):
raise ValueError(
"You put at least two similar site numbers into 'sum_atoms[%s]'." % elt)
else:
raise TypeError(
"The invalid type of value was put into 'sum_atoms[%s]'. It should be list "
"type." % elt)
if elt not in dictpa.keys():
raise ValueError(
"You cannot sum projection over atoms '%s' because it is not "
"mentioned in 'dictio'." % elt)
else:
raise KeyError(
"The invalid element was put into 'sum_atoms' as a key: %s" % elt)
if len(sum_atoms[elt]) == 1:
raise ValueError(
"We do not sum projection over only one atom: %s" % elt)
max_number_figs = 0
decrease = 0
for elt in dictio:
max_number_figs += len(dictio[elt]) * len(dictpa[elt])
if (sum_atoms is None) and (sum_morbs is None):
number_figs = max_number_figs
elif (sum_atoms is not None) and (sum_morbs is None):
for elt in sum_atoms:
decrease += (len(sum_atoms[elt]) - 1) * len(dictio[elt])
number_figs = max_number_figs - decrease
elif (sum_atoms is None) and (sum_morbs is not None):
for elt in sum_morbs:
decrease += (len(sum_morbs[elt]) - 1) * len(dictpa[elt])
number_figs = max_number_figs - decrease
elif (sum_atoms is not None) and (sum_morbs is not None):
for elt in sum_atoms:
decrease += (len(sum_atoms[elt]) - 1) * len(dictio[elt])
for elt in sum_morbs:
if elt in sum_atoms:
decrease += (len(sum_morbs[elt]) - 1) * (
len(dictpa[elt]) - len(sum_atoms[elt]) + 1)
else:
decrease += (len(sum_morbs[elt]) - 1) * len(dictpa[elt])
number_figs = max_number_figs - decrease
else:
raise ValueError("Invalid format of 'sum_atoms' and 'sum_morbs'.")
return dictpa, sum_atoms, number_figs
def _summarize_keys_for_plot(self, dictio, dictpa, sum_atoms, sum_morbs):
from pymatgen.core.periodic_table import Element
individual_orbs = {'p': ['px', 'py', 'pz'],
'd': ['dxy', 'dyz', 'dxz', 'dx2', 'dz2'],
'f': ['f_3', 'f_2', 'f_1', 'f0', 'f1', 'f2', 'f3']}
def number_label(list_numbers):
list_numbers = sorted(list_numbers)
divide = [[]]
divide[0].append(list_numbers[0])
group = 0
for i in range(1, len(list_numbers)):
if list_numbers[i] == list_numbers[i - 1] + 1:
divide[group].append(list_numbers[i])
else:
group += 1
divide.append([list_numbers[i]])
label = ""
for elem in divide:
if len(elem) > 1:
label += str(elem[0]) + "-" + str(elem[-1]) + ","
else:
label += str(elem[0]) + ","
return label[:-1]
def orbital_label(list_orbitals):
divide = {}
for orb in list_orbitals:
if orb[0] in divide:
divide[orb[0]].append(orb)
else:
divide[orb[0]] = []
divide[orb[0]].append(orb)
label = ""
for elem in divide:
if elem == 's':
label += "s" + ","
else:
if len(divide[elem]) == len(individual_orbs[elem]):
label += elem + ","
else:
l = [o[1:] for o in divide[elem]]
label += elem + str(l).replace("['", "").replace("']",
"").replace(
"', '", "-") + ","
return label[:-1]
if (sum_atoms is None) and (sum_morbs is None):
dictio_d = dictio
dictpa_d = {elt: [str(anum) for anum in dictpa[elt]] for elt in
dictpa}
elif (sum_atoms is not None) and (sum_morbs is None):
dictio_d = dictio
dictpa_d = {}
for elt in dictpa:
dictpa_d[elt] = []
if elt in sum_atoms:
_sites = self._bs.structure.sites
indices = []
for i in range(0, len(_sites)):
if list(_sites[i]._species.keys())[0].__eq__(Element(elt)):
indices.append(i + 1)
flag_1 = len(set(dictpa[elt]).intersection(indices))
flag_2 = len(set(sum_atoms[elt]).intersection(indices))
if flag_1 == len(indices) and flag_2 == len(indices):
dictpa_d[elt].append('all')
else:
for anum in dictpa[elt]:
if anum not in sum_atoms[elt]:
dictpa_d[elt].append(str(anum))
label = number_label(sum_atoms[elt])
dictpa_d[elt].append(label)
else:
for anum in dictpa[elt]:
dictpa_d[elt].append(str(anum))
elif (sum_atoms is None) and (sum_morbs is not None):
dictio_d = {}
for elt in dictio:
dictio_d[elt] = []
if elt in sum_morbs:
for morb in dictio[elt]:
if morb not in sum_morbs[elt]:
dictio_d[elt].append(morb)
label = orbital_label(sum_morbs[elt])
dictio_d[elt].append(label)
else:
dictio_d[elt] = dictio[elt]
dictpa_d = {elt: [str(anum) for anum in dictpa[elt]] for elt in
dictpa}
else:
dictio_d = {}
for elt in dictio:
dictio_d[elt] = []
if elt in sum_morbs:
for morb in dictio[elt]:
if morb not in sum_morbs[elt]:
dictio_d[elt].append(morb)
label = orbital_label(sum_morbs[elt])
dictio_d[elt].append(label)
else:
dictio_d[elt] = dictio[elt]
dictpa_d = {}
for elt in dictpa:
dictpa_d[elt] = []
if elt in sum_atoms:
_sites = self._bs.structure.sites
indices = []
for i in range(0, len(_sites)):
if list(_sites[i]._species.keys())[0].__eq__(Element(elt)):
indices.append(i + 1)
flag_1 = len(set(dictpa[elt]).intersection(indices))
flag_2 = len(set(sum_atoms[elt]).intersection(indices))
if flag_1 == len(indices) and flag_2 == len(indices):
dictpa_d[elt].append('all')
else:
for anum in dictpa[elt]:
if anum not in sum_atoms[elt]:
dictpa_d[elt].append(str(anum))
label = number_label(sum_atoms[elt])
dictpa_d[elt].append(label)
else:
for anum in dictpa[elt]:
dictpa_d[elt].append(str(anum))
return dictio_d, dictpa_d
def _maketicks_selected(self, plt, branches):
"""
utility private method to add ticks to a band structure with selected branches
"""
ticks = self.get_ticks()
distance = []
label = []
rm_elems = []
for i in range(1, len(ticks['distance'])):
if ticks['label'][i] == ticks['label'][i - 1]:
rm_elems.append(i)
for i in range(len(ticks['distance'])):
if i not in rm_elems:
distance.append(ticks['distance'][i])
label.append(ticks['label'][i])
l_branches = [distance[i] - distance[i - 1] for i in
range(1, len(distance))]
n_distance = []
n_label = []
for branch in branches:
n_distance.append(l_branches[branch])
if ("$\\mid$" not in label[branch]) and (
"$\\mid$" not in label[branch + 1]):
n_label.append([label[branch], label[branch + 1]])
elif ("$\\mid$" in label[branch]) and (
"$\\mid$" not in label[branch + 1]):
n_label.append(
[label[branch].split("$")[-1], label[branch + 1]])
elif ("$\\mid$" not in label[branch]) and (
"$\\mid$" in label[branch + 1]):
n_label.append([label[branch], label[branch + 1].split("$")[0]])
else:
n_label.append([label[branch].split("$")[-1],
label[branch + 1].split("$")[0]])
f_distance = []
rf_distance = []
f_label = []
f_label.append(n_label[0][0])
f_label.append(n_label[0][1])
f_distance.append(0.0)
f_distance.append(n_distance[0])
rf_distance.append(0.0)
rf_distance.append(n_distance[0])
length = n_distance[0]
for i in range(1, len(n_distance)):
if n_label[i][0] == n_label[i - 1][1]:
f_distance.append(length)
f_distance.append(length + n_distance[i])
f_label.append(n_label[i][0])
f_label.append(n_label[i][1])
else:
f_distance.append(length + n_distance[i])
f_label[-1] = n_label[i - 1][1] + "$\\mid$" + n_label[i][0]
f_label.append(n_label[i][1])
rf_distance.append(length + n_distance[i])
length += n_distance[i]
n_ticks = {'distance': f_distance, 'label': f_label}
uniq_d = []
uniq_l = []
temp_ticks = list(zip(n_ticks['distance'], n_ticks['label']))
for i in range(len(temp_ticks)):
if i == 0:
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Adding label {l} at {d}".format(
                    l=temp_ticks[i][1], d=temp_ticks[i][0]))
else:
if temp_ticks[i][1] == temp_ticks[i - 1][1]:
logger.debug("Skipping label {i}".format(
i=temp_ticks[i][1]))
else:
logger.debug("Adding label {l} at {d}".format(
                        l=temp_ticks[i][1], d=temp_ticks[i][0]))
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Unique labels are %s" % list(zip(uniq_d, uniq_l)))
plt.gca().set_xticks(uniq_d)
plt.gca().set_xticklabels(uniq_l)
for i in range(len(n_ticks['label'])):
if n_ticks['label'][i] is not None:
# don't print the same label twice
if i != 0:
if n_ticks['label'][i] == n_ticks['label'][i - 1]:
logger.debug("already print label... "
"skipping label {i}".format(
i=n_ticks['label'][i]))
else:
logger.debug("Adding a line at {d}"
" for label {l}".format(
d=n_ticks['distance'][i], l=n_ticks['label'][i]))
plt.axvline(n_ticks['distance'][i], color='k')
else:
logger.debug("Adding a line at {d} for label {l}".format(
d=n_ticks['distance'][i], l=n_ticks['label'][i]))
plt.axvline(n_ticks['distance'][i], color='k')
shift = []
br = -1
for branch in branches:
br += 1
shift.append(distance[branch] - rf_distance[br])
return plt, shift
class BSDOSPlotter:
"""
A joint, aligned band structure and density of states plot. Contributions
from Jan Pohls as well as the online example from Germain Salvato-Vallverdu:
http://gvallver.perso.univ-pau.fr/?p=587
"""
def __init__(self, bs_projection="elements", dos_projection="elements",
vb_energy_range=4, cb_energy_range=4, fixed_cb_energy=False,
egrid_interval=1, font="Times New Roman", axis_fontsize=20,
tick_fontsize=15, legend_fontsize=14, bs_legend="best",
dos_legend="best", rgb_legend=True, fig_size=(11, 8.5)):
"""
Instantiate plotter settings.
Args:
bs_projection (str): "elements" or None
dos_projection (str): "elements", "orbitals", or None
vb_energy_range (float): energy in eV to show of valence bands
cb_energy_range (float): energy in eV to show of conduction bands
fixed_cb_energy (bool): If true, the cb_energy_range will be interpreted
as constant (i.e., no gap correction for cb energy)
egrid_interval (float): interval for grid marks
font (str): font family
axis_fontsize (float): font size for axis
tick_fontsize (float): font size for axis tick labels
legend_fontsize (float): font size for legends
bs_legend (str): matplotlib string location for legend or None
dos_legend (str): matplotlib string location for legend or None
rgb_legend (bool): (T/F) whether to draw RGB triangle/bar for element proj.
fig_size(tuple): dimensions of figure size (width, height)
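        Example (an illustrative sketch of a customized plotter; all values
        shown are arbitrary choices, not defaults):
            plotter = BSDOSPlotter(bs_projection="elements",
                                   dos_projection="orbitals",
                                   vb_energy_range=6, cb_energy_range=6,
                                   rgb_legend=False, fig_size=(12, 8))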
"""
self.bs_projection = bs_projection
self.dos_projection = dos_projection
self.vb_energy_range = vb_energy_range
self.cb_energy_range = cb_energy_range
self.fixed_cb_energy = fixed_cb_energy
self.egrid_interval = egrid_interval
self.font = font
self.axis_fontsize = axis_fontsize
self.tick_fontsize = tick_fontsize
self.legend_fontsize = legend_fontsize
self.bs_legend = bs_legend
self.dos_legend = dos_legend
self.rgb_legend = rgb_legend
self.fig_size = fig_size
def get_plot(self, bs, dos=None):
"""
Get a matplotlib plot object.
Args:
bs (BandStructureSymmLine): the bandstructure to plot. Projection
data must exist for projected plots.
dos (Dos): the Dos to plot. Projection data must exist (i.e.,
CompleteDos) for projected plots.
Returns:
matplotlib.pyplot object on which you can call commands like show()
and savefig()
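        Example (an illustrative sketch; the vasprun file names are
        assumptions, and the band-structure run must contain projections):
            from pymatgen.io.vasp import Vasprun
            bands_run = Vasprun("vasprun.xml", parse_projected_eigen=True)
            bs = bands_run.get_band_structure(line_mode=True)
            dos = Vasprun("vasprun_uniform.xml").complete_dos
            BSDOSPlotter().get_plot(bs, dos).show()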
"""
import matplotlib.lines as mlines
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as mplt
# make sure the user-specified band structure projection is valid
bs_projection = self.bs_projection
if dos:
elements = [e.symbol for e in dos.structure.composition.elements]
elif bs_projection and bs.structure:
elements = [e.symbol for e in bs.structure.composition.elements]
else:
elements = []
rgb_legend = self.rgb_legend and bs_projection and \
bs_projection.lower() == "elements" and \
len(elements) in [2, 3]
if bs_projection and bs_projection.lower() == "elements" and \
(len(elements) not in [2, 3] or
not bs.get_projection_on_elements()):
warnings.warn(
"Cannot get element projected data; either the projection data "
"doesn't exist, or you don't have a compound with exactly 2 "
"or 3 unique elements.")
bs_projection = None
# specify energy range of plot
emin = -self.vb_energy_range
emax = self.cb_energy_range if self.fixed_cb_energy else \
self.cb_energy_range + bs.get_band_gap()["energy"]
# initialize all the k-point labels and k-point x-distances for bs plot
xlabels = [] # all symmetry point labels on x-axis
xlabel_distances = [] # positions of symmetry point x-labels
x_distances = [] # x positions of kpoint data
prev_right_klabel = None # used to determine which branches require a midline separator
for idx, l in enumerate(bs.branches):
# get left and right kpoint labels of this branch
left_k, right_k = l["name"].split("-")
# add $ notation for LaTeX kpoint labels
if left_k[0] == "\\" or "_" in left_k:
left_k = "$" + left_k + "$"
if right_k[0] == "\\" or "_" in right_k:
right_k = "$" + right_k + "$"
# add left k label to list of labels
if prev_right_klabel is None:
xlabels.append(left_k)
xlabel_distances.append(0)
elif prev_right_klabel != left_k: # used for pipe separator
xlabels[-1] = xlabels[-1] + "$\\mid$ " + left_k
# add right k label to list of labels
xlabels.append(right_k)
prev_right_klabel = right_k
# add x-coordinates for labels
left_kpoint = bs.kpoints[l["start_index"]].cart_coords
right_kpoint = bs.kpoints[l["end_index"]].cart_coords
distance = np.linalg.norm(right_kpoint - left_kpoint)
xlabel_distances.append(xlabel_distances[-1] + distance)
# add x-coordinates for kpoint data
npts = l["end_index"] - l["start_index"]
distance_interval = distance / npts
x_distances.append(xlabel_distances[-2])
for i in range(npts):
x_distances.append(x_distances[-1] + distance_interval)
# set up bs and dos plot
gs = GridSpec(1, 2, width_ratios=[2, 1]) if dos else GridSpec(1, 1)
fig = mplt.figure(figsize=self.fig_size)
fig.patch.set_facecolor('white')
bs_ax = mplt.subplot(gs[0])
if dos:
dos_ax = mplt.subplot(gs[1])
# set basic axes limits for the plot
bs_ax.set_xlim(0, x_distances[-1])
bs_ax.set_ylim(emin, emax)
if dos:
dos_ax.set_ylim(emin, emax)
# add BS xticks, labels, etc.
bs_ax.set_xticks(xlabel_distances)
bs_ax.set_xticklabels(xlabels, size=self.tick_fontsize)
bs_ax.set_xlabel('Wavevector $k$', fontsize=self.axis_fontsize,
family=self.font)
bs_ax.set_ylabel('$E-E_F$ / eV', fontsize=self.axis_fontsize,
family=self.font)
# add BS fermi level line at E=0 and gridlines
bs_ax.hlines(y=0, xmin=0, xmax=x_distances[-1], color="k", lw=2)
bs_ax.set_yticks(np.arange(emin, emax + 1E-5, self.egrid_interval))
bs_ax.set_yticklabels(np.arange(emin, emax + 1E-5, self.egrid_interval),
size=self.tick_fontsize)
bs_ax.set_axisbelow(True)
bs_ax.grid(color=[0.5, 0.5, 0.5], linestyle='dotted', linewidth=1)
if dos:
dos_ax.set_yticks(np.arange(emin, emax + 1E-5, self.egrid_interval))
dos_ax.set_yticklabels([])
dos_ax.grid(color=[0.5, 0.5, 0.5], linestyle='dotted', linewidth=1)
# renormalize the band energy to the Fermi level
band_energies = {}
for spin in (Spin.up, Spin.down):
if spin in bs.bands:
band_energies[spin] = []
for band in bs.bands[spin]:
band_energies[spin].append([e - bs.efermi for e in band])
# renormalize the DOS energies to Fermi level
if dos:
dos_energies = [e - dos.efermi for e in dos.energies]
# get the projection data to set colors for the band structure
colordata = self._get_colordata(bs, elements, bs_projection)
# plot the colored band structure lines
for spin in (Spin.up, Spin.down):
if spin in band_energies:
linestyles = "solid" if spin == Spin.up else "dotted"
for band_idx, band in enumerate(band_energies[spin]):
self._rgbline(bs_ax, x_distances, band,
colordata[spin][band_idx, :, 0],
colordata[spin][band_idx, :, 1],
colordata[spin][band_idx, :, 2],
linestyles=linestyles)
if dos:
# Plot the DOS and projected DOS
for spin in (Spin.up, Spin.down):
if spin in dos.densities:
# plot the total DOS
dos_densities = dos.densities[spin] * int(spin)
label = "total" if spin == Spin.up else None
dos_ax.plot(dos_densities, dos_energies,
color=(0.6, 0.6, 0.6), label=label)
                    dos_ax.fill_betweenx(dos_energies, 0, dos_densities,
color=(0.7, 0.7, 0.7),
facecolor=(0.7, 0.7, 0.7))
if self.dos_projection is None:
pass
elif self.dos_projection.lower() == "elements":
# plot the atom-projected DOS
colors = ['b', 'r', 'g', 'm', 'y', 'c', 'k', 'w']
el_dos = dos.get_element_dos()
for idx, el in enumerate(elements):
dos_densities = el_dos[Element(el)].densities[
spin] * int(spin)
label = el if spin == Spin.up else None
dos_ax.plot(dos_densities, dos_energies,
color=colors[idx], label=label)
elif self.dos_projection.lower() == "orbitals":
# plot each of the atomic projected DOS
colors = ['b', 'r', 'g', 'm']
spd_dos = dos.get_spd_dos()
for idx, orb in enumerate([OrbitalType.s,
OrbitalType.p,
OrbitalType.d,
OrbitalType.f]):
if orb in spd_dos:
dos_densities = spd_dos[orb].densities[spin] * \
int(spin)
label = orb if spin == Spin.up else None
dos_ax.plot(dos_densities, dos_energies,
color=colors[idx], label=label)
# get index of lowest and highest energy being plotted, used to help auto-scale DOS x-axis
emin_idx = next(x[0] for x in enumerate(dos_energies) if
x[1] >= emin)
emax_idx = len(dos_energies) - \
next(x[0] for x in enumerate(reversed(dos_energies))
if x[1] <= emax)
# determine DOS x-axis range
dos_xmin = 0 if Spin.down not in dos.densities else -max(
dos.densities[Spin.down][emin_idx:emax_idx + 1] * 1.05)
dos_xmax = max([max(dos.densities[Spin.up][emin_idx:emax_idx]) *
1.05, abs(dos_xmin)])
# set up the DOS x-axis and add Fermi level line
dos_ax.set_xlim(dos_xmin, dos_xmax)
dos_ax.set_xticklabels([])
dos_ax.hlines(y=0, xmin=dos_xmin, xmax=dos_xmax, color="k", lw=2)
dos_ax.set_xlabel('DOS', fontsize=self.axis_fontsize,
family=self.font)
# add legend for band structure
if self.bs_legend and not rgb_legend:
handles = []
if bs_projection is None:
handles = [mlines.Line2D([], [], linewidth=2,
color='k', label='spin up'),
mlines.Line2D([], [], linewidth=2,
color='b', linestyle="dotted",
label='spin down')]
elif bs_projection.lower() == "elements":
colors = ['b', 'r', 'g']
for idx, el in enumerate(elements):
handles.append(mlines.Line2D([], [],
linewidth=2,
color=colors[idx], label=el))
bs_ax.legend(handles=handles, fancybox=True,
prop={'size': self.legend_fontsize,
'family': self.font}, loc=self.bs_legend)
elif self.bs_legend and rgb_legend:
if len(elements) == 2:
self._rb_line(bs_ax, elements[1], elements[0],
loc=self.bs_legend)
elif len(elements) == 3:
self._rgb_triangle(bs_ax, elements[1], elements[2], elements[0],
loc=self.bs_legend)
# add legend for DOS
if dos and self.dos_legend:
dos_ax.legend(fancybox=True, prop={'size': self.legend_fontsize,
'family': self.font},
loc=self.dos_legend)
mplt.subplots_adjust(wspace=0.1)
return mplt
@staticmethod
def _rgbline(ax, k, e, red, green, blue, alpha=1, linestyles="solid"):
"""
An RGB colored line for plotting.
creation of segments based on:
http://nbviewer.ipython.org/urls/raw.github.com/dpsanders/matplotlib-examples/master/colorline.ipynb
Args:
ax: matplotlib axis
k: x-axis data (k-points)
e: y-axis data (energies)
red: red data
green: green data
blue: blue data
alpha: alpha values data
linestyles: linestyle for plot (e.g., "solid" or "dotted")
"""
from matplotlib.collections import LineCollection
pts = np.array([k, e]).T.reshape(-1, 1, 2)
seg = np.concatenate([pts[:-1], pts[1:]], axis=1)
nseg = len(k) - 1
r = [0.5 * (red[i] + red[i + 1]) for i in range(nseg)]
g = [0.5 * (green[i] + green[i + 1]) for i in range(nseg)]
b = [0.5 * (blue[i] + blue[i + 1]) for i in range(nseg)]
        a = np.ones(nseg, float) * alpha  # builtin float (np.float is deprecated)
lc = LineCollection(seg, colors=list(zip(r, g, b, a)),
linewidth=2, linestyles=linestyles)
ax.add_collection(lc)
@staticmethod
def _get_colordata(bs, elements, bs_projection):
"""
Get color data, including projected band structures
Args:
bs: Bandstructure object
elements: elements (in desired order) for setting to blue, red, green
bs_projection: None for no projection, "elements" for element projection
        Returns:
            A dict of {spin: np.ndarray} of RGB color values with shape
            (n_bands, n_kpoints, 3).
"""
contribs = {}
if bs_projection and bs_projection.lower() == "elements":
projections = bs.get_projection_on_elements()
for spin in (Spin.up, Spin.down):
if spin in bs.bands:
contribs[spin] = []
for band_idx in range(bs.nb_bands):
colors = []
for k_idx in range(len(bs.kpoints)):
if bs_projection and bs_projection.lower() == "elements":
c = [0, 0, 0]
projs = projections[spin][band_idx][k_idx]
# note: squared color interpolations are smoother
# see: https://youtu.be/LKnqECcg6Gw
projs = dict(
[(k, v ** 2) for k, v in projs.items()])
total = sum(projs.values())
if total > 0:
for idx, e in enumerate(elements):
c[idx] = math.sqrt(projs[
                                e] / total)  # normalized elemental weight
c = [c[1], c[2],
c[0]] # prefer blue, then red, then green
else:
c = [0, 0, 0] if spin == Spin.up \
else [0, 0,
1] # black for spin up, blue for spin down
colors.append(c)
contribs[spin].append(colors)
contribs[spin] = np.array(contribs[spin])
return contribs
@staticmethod
def _rgb_triangle(ax, r_label, g_label, b_label, loc):
"""
Draw an RGB triangle legend on the desired axis
"""
        if loc not in range(1, 11):
loc = 2
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
inset_ax = inset_axes(ax, width=1, height=1, loc=loc)
mesh = 35
x = []
y = []
color = []
for r in range(0, mesh):
for g in range(0, mesh):
for b in range(0, mesh):
if not (r == 0 and b == 0 and g == 0):
r1 = r / (r + g + b)
g1 = g / (r + g + b)
b1 = b / (r + g + b)
x.append(0.33 * (2. * g1 + r1) / (r1 + b1 + g1))
y.append(0.33 * np.sqrt(3) * r1 / (r1 + b1 + g1))
rc = math.sqrt(r ** 2 / (r ** 2 + g ** 2 + b ** 2))
gc = math.sqrt(g ** 2 / (r ** 2 + g ** 2 + b ** 2))
bc = math.sqrt(b ** 2 / (r ** 2 + g ** 2 + b ** 2))
color.append([rc, gc, bc])
# x = [n + 0.25 for n in x] # nudge x coordinates
# y = [n + (max_y - 1) for n in y] # shift y coordinates to top
# plot the triangle
inset_ax.scatter(x, y, s=7, marker='.', edgecolor=color)
inset_ax.set_xlim([-0.35, 1.00])
inset_ax.set_ylim([-0.35, 1.00])
# add the labels
inset_ax.text(0.70, -0.2, g_label, fontsize=13,
family='Times New Roman', color=(0, 0, 0),
horizontalalignment='left')
inset_ax.text(0.325, 0.70, r_label, fontsize=13,
family='Times New Roman', color=(0, 0, 0),
horizontalalignment='center')
inset_ax.text(-0.05, -0.2, b_label, fontsize=13,
family='Times New Roman', color=(0, 0, 0),
horizontalalignment='right')
inset_ax.get_xaxis().set_visible(False)
inset_ax.get_yaxis().set_visible(False)
@staticmethod
def _rb_line(ax, r_label, b_label, loc):
# Draw an rb bar legend on the desired axis
        if loc not in range(1, 11):
loc = 2
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
inset_ax = inset_axes(ax, width=1.2, height=0.4, loc=loc)
x = []
y = []
color = []
for i in range(0, 1000):
x.append(i / 1800. + 0.55)
y.append(0)
color.append([math.sqrt(c) for c in
[1 - (i / 1000) ** 2, 0, (i / 1000) ** 2]])
# plot the bar
inset_ax.scatter(x, y, s=250., marker='s', edgecolor=color)
inset_ax.set_xlim([-0.1, 1.7])
inset_ax.text(1.35, 0, b_label, fontsize=13,
family='Times New Roman', color=(0, 0, 0),
horizontalalignment="left", verticalalignment="center")
inset_ax.text(0.30, 0, r_label, fontsize=13,
family='Times New Roman', color=(0, 0, 0),
horizontalalignment="right", verticalalignment="center")
inset_ax.get_xaxis().set_visible(False)
inset_ax.get_yaxis().set_visible(False)
class BoltztrapPlotter:
# TODO: We need a unittest for this. Come on folks.
"""
class containing methods to plot the data from Boltztrap.
Args:
bz: a BoltztrapAnalyzer object
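    Example (an illustrative sketch; the BoltzTraP output directory is an
    assumption):
        from pymatgen.electronic_structure.boltztrap import BoltztrapAnalyzer
        an = BoltztrapAnalyzer.from_files("boltztrap_outputs/")
        plotter = BoltztrapPlotter(an)
        plotter.plot_seebeck_mu(temp=600).show()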
"""
def __init__(self, bz):
self._bz = bz
def _plot_doping(self, temp):
import matplotlib.pyplot as plt
if len(self._bz.doping) != 0:
limit = 2.21e15
plt.axvline(self._bz.mu_doping['n'][temp][0], linewidth=3.0,
linestyle="--")
plt.text(self._bz.mu_doping['n'][temp][0] + 0.01,
limit,
"$n$=10$^{" + str(
math.log10(self._bz.doping['n'][0])) + "}$",
color='b')
plt.axvline(self._bz.mu_doping['n'][temp][-1], linewidth=3.0,
linestyle="--")
plt.text(self._bz.mu_doping['n'][temp][-1] + 0.01,
limit,
"$n$=10$^{" + str(math.log10(self._bz.doping['n'][-1]))
+ "}$", color='b')
plt.axvline(self._bz.mu_doping['p'][temp][0], linewidth=3.0,
linestyle="--")
plt.text(self._bz.mu_doping['p'][temp][0] + 0.01,
limit,
"$p$=10$^{" + str(
math.log10(self._bz.doping['p'][0])) + "}$",
color='b')
plt.axvline(self._bz.mu_doping['p'][temp][-1], linewidth=3.0,
linestyle="--")
plt.text(self._bz.mu_doping['p'][temp][-1] + 0.01,
limit, "$p$=10$^{" +
str(math.log10(self._bz.doping['p'][-1])) + "}$",
color='b')
def _plot_bg_limits(self):
import matplotlib.pyplot as plt
plt.axvline(0.0, color='k', linewidth=3.0)
plt.axvline(self._bz.gap, color='k', linewidth=3.0)
def plot_seebeck_eff_mass_mu(self, temps=[300], output='average',
Lambda=0.5):
"""
        Plot the Seebeck effective mass as a function of the chemical
        potential, calculated as explained in Ref.
Gibbs, Z. M. et al., Effective mass and fermi surface complexity factor
from ab initio band structure calculations.
npj Computational Materials 3, 8 (2017).
Args:
output: 'average' returns the seebeck effective mass calculated
using the average of the three diagonal components of the
                seebeck tensor. 'tensor' returns the seebeck effective mass
                with respect to the three diagonal components of the seebeck
                tensor.
temps: list of temperatures of calculated seebeck.
Lambda: fitting parameter used to model the scattering (0.5 means
constant relaxation time).
Returns:
a matplotlib object
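        Example (an illustrative sketch; assumes an existing BoltztrapPlotter
        instance named bp):
            plt = bp.plot_seebeck_eff_mass_mu(temps=[300, 600],
                                              output='average', Lambda=0.5)
            plt.show()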
"""
import matplotlib.pyplot as plt
plt.figure(figsize=(9, 7))
for T in temps:
sbk_mass = self._bz.get_seebeck_eff_mass(output=output, temp=T,
                                                      Lambda=Lambda)
# remove noise inside the gap
start = self._bz.mu_doping['p'][T][0]
stop = self._bz.mu_doping['n'][T][0]
mu_steps_1 = []
mu_steps_2 = []
sbk_mass_1 = []
sbk_mass_2 = []
for i, mu in enumerate(self._bz.mu_steps):
if mu <= start:
mu_steps_1.append(mu)
sbk_mass_1.append(sbk_mass[i])
elif mu >= stop:
mu_steps_2.append(mu)
sbk_mass_2.append(sbk_mass[i])
plt.plot(mu_steps_1, sbk_mass_1, label=str(T) + 'K', linewidth=3.0)
plt.plot(mu_steps_2, sbk_mass_2, linewidth=3.0)
if output == 'average':
plt.gca().get_lines()[1].set_c(plt.gca().get_lines()[0].get_c())
elif output == 'tensor':
plt.gca().get_lines()[3].set_c(plt.gca().get_lines()[0].get_c())
plt.gca().get_lines()[4].set_c(plt.gca().get_lines()[1].get_c())
plt.gca().get_lines()[5].set_c(plt.gca().get_lines()[2].get_c())
plt.xlabel("E-E$_f$ (eV)", fontsize=30)
plt.ylabel("Seebeck effective mass", fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
if output == 'tensor':
plt.legend([str(i) + '_' + str(T) + 'K' for T in temps for i in
('x', 'y', 'z')], fontsize=20)
elif output == 'average':
plt.legend(fontsize=20)
plt.tight_layout()
return plt
def plot_complexity_factor_mu(self, temps=[300], output='average',
Lambda=0.5):
"""
        Plot the Fermi surface complexity factor as a function of the chemical
        potential, calculated as explained in Ref.
Gibbs, Z. M. et al., Effective mass and fermi surface complexity factor
from ab initio band structure calculations.
npj Computational Materials 3, 8 (2017).
Args:
output: 'average' returns the complexity factor calculated using the average
of the three diagonal components of the seebeck and conductivity tensors.
                'tensor' returns the complexity factor with respect to the
                three diagonal components of the seebeck and conductivity
                tensors.
temps: list of temperatures of calculated seebeck and conductivity.
Lambda: fitting parameter used to model the scattering (0.5 means constant
relaxation time).
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
plt.figure(figsize=(9, 7))
for T in temps:
cmplx_fact = self._bz.get_complexity_factor(output=output, temp=T,
Lambda=Lambda)
start = self._bz.mu_doping['p'][T][0]
stop = self._bz.mu_doping['n'][T][0]
mu_steps_1 = []
mu_steps_2 = []
cmplx_fact_1 = []
cmplx_fact_2 = []
for i, mu in enumerate(self._bz.mu_steps):
if mu <= start:
mu_steps_1.append(mu)
cmplx_fact_1.append(cmplx_fact[i])
elif mu >= stop:
mu_steps_2.append(mu)
cmplx_fact_2.append(cmplx_fact[i])
plt.plot(mu_steps_1, cmplx_fact_1, label=str(T) + 'K',
linewidth=3.0)
plt.plot(mu_steps_2, cmplx_fact_2, linewidth=3.0)
if output == 'average':
plt.gca().get_lines()[1].set_c(plt.gca().get_lines()[0].get_c())
elif output == 'tensor':
plt.gca().get_lines()[3].set_c(plt.gca().get_lines()[0].get_c())
plt.gca().get_lines()[4].set_c(plt.gca().get_lines()[1].get_c())
plt.gca().get_lines()[5].set_c(plt.gca().get_lines()[2].get_c())
plt.xlabel("E-E$_f$ (eV)", fontsize=30)
plt.ylabel("Complexity Factor", fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
if output == 'tensor':
plt.legend([str(i) + '_' + str(T) + 'K' for T in temps for i in
('x', 'y', 'z')], fontsize=20)
elif output == 'average':
plt.legend(fontsize=20)
plt.tight_layout()
return plt
def plot_seebeck_mu(self, temp=600, output='eig', xlim=None):
"""
        Plot the Seebeck coefficient as a function of the Fermi level
        Args:
            temp:
                the temperature
            xlim:
                a list of min and max Fermi energies for the plot; by default
                (-0.5 eV, band gap + 0.5 eV)
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
plt.figure(figsize=(9, 7))
seebeck = self._bz.get_seebeck(output=output, doping_levels=False)[
temp]
plt.plot(self._bz.mu_steps, seebeck,
linewidth=3.0)
self._plot_bg_limits()
self._plot_doping(temp)
if output == 'eig':
plt.legend(['S$_1$', 'S$_2$', 'S$_3$'])
if xlim is None:
plt.xlim(-0.5, self._bz.gap + 0.5)
else:
plt.xlim(xlim[0], xlim[1])
plt.ylabel("Seebeck \n coefficient ($\\mu$V/K)", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt
def plot_conductivity_mu(self, temp=600, output='eig',
relaxation_time=1e-14, xlim=None):
"""
        Plot the conductivity as a function of the Fermi level. Semi-log plot
        Args:
            temp: the temperature
            xlim: a list of min and max Fermi energies for the plot; by default
                (-0.5 eV, band gap + 0.5 eV)
            relaxation_time: constant relaxation time in s (default 1e-14)
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
cond = self._bz.get_conductivity(relaxation_time=relaxation_time,
output=output, doping_levels=False)[
temp]
plt.figure(figsize=(9, 7))
plt.semilogy(self._bz.mu_steps, cond, linewidth=3.0)
self._plot_bg_limits()
self._plot_doping(temp)
if output == 'eig':
plt.legend(['$\\Sigma_1$', '$\\Sigma_2$', '$\\Sigma_3$'])
if xlim is None:
plt.xlim(-0.5, self._bz.gap + 0.5)
else:
plt.xlim(xlim)
plt.ylim([1e13 * relaxation_time, 1e20 * relaxation_time])
plt.ylabel("conductivity,\n $\\Sigma$ (1/($\\Omega$ m))", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30.0)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt
def plot_power_factor_mu(self, temp=600, output='eig',
relaxation_time=1e-14, xlim=None):
"""
        Plot the power factor as a function of the Fermi level. Semi-log plot
        Args:
            temp: the temperature
            xlim: a list of min and max Fermi energies for the plot; by default
                (-0.5 eV, band gap + 0.5 eV)
            relaxation_time: constant relaxation time in s (default 1e-14)
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
plt.figure(figsize=(9, 7))
pf = self._bz.get_power_factor(relaxation_time=relaxation_time,
output=output, doping_levels=False)[
temp]
plt.semilogy(self._bz.mu_steps, pf, linewidth=3.0)
self._plot_bg_limits()
self._plot_doping(temp)
if output == 'eig':
plt.legend(['PF$_1$', 'PF$_2$', 'PF$_3$'])
if xlim is None:
plt.xlim(-0.5, self._bz.gap + 0.5)
else:
plt.xlim(xlim)
plt.ylabel("Power factor, ($\\mu$W/(mK$^2$))", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30.0)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt
def plot_zt_mu(self, temp=600, output='eig', relaxation_time=1e-14,
xlim=None):
"""
        Plot the ZT as a function of the Fermi level.
        Args:
            temp: the temperature
            xlim: a list of min and max Fermi energies for the plot; by default
                (-0.5 eV, band gap + 0.5 eV)
            relaxation_time: constant relaxation time in s (default 1e-14)
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
plt.figure(figsize=(9, 7))
zt = self._bz.get_zt(relaxation_time=relaxation_time, output=output,
doping_levels=False)[temp]
plt.plot(self._bz.mu_steps, zt, linewidth=3.0)
self._plot_bg_limits()
self._plot_doping(temp)
if output == 'eig':
plt.legend(['ZT$_1$', 'ZT$_2$', 'ZT$_3$'])
if xlim is None:
plt.xlim(-0.5, self._bz.gap + 0.5)
else:
plt.xlim(xlim)
plt.ylabel("ZT", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30.0)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt
def plot_seebeck_temp(self, doping='all', output='average'):
"""
        Plot the Seebeck coefficient as a function of temperature for
        different doping levels.
        Args:
            doping: the default 'all' plots all the doping levels in the
                analyzer. Specify a list of doping levels if you want to plot
                only some.
            output: with 'average' you get an average of the three directions;
                with 'eigs' you get all three directions.
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
if output == 'average':
sbk = self._bz.get_seebeck(output='average')
elif output == 'eigs':
sbk = self._bz.get_seebeck(output='eigs')
plt.figure(figsize=(22, 14))
tlist = sorted(sbk['n'].keys())
doping = self._bz.doping['n'] if doping == 'all' else doping
for i, dt in enumerate(['n', 'p']):
plt.subplot(121 + i)
for dop in doping:
d = self._bz.doping[dt].index(dop)
sbk_temp = []
for temp in tlist:
sbk_temp.append(sbk[dt][temp][d])
if output == 'average':
plt.plot(tlist, sbk_temp, marker='s',
label=str(dop) + ' $cm^{-3}$')
elif output == 'eigs':
for xyz in range(3):
                        plt.plot(tlist, list(zip(*sbk_temp))[xyz], marker='s',
label=str(xyz) + ' ' + str(dop) + ' $cm^{-3}$')
plt.title(dt + '-type', fontsize=20)
if i == 0:
plt.ylabel("Seebeck \n coefficient ($\\mu$V/K)", fontsize=30.0)
plt.xlabel('Temperature (K)', fontsize=30.0)
p = 'lower right' if i == 0 else ''
plt.legend(loc=p, fontsize=15)
plt.grid()
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt
def plot_conductivity_temp(self, doping='all', output='average',
relaxation_time=1e-14):
"""
        Plot the conductivity as a function of temperature for different
        doping levels.
        Args:
            doping: the default 'all' plots all the doping levels in the
                analyzer. Specify a list of doping levels if you want to plot
                only some.
            output: with 'average' you get an average of the three directions;
                with 'eigs' you get all three directions.
relaxation_time: specify a constant relaxation time value
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
if output == 'average':
cond = self._bz.get_conductivity(relaxation_time=relaxation_time,
output='average')
elif output == 'eigs':
cond = self._bz.get_conductivity(relaxation_time=relaxation_time,
output='eigs')
plt.figure(figsize=(22, 14))
tlist = sorted(cond['n'].keys())
doping = self._bz.doping['n'] if doping == 'all' else doping
for i, dt in enumerate(['n', 'p']):
plt.subplot(121 + i)
for dop in doping:
d = self._bz.doping[dt].index(dop)
cond_temp = []
for temp in tlist:
cond_temp.append(cond[dt][temp][d])
if output == 'average':
plt.plot(tlist, cond_temp, marker='s',
label=str(dop) + ' $cm^{-3}$')
elif output == 'eigs':
for xyz in range(3):
                        plt.plot(tlist, list(zip(*cond_temp))[xyz], marker='s',
label=str(xyz) + ' ' + str(dop) + ' $cm^{-3}$')
plt.title(dt + '-type', fontsize=20)
if i == 0:
plt.ylabel("conductivity $\\sigma$ (1/($\\Omega$ m))",
fontsize=30.0)
plt.xlabel('Temperature (K)', fontsize=30.0)
p = '' # 'lower right' if i == 0 else ''
plt.legend(loc=p, fontsize=15)
plt.grid()
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.tight_layout()
return plt
def plot_power_factor_temp(self, doping='all', output='average',
relaxation_time=1e-14):
"""
        Plot the Power Factor as a function of temperature for different
        doping levels.
        Args:
            doping: the default 'all' plots all the doping levels in the
                analyzer. Specify a list of doping levels if you want to plot
                only some.
            output: with 'average' you get an average of the three directions;
                with 'eigs' you get all three directions.
relaxation_time: specify a constant relaxation time value
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
if output == 'average':
pf = self._bz.get_power_factor(relaxation_time=relaxation_time,
output='average')
elif output == 'eigs':
pf = self._bz.get_power_factor(relaxation_time=relaxation_time,
output='eigs')
plt.figure(figsize=(22, 14))
tlist = sorted(pf['n'].keys())
doping = self._bz.doping['n'] if doping == 'all' else doping
for i, dt in enumerate(['n', 'p']):
plt.subplot(121 + i)
for dop in doping:
d = self._bz.doping[dt].index(dop)
pf_temp = []
for temp in tlist:
pf_temp.append(pf[dt][temp][d])
if output == 'average':
plt.plot(tlist, pf_temp, marker='s',
label=str(dop) + ' $cm^{-3}$')
elif output == 'eigs':
for xyz in range(3):
                        plt.plot(tlist, list(zip(*pf_temp))[xyz], marker='s',
label=str(xyz) + ' ' + str(dop) + ' $cm^{-3}$')
plt.title(dt + '-type', fontsize=20)
if i == 0:
plt.ylabel("Power Factor ($\\mu$W/(mK$^2$))", fontsize=30.0)
plt.xlabel('Temperature (K)', fontsize=30.0)
p = '' # 'lower right' if i == 0 else ''
plt.legend(loc=p, fontsize=15)
plt.grid()
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.tight_layout()
return plt
def plot_zt_temp(self, doping='all', output='average',
relaxation_time=1e-14):
"""
        Plot the figure of merit zT as a function of temperature for different
        doping levels.
        Args:
            doping: the default 'all' plots all the doping levels in the
                analyzer. Specify a list of doping levels if you want to plot
                only some.
            output: with 'average' you get an average of the three directions;
                with 'eigs' you get all three directions.
relaxation_time: specify a constant relaxation time value
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
if output == 'average':
zt = self._bz.get_zt(relaxation_time=relaxation_time,
output='average')
elif output == 'eigs':
zt = self._bz.get_zt(relaxation_time=relaxation_time, output='eigs')
plt.figure(figsize=(22, 14))
tlist = sorted(zt['n'].keys())
doping = self._bz.doping['n'] if doping == 'all' else doping
for i, dt in enumerate(['n', 'p']):
plt.subplot(121 + i)
for dop in doping:
d = self._bz.doping[dt].index(dop)
zt_temp = []
for temp in tlist:
zt_temp.append(zt[dt][temp][d])
if output == 'average':
plt.plot(tlist, zt_temp, marker='s',
label=str(dop) + ' $cm^{-3}$')
elif output == 'eigs':
for xyz in range(3):
                        plt.plot(tlist, list(zip(*zt_temp))[xyz], marker='s',
label=str(xyz) + ' ' + str(dop) + ' $cm^{-3}$')
plt.title(dt + '-type', fontsize=20)
if i == 0:
plt.ylabel("zT", fontsize=30.0)
plt.xlabel('Temperature (K)', fontsize=30.0)
p = '' # 'lower right' if i == 0 else ''
plt.legend(loc=p, fontsize=15)
plt.grid()
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt
def plot_eff_mass_temp(self, doping='all', output='average'):
"""
        Plot the average effective mass as a function of temperature
        for different doping levels.
        Args:
            doping: the default 'all' plots all the doping levels in the
                analyzer. Specify a list of doping levels if you want to plot
                only some.
            output: with 'average' you get an average of the three directions;
                with 'eigs' you get all three directions.
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
if output == 'average':
em = self._bz.get_average_eff_mass(output='average')
elif output == 'eigs':
em = self._bz.get_average_eff_mass(output='eigs')
plt.figure(figsize=(22, 14))
tlist = sorted(em['n'].keys())
doping = self._bz.doping['n'] if doping == 'all' else doping
for i, dt in enumerate(['n', 'p']):
plt.subplot(121 + i)
for dop in doping:
d = self._bz.doping[dt].index(dop)
em_temp = []
for temp in tlist:
em_temp.append(em[dt][temp][d])
if output == 'average':
plt.plot(tlist, em_temp, marker='s',
label=str(dop) + ' $cm^{-3}$')
elif output == 'eigs':
for xyz in range(3):
                        plt.plot(tlist, list(zip(*em_temp))[xyz], marker='s',
label=str(xyz) + ' ' + str(dop) + ' $cm^{-3}$')
plt.title(dt + '-type', fontsize=20)
if i == 0:
plt.ylabel("Effective mass (m$_e$)", fontsize=30.0)
plt.xlabel('Temperature (K)', fontsize=30.0)
p = '' # 'lower right' if i == 0 else ''
plt.legend(loc=p, fontsize=15)
plt.grid()
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt
def plot_seebeck_dop(self, temps='all', output='average'):
"""
        Plot the Seebeck coefficient as a function of doping level for
        different temperatures.
        Args:
            temps: the default 'all' plots all the temperatures in the
                analyzer. Specify a list of temperatures if you want to plot
                only some.
            output: with 'average' you get an average of the three directions;
                with 'eigs' you get all three directions.
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
if output == 'average':
sbk = self._bz.get_seebeck(output='average')
elif output == 'eigs':
sbk = self._bz.get_seebeck(output='eigs')
tlist = sorted(sbk['n'].keys()) if temps == 'all' else temps
plt.figure(figsize=(22, 14))
for i, dt in enumerate(['n', 'p']):
plt.subplot(121 + i)
for temp in tlist:
if output == 'eigs':
for xyz in range(3):
plt.semilogx(self._bz.doping[dt],
                                     list(zip(*sbk[dt][temp]))[xyz],
marker='s',
label=str(xyz) + ' ' + str(temp) + ' K')
elif output == 'average':
plt.semilogx(self._bz.doping[dt], sbk[dt][temp],
marker='s', label=str(temp) + ' K')
plt.title(dt + '-type', fontsize=20)
if i == 0:
plt.ylabel("Seebeck coefficient ($\\mu$V/K)", fontsize=30.0)
plt.xlabel('Doping concentration (cm$^{-3}$)', fontsize=30.0)
p = 'lower right' if i == 0 else ''
plt.legend(loc=p, fontsize=15)
plt.grid()
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt
def plot_conductivity_dop(self, temps='all', output='average',
relaxation_time=1e-14):
"""
        Plot the conductivity as a function of doping level for different
        temperatures.
        Args:
            temps: the default 'all' plots all the temperatures in the
                analyzer. Specify a list of temperatures if you want to plot
                only some.
            output: with 'average' you get an average of the three directions;
                with 'eigs' you get all three directions.
relaxation_time: specify a constant relaxation time value
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
if output == 'average':
cond = self._bz.get_conductivity(relaxation_time=relaxation_time,
output='average')
elif output == 'eigs':
cond = self._bz.get_conductivity(relaxation_time=relaxation_time,
output='eigs')
tlist = sorted(cond['n'].keys()) if temps == 'all' else temps
plt.figure(figsize=(22, 14))
for i, dt in enumerate(['n', 'p']):
plt.subplot(121 + i)
for temp in tlist:
if output == 'eigs':
for xyz in range(3):
plt.semilogx(self._bz.doping[dt],
                                     list(zip(*cond[dt][temp]))[xyz],
marker='s',
label=str(xyz) + ' ' + str(temp) + ' K')
elif output == 'average':
plt.semilogx(self._bz.doping[dt], cond[dt][temp],
marker='s', label=str(temp) + ' K')
plt.title(dt + '-type', fontsize=20)
if i == 0:
plt.ylabel("conductivity $\\sigma$ (1/($\\Omega$ m))",
fontsize=30.0)
plt.xlabel('Doping concentration ($cm^{-3}$)', fontsize=30.0)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
plt.legend(fontsize=15)
plt.grid()
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt
def plot_power_factor_dop(self, temps='all', output='average',
relaxation_time=1e-14):
"""
        Plot the Power Factor as a function of doping level for different
        temperatures.
        Args:
            temps: the default 'all' plots all the temperatures in the
                analyzer. Specify a list of temperatures if you want to plot
                only some.
            output: with 'average' you get an average of the three directions;
                with 'eigs' you get all three directions.
relaxation_time: specify a constant relaxation time value
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
if output == 'average':
pf = self._bz.get_power_factor(relaxation_time=relaxation_time,
output='average')
elif output == 'eigs':
pf = self._bz.get_power_factor(relaxation_time=relaxation_time,
output='eigs')
tlist = sorted(pf['n'].keys()) if temps == 'all' else temps
plt.figure(figsize=(22, 14))
for i, dt in enumerate(['n', 'p']):
plt.subplot(121 + i)
for temp in tlist:
if output == 'eigs':
for xyz in range(3):
plt.semilogx(self._bz.doping[dt],
                                     list(zip(*pf[dt][temp]))[xyz],
marker='s',
label=str(xyz) + ' ' + str(temp) + ' K')
elif output == 'average':
plt.semilogx(self._bz.doping[dt], pf[dt][temp],
marker='s', label=str(temp) + ' K')
plt.title(dt + '-type', fontsize=20)
if i == 0:
plt.ylabel("Power Factor ($\\mu$W/(mK$^2$))", fontsize=30.0)
plt.xlabel('Doping concentration ($cm^{-3}$)', fontsize=30.0)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
            plt.legend(fontsize=15)
plt.grid()
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt
def plot_zt_dop(self, temps='all', output='average', relaxation_time=1e-14):
"""
        Plot the figure of merit zT as a function of doping levels for different
temperatures.
Args:
temps: the default 'all' plots all the temperatures in the analyzer.
Specify a list of temperatures if you want to plot only some.
output: with 'average' you get an average of the three directions
with 'eigs' you get all the three directions.
relaxation_time: specify a constant relaxation time value
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
if output == 'average':
zt = self._bz.get_zt(relaxation_time=relaxation_time,
output='average')
elif output == 'eigs':
zt = self._bz.get_zt(relaxation_time=relaxation_time, output='eigs')
tlist = sorted(zt['n'].keys()) if temps == 'all' else temps
plt.figure(figsize=(22, 14))
for i, dt in enumerate(['n', 'p']):
plt.subplot(121 + i)
for temp in tlist:
if output == 'eigs':
for xyz in range(3):
plt.semilogx(self._bz.doping[dt],
                                     list(zip(*zt[dt][temp]))[xyz],
marker='s',
label=str(xyz) + ' ' + str(temp) + ' K')
elif output == 'average':
plt.semilogx(self._bz.doping[dt], zt[dt][temp],
marker='s', label=str(temp) + ' K')
plt.title(dt + '-type', fontsize=20)
if i == 0:
plt.ylabel("zT", fontsize=30.0)
plt.xlabel('Doping concentration ($cm^{-3}$)', fontsize=30.0)
            p = 'lower right' if i == 0 else 'best'
plt.legend(loc=p, fontsize=15)
plt.grid()
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt
def plot_eff_mass_dop(self, temps='all', output='average'):
"""
        Plot the average effective mass as a function of doping levels
for different temperatures.
Args:
temps: the default 'all' plots all the temperatures in the analyzer.
Specify a list of temperatures if you want to plot only some.
output: with 'average' you get an average of the three directions
with 'eigs' you get all the three directions.
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
if output == 'average':
em = self._bz.get_average_eff_mass(output='average')
elif output == 'eigs':
em = self._bz.get_average_eff_mass(output='eigs')
tlist = sorted(em['n'].keys()) if temps == 'all' else temps
plt.figure(figsize=(22, 14))
for i, dt in enumerate(['n', 'p']):
plt.subplot(121 + i)
for temp in tlist:
if output == 'eigs':
for xyz in range(3):
plt.semilogx(self._bz.doping[dt],
                                     list(zip(*em[dt][temp]))[xyz],
marker='s',
label=str(xyz) + ' ' + str(temp) + ' K')
elif output == 'average':
plt.semilogx(self._bz.doping[dt], em[dt][temp],
marker='s', label=str(temp) + ' K')
plt.title(dt + '-type', fontsize=20)
if i == 0:
plt.ylabel("Effective mass (m$_e$)", fontsize=30.0)
plt.xlabel('Doping concentration ($cm^{-3}$)', fontsize=30.0)
            p = 'lower right' if i == 0 else 'best'
plt.legend(loc=p, fontsize=15)
plt.grid()
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt
def plot_dos(self, sigma=0.05):
"""
        Plot the density of states.
        Args:
            sigma: standard deviation of the Gaussian smearing applied to the DOS
Returns:
a matplotlib object
"""
plotter = DosPlotter(sigma=sigma)
plotter.add_dos("t", self._bz.dos)
return plotter.get_plot()
def plot_carriers(self, temp=300):
"""
        Plot the carrier concentration as a function of the Fermi level
Args:
temp: the temperature
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
plt.semilogy(self._bz.mu_steps,
abs(self._bz._carrier_conc[temp] / (self._bz.vol * 1e-24)),
linewidth=3.0, color='r')
self._plot_bg_limits()
self._plot_doping(temp)
plt.xlim(-0.5, self._bz.gap + 0.5)
plt.ylim(1e14, 1e22)
        plt.ylabel("carrier concentration (cm$^{-3}$)", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
return plt
def plot_hall_carriers(self, temp=300):
"""
        Plot the Hall carrier concentration as a function of the Fermi level
Args:
temp: the temperature
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
hall_carriers = [abs(i) for i in
self._bz.get_hall_carrier_concentration()[temp]]
plt.semilogy(self._bz.mu_steps,
hall_carriers,
linewidth=3.0, color='r')
self._plot_bg_limits()
self._plot_doping(temp)
plt.xlim(-0.5, self._bz.gap + 0.5)
plt.ylim(1e14, 1e22)
        plt.ylabel("Hall carrier concentration (cm$^{-3}$)", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
return plt
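# Usage sketch for the doping-dependent plots above (illustrative only): the
# enclosing plotter is assumed to wrap a BoltztrapAnalyzer-like object exposing
# get_seebeck/get_conductivity/get_zt and the doping levels, so the loader call
# and the plotter class name below are assumptions, not part of this module.
#
#     analyzer = BoltztrapAnalyzer.from_files("boltztrap_run/")  # hypothetical loader
#     plotter = BoltztrapPlotter(analyzer)                       # hypothetical class name
#     plotter.plot_seebeck_dop(temps=[300, 600], output='eigs').show()
#     plotter.plot_zt_dop(relaxation_time=1e-14).savefig("zt_vs_doping.png")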
class CohpPlotter:
"""
Class for plotting crystal orbital Hamilton populations (COHPs) or
crystal orbital overlap populations (COOPs). It is modeled after the
DosPlotter object.
Args/attributes:
zero_at_efermi: Whether to shift all populations to have zero
energy at the Fermi level. Defaults to True.
are_coops: Switch to indicate that these are COOPs, not COHPs.
Defaults to False for COHPs.
"""
def __init__(self, zero_at_efermi=True, are_coops=False):
self.zero_at_efermi = zero_at_efermi
self.are_coops = are_coops
self._cohps = OrderedDict()
def add_cohp(self, label, cohp):
"""
Adds a COHP for plotting.
Args:
label: Label for the COHP. Must be unique.
cohp: COHP object.
"""
energies = cohp.energies - cohp.efermi if self.zero_at_efermi \
else cohp.energies
populations = cohp.get_cohp()
int_populations = cohp.get_icohp()
self._cohps[label] = {"energies": energies, "COHP": populations,
"ICOHP": int_populations, "efermi": cohp.efermi}
def add_cohp_dict(self, cohp_dict, key_sort_func=None):
"""
Adds a dictionary of COHPs with an optional sorting function
for the keys.
Args:
cohp_dict: dict of the form {label: Cohp}
key_sort_func: function used to sort the cohp_dict keys.
"""
if key_sort_func:
keys = sorted(cohp_dict.keys(), key=key_sort_func)
else:
keys = cohp_dict.keys()
for label in keys:
self.add_cohp(label, cohp_dict[label])
def get_cohp_dict(self):
"""
Returns the added COHPs as a json-serializable dict. Note that if you
have specified smearing for the COHP plot, the populations returned
will be the smeared and not the original populations.
Returns:
dict: Dict of COHP data of the form {label: {"efermi": efermi,
"energies": ..., "COHP": {Spin.up: ...}, "ICOHP": ...}}.
"""
return jsanitize(self._cohps)
def get_plot(self, xlim=None, ylim=None, plot_negative=None,
integrated=False, invert_axes=True):
"""
Get a matplotlib plot showing the COHP.
Args:
xlim: Specifies the x-axis limits. Defaults to None for
automatic determination.
ylim: Specifies the y-axis limits. Defaults to None for
automatic determination.
plot_negative: It is common to plot -COHP(E) so that the
sign means the same for COOPs and COHPs. Defaults to None
for automatic determination: If are_coops is True, this
will be set to False, else it will be set to True.
integrated: Switch to plot ICOHPs. Defaults to False.
invert_axes: Put the energies onto the y-axis, which is
common in chemistry.
Returns:
A matplotlib object.
"""
if self.are_coops:
cohp_label = "COOP"
else:
cohp_label = "COHP"
if plot_negative is None:
            plot_negative = not self.are_coops
if integrated:
cohp_label = "I" + cohp_label + " (eV)"
if plot_negative:
cohp_label = "-" + cohp_label
if self.zero_at_efermi:
energy_label = "$E - E_f$ (eV)"
else:
energy_label = "$E$ (eV)"
ncolors = max(3, len(self._cohps))
ncolors = min(9, ncolors)
import palettable
colors = palettable.colorbrewer.qualitative.Set1_9.mpl_colors
plt = pretty_plot(12, 8)
allpts = []
keys = self._cohps.keys()
for i, key in enumerate(keys):
energies = self._cohps[key]["energies"]
if not integrated:
populations = self._cohps[key]["COHP"]
else:
populations = self._cohps[key]["ICOHP"]
for spin in [Spin.up, Spin.down]:
if spin in populations:
if invert_axes:
x = -populations[spin] if plot_negative \
else populations[spin]
y = energies
else:
x = energies
y = -populations[spin] if plot_negative \
else populations[spin]
allpts.extend(list(zip(x, y)))
if spin == Spin.up:
plt.plot(x, y, color=colors[i % ncolors],
linestyle='-', label=str(key), linewidth=3)
else:
plt.plot(x, y, color=colors[i % ncolors],
linestyle='--', linewidth=3)
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
else:
xlim = plt.xlim()
relevanty = [p[1] for p in allpts if xlim[0] < p[0] < xlim[1]]
plt.ylim((min(relevanty), max(relevanty)))
xlim = plt.xlim()
ylim = plt.ylim()
if not invert_axes:
plt.plot(xlim, [0, 0], "k-", linewidth=2)
if self.zero_at_efermi:
plt.plot([0, 0], ylim, "k--", linewidth=2)
else:
plt.plot([self._cohps[key]['efermi'],
self._cohps[key]['efermi']], ylim,
color=colors[i % ncolors],
linestyle='--', linewidth=2)
else:
plt.plot([0, 0], ylim, "k-", linewidth=2)
if self.zero_at_efermi:
plt.plot(xlim, [0, 0], "k--", linewidth=2)
else:
plt.plot(xlim, [self._cohps[key]['efermi'],
self._cohps[key]['efermi']],
color=colors[i % ncolors],
linestyle='--', linewidth=2)
if invert_axes:
plt.xlabel(cohp_label)
plt.ylabel(energy_label)
else:
plt.xlabel(energy_label)
plt.ylabel(cohp_label)
plt.legend()
leg = plt.gca().get_legend()
ltext = leg.get_texts()
plt.setp(ltext, fontsize=30)
plt.tight_layout()
return plt
def save_plot(self, filename, img_format="eps", xlim=None, ylim=None):
"""
Save matplotlib plot to a file.
Args:
filename: File name to write to.
img_format: Image format to use. Defaults to EPS.
xlim: Specifies the x-axis limits. Defaults to None for
automatic determination.
ylim: Specifies the y-axis limits. Defaults to None for
automatic determination.
"""
plt = self.get_plot(xlim, ylim)
plt.savefig(filename, format=img_format)
def show(self, xlim=None, ylim=None):
"""
Show the plot using matplotlib.
Args:
xlim: Specifies the x-axis limits. Defaults to None for
automatic determination.
ylim: Specifies the y-axis limits. Defaults to None for
automatic determination.
"""
plt = self.get_plot(xlim, ylim)
plt.show()
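# Minimal usage sketch for CohpPlotter (illustrative): the Cohp objects are
# assumed to come from a COHP/COOP calculation parser that provides
# energies/efermi/get_cohp()/get_icohp(); that parser call is not shown here.
#
#     plotter = CohpPlotter(zero_at_efermi=True, are_coops=False)
#     plotter.add_cohp("Ga-As", cohp)                       # a single Cohp object
#     plotter.add_cohp_dict({"bond 1": cohp1, "bond 2": cohp2})
#     plotter.get_plot(xlim=(-10, 6), integrated=False).show()
#     plotter.save_plot("cohp.eps")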
def plot_fermi_surface(data, structure, cbm, energy_levels=[],
multiple_figure=True,
mlab_figure=None, kpoints_dict={}, color=(0, 0, 1),
transparency_factor=[], labels_scale_factor=0.05,
points_scale_factor=0.02, interative=True):
"""
Plot the Fermi surface at specific energy value.
Args:
data: energy values in a 3D grid from a CUBE file
via read_cube_file function, or from a
BoltztrapAnalyzer.fermi_surface_data
structure: structure object of the material
        energy_levels: list of energy values for the Fermi surface.
                       By default 0 eV corresponds to the VBM, as in
                       the plot of the band structure along symmetry lines.
                       Default: max energy value - 0.01 eV
cbm: Boolean value to specify if the considered band is
a conduction band or not
multiple_figure: if True a figure for each energy level will be shown.
If False all the surfaces will be shown in the same figure.
            In this last case, tune the transparency factor.
mlab_figure: provide a previous figure to plot a new surface on it.
kpoints_dict: dictionary of kpoints to show in the plot.
example: {"K":[0.5,0.0,0.5]},
where the coords are fractional.
        color: tuple (r, g, b) of floats in the range [0, 1] to define
               the color of the surface.
transparency_factor: list of values in the range [0,1] to tune
the opacity of the surfaces.
labels_scale_factor: factor to tune the size of the kpoint labels
points_scale_factor: factor to tune the size of the kpoint points
interative: if True an interactive figure will be shown.
                    If False a non-interactive figure will be shown, but
it is possible to plot other surfaces on the same figure.
To make it interactive, run mlab.show().
Returns:
a Mayavi figure and a mlab module to control the plot.
Note: Experimental.
          Please double-check the surface shown using some
          other software and report issues.
"""
try:
from mayavi import mlab
except ImportError:
raise BoltztrapError(
"Mayavi package should be installed to use this function")
bz = structure.lattice.reciprocal_lattice.get_wigner_seitz_cell()
cell = structure.lattice.reciprocal_lattice.matrix
    fact = -1 if cbm else 1
en_min = np.min(fact * data.ravel())
en_max = np.max(fact * data.ravel())
    if not energy_levels:
        energy_levels = [en_min + 0.01] if cbm else \
                        [en_max - 0.01]
print("Energy level set to: " + str(energy_levels[0]) + " eV")
else:
for e in energy_levels:
if e > en_max or e < en_min:
raise BoltztrapError("energy level " + str(e) +
" not in the range of possible energies: [" +
str(en_min) + ", " + str(en_max) + "]")
    if not transparency_factor:
transparency_factor = [1] * len(energy_levels)
if mlab_figure:
fig = mlab_figure
    if mlab_figure is None and not multiple_figure:
fig = mlab.figure(size=(1024, 768), bgcolor=(1, 1, 1))
for iface in range(len(bz)):
for line in itertools.combinations(bz[iface], 2):
for jface in range(len(bz)):
if iface < jface and any(np.all(line[0] == x)
for x in bz[jface]) and \
any(np.all(line[1] == x)
for x in bz[jface]):
mlab.plot3d(*zip(line[0], line[1]), color=(0, 0, 0),
tube_radius=None, figure=fig)
for label, coords in kpoints_dict.items():
label_coords = structure.lattice.reciprocal_lattice \
.get_cartesian_coords(coords)
mlab.points3d(*label_coords, scale_factor=points_scale_factor,
color=(0, 0, 0), figure=fig)
mlab.text3d(*label_coords, text=label, scale=labels_scale_factor,
color=(0, 0, 0), figure=fig)
for isolevel, alpha in zip(energy_levels, transparency_factor):
if multiple_figure:
fig = mlab.figure(size=(1024, 768), bgcolor=(1, 1, 1))
for iface in range(len(bz)):
for line in itertools.combinations(bz[iface], 2):
for jface in range(len(bz)):
if iface < jface and any(np.all(line[0] == x)
for x in bz[jface]) and \
any(np.all(line[1] == x)
for x in bz[jface]):
mlab.plot3d(*zip(line[0], line[1]), color=(0, 0, 0),
tube_radius=None, figure=fig)
for label, coords in kpoints_dict.items():
label_coords = structure.lattice.reciprocal_lattice \
.get_cartesian_coords(coords)
mlab.points3d(*label_coords, scale_factor=points_scale_factor,
color=(0, 0, 0), figure=fig)
mlab.text3d(*label_coords, text=label,
scale=labels_scale_factor, color=(0, 0, 0),
figure=fig)
cp = mlab.contour3d(fact * data, contours=[isolevel], transparent=True,
colormap='hot', color=color, opacity=alpha,
figure=fig)
polydata = cp.actor.actors[0].mapper.input
pts = np.array(polydata.points) # - 1
polydata.points = np.dot(pts,
cell / np.array(data.shape)[:, np.newaxis])
cx, cy, cz = [np.mean(np.array(polydata.points)[:, i])
for i in range(3)]
polydata.points = (np.array(polydata.points) - [cx, cy, cz]) * 2
#mlab.view(distance='auto')
fig.scene.isometric_view()
    if interative:
mlab.show()
return fig, mlab
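# Usage sketch (illustrative): the energy grid is assumed to come from a CUBE
# file reader or from BoltztrapAnalyzer.fermi_surface_data as mentioned in the
# docstring above; the reader call and file name below are assumptions.
#
#     data = read_cube_file("fermi_surface.cube")   # hypothetical reader call
#     fig, mlab = plot_fermi_surface(data, structure, cbm=False,
#                                    kpoints_dict={"Gamma": [0.0, 0.0, 0.0]},
#                                    interative=False)
#     mlab.show()   # make the non-interactive figure interactive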
def plot_wigner_seitz(lattice, ax=None, **kwargs):
"""
Adds the skeleton of the Wigner-Seitz cell of the lattice to a matplotlib Axes
Args:
lattice: Lattice object
ax: matplotlib :class:`Axes` or None if a new figure should be created.
kwargs: kwargs passed to the matplotlib function 'plot'. Color defaults to black
and linewidth to 1.
Returns:
matplotlib figure and matplotlib ax
"""
ax, fig, plt = get_ax3d_fig_plt(ax)
if "color" not in kwargs:
kwargs["color"] = "k"
if "linewidth" not in kwargs:
kwargs["linewidth"] = 1
bz = lattice.get_wigner_seitz_cell()
for iface in range(len(bz)):
for line in itertools.combinations(bz[iface], 2):
for jface in range(len(bz)):
if iface < jface and any(
np.all(line[0] == x) for x in bz[jface]) \
and any(np.all(line[1] == x) for x in bz[jface]):
ax.plot(*zip(line[0], line[1]), **kwargs)
return fig, ax
def plot_lattice_vectors(lattice, ax=None, **kwargs):
"""
Adds the basis vectors of the lattice provided to a matplotlib Axes
Args:
lattice: Lattice object
ax: matplotlib :class:`Axes` or None if a new figure should be created.
kwargs: kwargs passed to the matplotlib function 'plot'. Color defaults to green
and linewidth to 3.
Returns:
matplotlib figure and matplotlib ax
"""
ax, fig, plt = get_ax3d_fig_plt(ax)
if "color" not in kwargs:
kwargs["color"] = "g"
if "linewidth" not in kwargs:
kwargs["linewidth"] = 3
vertex1 = lattice.get_cartesian_coords([0.0, 0.0, 0.0])
vertex2 = lattice.get_cartesian_coords([1.0, 0.0, 0.0])
ax.plot(*zip(vertex1, vertex2), **kwargs)
vertex2 = lattice.get_cartesian_coords([0.0, 1.0, 0.0])
ax.plot(*zip(vertex1, vertex2), **kwargs)
vertex2 = lattice.get_cartesian_coords([0.0, 0.0, 1.0])
ax.plot(*zip(vertex1, vertex2), **kwargs)
return fig, ax
def plot_path(line, lattice=None, coords_are_cartesian=False, ax=None,
**kwargs):
"""
Adds a line passing through the coordinates listed in 'line' to a matplotlib Axes
Args:
line: list of coordinates.
lattice: Lattice object used to convert from reciprocal to cartesian coordinates
coords_are_cartesian: Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
Requires lattice if False.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
kwargs: kwargs passed to the matplotlib function 'plot'. Color defaults to red
and linewidth to 3.
Returns:
matplotlib figure and matplotlib ax
"""
ax, fig, plt = get_ax3d_fig_plt(ax)
if "color" not in kwargs:
kwargs["color"] = "r"
if "linewidth" not in kwargs:
kwargs["linewidth"] = 3
for k in range(1, len(line)):
vertex1 = line[k - 1]
vertex2 = line[k]
if not coords_are_cartesian:
if lattice is None:
raise ValueError(
"coords_are_cartesian False requires the lattice")
vertex1 = lattice.get_cartesian_coords(vertex1)
vertex2 = lattice.get_cartesian_coords(vertex2)
ax.plot(*zip(vertex1, vertex2), **kwargs)
return fig, ax
def plot_labels(labels, lattice=None, coords_are_cartesian=False, ax=None,
**kwargs):
"""
Adds labels to a matplotlib Axes
Args:
labels: dict containing the label as a key and the coordinates as value.
lattice: Lattice object used to convert from reciprocal to cartesian coordinates
        coords_are_cartesian: Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
Requires lattice if False.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
kwargs: kwargs passed to the matplotlib function 'text'. Color defaults to blue
and size to 25.
Returns:
matplotlib figure and matplotlib ax
"""
ax, fig, plt = get_ax3d_fig_plt(ax)
if "color" not in kwargs:
kwargs["color"] = "b"
if "size" not in kwargs:
kwargs["size"] = 25
for k, coords in labels.items():
label = k
if k.startswith("\\") or k.find("_") != -1:
label = "$" + k + "$"
off = 0.01
if coords_are_cartesian:
coords = np.array(coords)
else:
if lattice is None:
raise ValueError(
"coords_are_cartesian False requires the lattice")
coords = lattice.get_cartesian_coords(coords)
ax.text(*(coords + off), s=label, **kwargs)
return fig, ax
def fold_point(p, lattice, coords_are_cartesian=False):
"""
Folds a point with coordinates p inside the first Brillouin zone of the lattice.
Args:
p: coordinates of one point
lattice: Lattice object used to convert from reciprocal to cartesian coordinates
coords_are_cartesian: Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
Returns:
The cartesian coordinates folded inside the first Brillouin zone
"""
if coords_are_cartesian:
p = lattice.get_fractional_coords(p)
else:
p = np.array(p)
p = np.mod(p + 0.5 - 1e-10, 1) - 0.5 + 1e-10
p = lattice.get_cartesian_coords(p)
closest_lattice_point = None
smallest_distance = 10000
for i in (-1, 0, 1):
for j in (-1, 0, 1):
for k in (-1, 0, 1):
lattice_point = np.dot((i, j, k), lattice.matrix)
dist = np.linalg.norm(p - lattice_point)
if closest_lattice_point is None or dist < smallest_distance:
closest_lattice_point = lattice_point
smallest_distance = dist
if not np.allclose(closest_lattice_point, (0, 0, 0)):
p = p - closest_lattice_point
return p
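# Worked example for fold_point (comment only): for a point at fractional
# coordinates (0.7, 0.0, 0.0) of a simple cubic reciprocal lattice, the modulo
# step maps it to (-0.3, 0.0, 0.0); the neighbour search then confirms the
# origin is the closest reciprocal lattice point, so the function returns the
# Cartesian coordinates of (-0.3, 0.0, 0.0).
#
#     folded = fold_point([0.7, 0.0, 0.0], structure.lattice.reciprocal_lattice)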
def plot_points(points, lattice=None, coords_are_cartesian=False, fold=False,
ax=None, **kwargs):
"""
Adds Points to a matplotlib Axes
Args:
points: list of coordinates
lattice: Lattice object used to convert from reciprocal to cartesian coordinates
coords_are_cartesian: Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
Requires lattice if False.
fold: whether the points should be folded inside the first Brillouin Zone.
Defaults to False. Requires lattice if True.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
kwargs: kwargs passed to the matplotlib function 'scatter'. Color defaults to blue
Returns:
matplotlib figure and matplotlib ax
"""
ax, fig, plt = get_ax3d_fig_plt(ax)
if "color" not in kwargs:
kwargs["color"] = "b"
if (not coords_are_cartesian or fold) and lattice is None:
raise ValueError(
"coords_are_cartesian False or fold True require the lattice")
for p in points:
if fold:
p = fold_point(p, lattice,
coords_are_cartesian=coords_are_cartesian)
elif not coords_are_cartesian:
p = lattice.get_cartesian_coords(p)
ax.scatter(*p, **kwargs)
return fig, ax
@add_fig_kwargs
def plot_brillouin_zone_from_kpath(kpath, ax=None, **kwargs):
"""
Gives the plot (as a matplotlib object) of the symmetry line path in
the Brillouin Zone.
Args:
kpath (HighSymmKpath): a HighSymmKPath object
ax: matplotlib :class:`Axes` or None if a new figure should be created.
**kwargs: provided by add_fig_kwargs decorator
Returns:
matplotlib figure
"""
lines = [[kpath.kpath['kpoints'][k] for k in p]
for p in kpath.kpath['path']]
return plot_brillouin_zone(bz_lattice=kpath.prim_rec, lines=lines, ax=ax,
labels=kpath.kpath['kpoints'], **kwargs)
@add_fig_kwargs
def plot_brillouin_zone(bz_lattice, lines=None, labels=None, kpoints=None,
fold=False, coords_are_cartesian=False,
ax=None, **kwargs):
"""
Plots a 3D representation of the Brillouin zone of the structure.
Can add to the plot paths, labels and kpoints
Args:
bz_lattice: Lattice object of the Brillouin zone
lines: list of lists of coordinates. Each list represent a different path
labels: dict containing the label as a key and the coordinates as value.
kpoints: list of coordinates
fold: whether the points should be folded inside the first Brillouin Zone.
Defaults to False. Requires lattice if True.
coords_are_cartesian: Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
kwargs: provided by add_fig_kwargs decorator
Returns:
matplotlib figure
"""
fig, ax = plot_lattice_vectors(bz_lattice, ax=ax)
plot_wigner_seitz(bz_lattice, ax=ax)
if lines is not None:
for line in lines:
plot_path(line, bz_lattice,
coords_are_cartesian=coords_are_cartesian, ax=ax)
if labels is not None:
plot_labels(labels, bz_lattice,
coords_are_cartesian=coords_are_cartesian, ax=ax)
plot_points(labels.values(), bz_lattice,
coords_are_cartesian=coords_are_cartesian,
fold=False, ax=ax)
if kpoints is not None:
plot_points(kpoints, bz_lattice,
coords_are_cartesian=coords_are_cartesian,
ax=ax, fold=fold)
ax.set_xlim3d(-1, 1)
ax.set_ylim3d(-1, 1)
ax.set_zlim3d(-1, 1)
ax.set_aspect('equal')
ax.axis("off")
return fig
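# Usage sketch (illustrative): plotting the Brillouin zone together with a
# high-symmetry k-path. The HighSymmKpath import path is assumed here and may
# differ between pymatgen versions.
#
#     from pymatgen.symmetry.bandstructure import HighSymmKpath
#     kpath = HighSymmKpath(structure)
#     fig = plot_brillouin_zone_from_kpath(kpath)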
def plot_ellipsoid(hessian, center, lattice=None, rescale=1.0, ax=None,
coords_are_cartesian=False, arrows=False, **kwargs):
"""
    Plots a 3D ellipsoid representing the Hessian matrix given in input.
Useful to get a graphical visualization of the effective mass
of a band in a single k-point.
Args:
hessian: the Hessian matrix
        center: the center of the ellipsoid, given in fractional reciprocal
                coordinates unless coords_are_cartesian is True
lattice: Lattice object of the Brillouin zone
rescale: factor for size scaling of the ellipsoid
ax: matplotlib :class:`Axes` or None if a new figure should be created.
coords_are_cartesian: Set to True if you are providing a center in
cartesian coordinates. Defaults to False.
kwargs: kwargs passed to the matplotlib function 'plot_wireframe'.
Color defaults to blue, rstride and cstride
default to 4, alpha defaults to 0.2.
Returns:
matplotlib figure and matplotlib ax
Example of use:
fig,ax=plot_wigner_seitz(struct.reciprocal_lattice)
plot_ellipsoid(hessian,[0.0,0.0,0.0], struct.reciprocal_lattice,ax=ax)
"""
if (not coords_are_cartesian) and lattice is None:
        raise ValueError(
            "coords_are_cartesian False requires the lattice")
if not coords_are_cartesian:
center = lattice.get_cartesian_coords(center)
if "color" not in kwargs:
kwargs["color"] = "b"
if "rstride" not in kwargs:
kwargs["rstride"] = 4
if "cstride" not in kwargs:
kwargs["cstride"] = 4
if "alpha" not in kwargs:
kwargs["alpha"] = 0.2
# calculate the ellipsoid
# find the rotation matrix and radii of the axes
U, s, rotation = np.linalg.svd(hessian)
radii = 1.0 / np.sqrt(s)
# from polar coordinates
u = np.linspace(0.0, 2.0 * np.pi, 100)
v = np.linspace(0.0, np.pi, 100)
x = radii[0] * np.outer(np.cos(u), np.sin(v))
y = radii[1] * np.outer(np.sin(u), np.sin(v))
z = radii[2] * np.outer(np.ones_like(u), np.cos(v))
for i in range(len(x)):
for j in range(len(x)):
[x[i, j], y[i, j], z[i, j]] = np.dot([x[i, j], y[i, j], z[i, j]],
rotation) * rescale + center
# add the ellipsoid to the current axes
ax, fig, plt = get_ax3d_fig_plt(ax)
ax.plot_wireframe(x, y, z, **kwargs)
if arrows:
color = ('b', 'g', 'r')
em = np.zeros((3, 3))
for i in range(3):
em[i, :] = rotation[i, :] / np.linalg.norm(rotation[i, :])
for i in range(3):
ax.quiver3D(center[0], center[1], center[2], em[i, 0], em[i, 1],
em[i, 2], pivot='tail',
arrow_length_ratio=0.2, length=radii[i] * rescale,
color=color[i])
return fig, ax
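# Note on the construction above: for a symmetric positive-definite Hessian the
# SVD coincides with the eigendecomposition, so the ellipsoid semi-axes are
# 1/sqrt(eigenvalue) along the corresponding eigenvectors. Tiny illustration
# (comment only): hessian = np.diag([1.0, 4.0, 9.0]) has singular values
# (9, 4, 1), giving radii (1/3, 1/2, 1) before rescaling and recentering.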
| mit |
huggingface/transformers | examples/pytorch/language-modeling/run_clm.py | 1 | 22263 | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=causal-lm
"""
# You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments.
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
from datasets import load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.testing_utils import CaptureLogger
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.9.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
        metadata={
            "help": "The model checkpoint for weights initialization. "
            "Don't set if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_overrides: Optional[str] = field(
default=None,
metadata={
"help": "Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
def __post_init__(self):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path"
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
block_size: Optional[int] = field(
default=None,
metadata={
"help": "Optional input sequence length after tokenization. "
"The training dataset will be truncated in block of this size for training. "
"Default to the model max input length for single sentence inputs (take into account special tokens)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[int] = field(
default=5,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
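# Illustrative only: when a single .json path is passed to this script (see
# main() below), it is parsed into the three argument dataclasses. A minimal
# example of such a file, with placeholder values, could look like:
#
#     {
#         "model_name_or_path": "gpt2",
#         "dataset_name": "wikitext",
#         "dataset_config_name": "wikitext-2-raw-v1",
#         "do_train": true,
#         "do_eval": true,
#         "output_dir": "/tmp/test-clm"
#     }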
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f", distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
)
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
)
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = (
data_args.train_file.split(".")[-1]
if data_args.train_file is not None
else data_args.validation_file.split(".")[-1]
)
if extension == "txt":
extension = "text"
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
# If no validation data is there, validation_split_percentage will be used to divide the dataset.
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
extension,
data_files=data_files,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
)
raw_datasets["train"] = load_dataset(
extension,
data_files=data_files,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
model = AutoModelForCausalLM.from_config(config)
n_params = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values())
logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params")
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = raw_datasets["train"].column_names
else:
column_names = raw_datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
# since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function
tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")
def tokenize_function(examples):
with CaptureLogger(tok_logger) as cl:
output = tokenizer(examples[text_column_name])
# clm input could be much much longer than block_size
if "Token indices sequence length is longer than the" in cl.out:
tok_logger.warning(
"^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits before being passed to the model."
)
return output
with training_args.main_process_first(desc="dataset map tokenization"):
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on dataset",
)
if data_args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if data_args.block_size > tokenizer.model_max_length:
            logger.warning(
                f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model "
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(data_args.block_size, tokenizer.model_max_length)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
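    # Toy illustration of group_texts (comment only): with block_size = 4 and a
    # batch whose tokenized "input_ids" are [[1, 2, 3], [4, 5, 6, 7, 8]], the
    # concatenation is [1, 2, 3, 4, 5, 6, 7, 8] (total_length 8), split into the
    # chunks [[1, 2, 3, 4], [5, 6, 7, 8]]; "labels" is simply a copy of
    # "input_ids", since the model shifts the labels internally when computing
    # the causal LM loss.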
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
# for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
# to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
with training_args.main_process_first(desc="grouping texts together"):
lm_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
desc=f"Grouping texts in chunks of {block_size}",
)
if training_args.do_train:
if "train" not in tokenized_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = lm_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in tokenized_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = lm_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
# Data collator will default to DataCollatorWithPadding, so we change it.
data_collator=default_data_collator,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
try:
perplexity = math.exp(metrics["eval_loss"])
except OverflowError:
perplexity = float("inf")
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-generation"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
trainer.push_to_hub(**kwargs)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
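# Example invocation (illustrative; the dataset and output paths are placeholders):
#
#     python run_clm.py \
#         --model_name_or_path gpt2 \
#         --dataset_name wikitext \
#         --dataset_config_name wikitext-2-raw-v1 \
#         --do_train \
#         --do_eval \
#         --output_dir /tmp/test-clm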
| apache-2.0 |
harrymunro/Simulations | toast_2.py | 1 | 3561 | # Smart Toast
# Simplified in version 2 to only factor in toasting time
# Simulates the intelligent toaster
import random
import os.path
import numpy as np
import csv
from sklearn import linear_model
# Every time the toast is finished the user tells the toaster whether the toast was under-done, over-done or just right
# This data, along with the toast time and bread width, is saved into the database along with the user's name
# A ML algorithm uses the database to determine the optimal toasting time for the given parameters in order to achieve "just-right" toast
# This means that we have a toaster that learns from user preferences
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
while True:
# Need to ask which user from list or enter new user
# check if userlist exists
userlist_exists = os.path.isfile("userlist.txt") # returns True if file exists
if userlist_exists == True:
text_file = open("userlist.txt", "r+b")
lines = text_file.readlines()
num_lines = len(lines)
choice = False
while choice == False:
print "\nChoose from list of current users by entering number, or enter name of a new user."
for n in range(num_lines):
print "%d - %s" % (n, lines[n])
user_choice = raw_input("Choice: ")
try:
if int(user_choice) in range(num_lines+1):
user_choice = lines[int(user_choice)]
user_choice = user_choice[:-1]
else:
w = str(user_choice) + '\n'
text_file.write(w)
except ValueError:
w = str(user_choice) + '\n'
text_file.write(w)
choice = True
elif userlist_exists == False:
text_file = open("userlist.txt", "w")
user_choice = raw_input("Welcome! Enter your user name: ")
text_file.write(str(user_choice)+"\n")
text_file.close()
filename = user_choice+".csv"
	toast_time = int(raw_input("\nEnter toast time in seconds: ")) # cast to int so the "%d" format below works
# Check whether CSV data file exists and read it, if not then create a default dataframe
file_exists = os.path.isfile(filename) # returns True if file exists
if file_exists == True and file_len(filename) > 2:
with open(filename, 'rb') as csvfile:
data = csv.reader(csvfile, delimiter=' ', quotechar='|')
csvfile.close()
		# feature array: the recorded satisfaction scores (column 2)
first = np.loadtxt(filename, skiprows = 1, usecols = (2,))
first = first.reshape(-1, 1)
		# target array: the recorded toast times (column 1)
second = np.loadtxt(filename, skiprows = 1, usecols = (1,))
second = second.reshape(-1, 1)
blr = linear_model.LinearRegression()
clf = blr.fit(first, second)
toast_time = int(clf.predict(2))
elif file_exists == False:
with open(filename, 'a') as csvfile:
data = csv.writer(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)
data.writerow(['User', 'Toast_Time', 'Satisfaction'])
csvfile.close()
raw_input("\nPress enter to start toasting!")
print "\nToasted bread for %d seconds." % toast_time
x = True
while x == True:
satisfaction = int(raw_input("\nHow was your toast?\n0.Vastly under-toasted\n1.Slightly under-toasted\n2.Just right\n3.Slightly burnt\n4.Badly burnt\nEnter the number and press enter: "))
if satisfaction in (0, 1, 2, 3, 4):
x = False
else:
print "That wasn't a correct option, please choose again.\n"
x = True
with open(filename, 'a') as csvfile:
data = csv.writer(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)
data.writerow([user_choice, toast_time, satisfaction])
csvfile.close()
with open(filename, 'rb') as f:
reader = csv.reader(f)
for row in reader:
print row
f.close()
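# Note on the regression step above (a sketch of the intent, not a behaviour
# change): the linear model is fit with the satisfaction score as the single
# feature and the toast time as the target, so predicting at a score of 2
# ("just right") yields the suggested toast time. Newer scikit-learn releases
# require a 2-D input for prediction, e.g.:
#
#     suggested_time = int(clf.predict(np.array([[2]]))[0])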
| mit |
Route49/521_FinalProject | lateralflow_final.py | 1 | 3070 | #! /usr/bin/python
#Team: Route49
#Names: Cindy Wong, Sonia Parra
#Date Modified: 12-1-2015
#Description: This program takes an image of a lateral flow strip
# and tests to see whether the strip is positive for wild type or
# mutant strains of the sample of interest
#------------Import Statements----------------------
import time
import picamera
import numpy as np
import cv2
import matplotlib.pyplot as plt
#------------Preview with Camera----------------------
video_capture = cv2.VideoCapture(0)
#video continuously runs until the user presses the 'q' key
while True:
ret, frame = video_capture.read()
frame[220:230, 495:545] = 0
frame[210:240, 515:525] = 0
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
#release and destroy all the videos and windows
video_capture.release()
cv2.destroyAllWindows()
#------------Take picture with USB camera--------------
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #convert the image to a gray scale
cv2.imwrite('cellphone/cell4c.jpg', img) #writes the value to a file
#displays the image to the user
cv2.imshow('Sample', img)
cv2.waitKey(0)
cap.release()
cv2.destroyAllWindows()
#------------Histogram Equalization-------------------
#finding the ROI for the wild type
ROIimgWT = img[0:150, 200:500]
cv2.imshow('ROI WT', ROIimgWT)
cv2.waitKey(0)
cv2.imwrite('cellphone/cellROI_WT4c.jpg', ROIimgWT) #save the wild type ROI
cv2.destroyAllWindows()
#wild type histogram equalization
equWT = cv2.equalizeHist(ROIimgWT)
cv2.imshow('Equal Hist WT', equWT)
cv2.waitKey(0)
cv2.imwrite('cellphone/cellHist_WT4c.jpg', equWT) #save the equalized wild type image
cv2.destroyAllWindows()
#finding the ROI for the mutant
ROIimgMnt = img[320:460, 200:500]
cv2.imshow('ROI Mnt', ROIimgMnt)
cv2.waitKey(0)
cv2.imwrite('cellphone/cellROI_Mnt4c.jpg', ROIimgMnt) #save the mutant ROI
cv2.destroyAllWindows()
#mutant histogram equalization
equMnt = cv2.equalizeHist(ROIimgMnt)
cv2.imshow('Equal Hist Mnt', equMnt)
cv2.waitKey(0)
cv2.imwrite('cellphone/cellHist_Mnt4c.jpg', equMnt) #save the equalized mutant image
cv2.destroyAllWindows()
#------------Extract Intensities along line----------
lineWT = equWT[75, 0:200]
lineMnt = equMnt[75, 0:200]
#plot intensities over the wild type line
plt.plot(lineWT)
plt.savefig('cellphone/cellWTplot4c.jpg')
plt.show()
#plot intensities over the mutant type line
plt.plot(lineMnt)
plt.savefig('cellphone/cellMntplot4c.jpg')
plt.show()
countWT = 0 #count of the number of points below the threshold for wild type
countMnt = 0 #count of the number of points below the threshold for mutant
#counts the number of pixels below the intensity threshold for the wild type and mutant lines
for j in range(200):
if equWT[75,j] <= 55:
countWT = countWT + 1
if equMnt[75,j] <= 55:
countMnt = countMnt + 1
#classifies the wild type strip as positive if enough pixels fall below the threshold
if countWT >= 20:
print 'positive WT sample'
else:
print 'negative WT sample'
#classifies the mutant strip as positive if enough pixels fall below the threshold
if countMnt >= 35:
print 'positive Mnt sample'
else:
print 'negative Mnt sample'
| gpl-2.0 |
CforED/Machine-Learning | sklearn/metrics/regression.py | 9 | 17386 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Manoj Kumar <[email protected]>
# Michael Eickenberg <[email protected]>
# Konstantin Shmelkov <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
from ..externals.six import string_types
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
multioutput : array-like or string in ['raw_values', uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
type_true : one of {'continuous', continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
        raise ValueError("y_true and y_pred have different number of outputs "
                         "({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
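# Illustrative behaviour of _check_reg_targets (comment only): for
# y_true = [[0.5, 1], [-1, 1]] and y_pred = [[0, 2], [-1, 2]] with
# multioutput='raw_values', it returns y_type='continuous-multioutput',
# both arrays with shape (2, 2), and the 'raw_values' keyword unchanged.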
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing None as weights to np.average results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
Default value corresponds to 'variance_weighted'; this behaviour is
deprecated since version 0.17 and will be changed to 'uniform_average'
starting from 0.19.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
# arbitrarily set to zero to avoid -inf scores; having a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value which is deprecated since "
"0.17, it will be changed to 'uniform_average' "
"starting from 0.19.",
DeprecationWarning)
multioutput = 'variance_weighted'
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing None as weights results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid failing on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
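# Illustrative sketch (not part of scikit-learn's public API): for a single output
# with no sample weights, the score above reduces to the textbook formula
# R^2 = 1 - SS_res / SS_tot, as the small helper below shows.
def _r2_single_output_sketch():
    import numpy as np
    y_true = np.array([3.0, -0.5, 2.0, 7.0])
    y_pred = np.array([2.5, 0.0, 2.0, 8.0])
    ss_res = ((y_true - y_pred) ** 2).sum()          # residual sum of squares = 1.5
    ss_tot = ((y_true - y_true.mean()) ** 2).sum()   # total sum of squares = 29.1875
    return 1.0 - ss_res / ss_tot                     # ~0.948..., matching the doctest above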
| bsd-3-clause |
jreback/pandas | pandas/tests/tools/test_to_time.py | 8 | 2019 | from datetime import time
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import Series
import pandas._testing as tm
from pandas.core.tools.datetimes import to_time as to_time_alias
from pandas.core.tools.times import to_time
class TestToTime:
@td.skip_if_has_locale
def test_parsers_time(self):
# GH#11818
strings = [
"14:15",
"1415",
"2:15pm",
"0215pm",
"14:15:00",
"141500",
"2:15:00pm",
"021500pm",
time(14, 15),
]
expected = time(14, 15)
for time_string in strings:
assert to_time(time_string) == expected
new_string = "14.15"
msg = r"Cannot convert arg \['14\.15'\] to a time"
with pytest.raises(ValueError, match=msg):
to_time(new_string)
assert to_time(new_string, format="%H.%M") == expected
arg = ["14:15", "20:20"]
expected_arr = [time(14, 15), time(20, 20)]
assert to_time(arg) == expected_arr
assert to_time(arg, format="%H:%M") == expected_arr
assert to_time(arg, infer_time_format=True) == expected_arr
assert to_time(arg, format="%I:%M%p", errors="coerce") == [None, None]
res = to_time(arg, format="%I:%M%p", errors="ignore")
tm.assert_numpy_array_equal(res, np.array(arg, dtype=np.object_))
msg = "Cannot convert.+to a time with given format"
with pytest.raises(ValueError, match=msg):
to_time(arg, format="%I:%M%p", errors="raise")
tm.assert_series_equal(
to_time(Series(arg, name="test")), Series(expected_arr, name="test")
)
res = to_time(np.array(arg))
assert isinstance(res, list)
assert res == expected_arr
def test_to_time_alias():
expected = time(14, 15)
with tm.assert_produces_warning(FutureWarning):
result = to_time_alias(expected)
assert result == expected
| bsd-3-clause |
MIREL-UNC/wikipedia-ner | wikipedianer/classification/base.py | 1 | 1989 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import pandas
import numpy
from sklearn.metrics import (accuracy_score, precision_score, recall_score,
f1_score)
class BaseClassifier(object):
def __init__(self):
self.test_results = pandas.DataFrame(
columns=['accuracy', 'class', 'precision', 'recall', 'fscore'])
if not hasattr(self, 'cl_iteration'):
self.cl_iteration = 1
def add_test_results(self, accuracy, precision, recall, fscore, classes,
y_true=None):
self.test_results = self.test_results.append({'accuracy': accuracy},
ignore_index=True)
if y_true is None:
y_true = numpy.array([])
for cls_idx, cls in enumerate(classes):
self.test_results = self.test_results.append({
'class': cls,
'precision': precision[cls_idx],
'recall': recall[cls_idx],
'fscore': fscore[cls_idx],
'support': (y_true == cls_idx).sum()
}, ignore_index=True)
def get_metrics(self, y_true, y_pred, return_extras=True):
accuracy = accuracy_score(y_true, y_pred.astype(y_true.dtype))
if not return_extras:
return accuracy
else:
labels = numpy.arange(self.dataset.output_size(self.cl_iteration))
precision = precision_score(y_true, y_pred, labels=labels,
average=None)
recall = recall_score(y_true, y_pred, labels=labels, average=None)
fscore = f1_score(y_true, y_pred, labels=labels, average=None)
return accuracy, precision, recall, fscore, y_true, y_pred
def evaluate(self, dataset_name, *args, **kwargs):
raise NotImplementedError
def train(self, *args, **kwargs):
raise NotImplementedError
| gpl-3.0 |
brainstorm/bcbio-nextgen | bcbio/variation/coverage.py | 1 | 16399 | """Examine and query coverage in sequencing experiments.
Provides estimates of coverage intervals based on callable regions
"""
import itertools
import os
import shutil
import yaml
import pybedtools
import pandas as pd
import numpy as np
import pysam
from bcbio.variation.bedutils import clean_file
from bcbio.utils import (file_exists, chdir, safe_makedir,
append_stem, copy_plus)
from bcbio import utils
from bcbio.bam import ref, sambamba
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.pipeline import shared
def assign_interval(data):
"""Identify coverage based on percent of genome covered and relation to targets.
Classifies coverage into 3 categories:
- genome: Full genome coverage
- regional: Regional coverage, like exome capture, with off-target reads
- amplicon: Amplification-based regional coverage without off-target reads
"""
genome_cov_thresh = 0.40 # fraction of the genome that must be covered to call whole genome analysis
offtarget_thresh = 0.05 # fraction of off-target reads required to call the experiment capture (not amplification) based
if not dd.get_coverage_interval(data):
vrs = dd.get_variant_regions_merged(data)
callable_file = dd.get_sample_callable(data)
if vrs:
callable_size = pybedtools.BedTool(vrs).total_coverage()
else:
callable_size = pybedtools.BedTool(callable_file).total_coverage()
total_size = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])])
genome_cov_pct = callable_size / float(total_size)
if genome_cov_pct > genome_cov_thresh:
cov_interval = "genome"
offtarget_pct = 0.0
elif not vrs:
cov_interval = "regional"
offtarget_pct = 0.0
else:
offtarget_pct = _count_offtarget(data, dd.get_align_bam(data) or dd.get_work_bam(data),
vrs or callable_file, "variant_regions")
if offtarget_pct > offtarget_thresh:
cov_interval = "regional"
else:
cov_interval = "amplicon"
logger.info("%s: Assigned coverage as '%s' with %.1f%% genome coverage and %.1f%% offtarget coverage"
% (dd.get_sample_name(data), cov_interval, genome_cov_pct * 100.0, offtarget_pct * 100.0))
data["config"]["algorithm"]["coverage_interval"] = cov_interval
return data
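# Worked example of the decision above (made-up numbers): callable regions spanning
# 2% of the genome fall below the 40% genome threshold, so the off-target fraction
# decides the call -- 30% off-target reads (> 5%) gives "regional", while 1%
# off-target reads (<= 5%) gives "amplicon".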
def _count_offtarget(data, bam_file, bed_file, target_name):
mapped_unique = sambamba.number_of_mapped_reads(data, bam_file, keep_dups=False)
ontarget = sambamba.number_of_mapped_reads(
data, bam_file, keep_dups=False, bed_file=bed_file, target_name=target_name)
if mapped_unique:
return float(mapped_unique - ontarget) / mapped_unique
return 0.0
def calculate(bam_file, data):
"""Calculate coverage in parallel using samtools depth through goleft.
samtools depth removes duplicates and secondary reads from the counts:
if ( b->core.flag & (BAM_FUNMAP | BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP) ) continue;
"""
params = {"window_size": 5000, "parallel_window_size": 1e5, "min": dd.get_coverage_depth_min(data),
"high_multiplier": 20}
prefix = os.path.join(
utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data))),
"%s-coverage" % (dd.get_sample_name(data)))
depth_file = prefix + ".depth.bed"
callable_file = prefix + ".callable.bed"
variant_regions = dd.get_variant_regions_merged(data)
variant_regions_avg_cov = get_average_coverage(data, bam_file, variant_regions, "variant_regions")
if not utils.file_uptodate(callable_file, bam_file):
ref_file = dd.get_ref_file(data)
cmd = ["goleft", "depth", "--q", "1",
"--mincov", str(params["min"]), "--reference", ref_file,
"--processes", str(dd.get_num_cores(data)), "--ordered"]
max_depth = _get_max_depth(variant_regions_avg_cov, params, data)
if max_depth:
cmd += ["--maxmeandepth", str(int(max_depth))]
with file_transaction(data, depth_file) as tx_depth_file:
with utils.chdir(os.path.dirname(tx_depth_file)):
tx_callable_file = tx_depth_file.replace(".depth.bed", ".callable.bed")
prefix = tx_depth_file.replace(".depth.bed", "")
cmd += ["--prefix", prefix, bam_file]
bcbio_env = utils.get_bcbio_env()
msg = "Calculate coverage: %s" % dd.get_sample_name(data)
do.run(cmd, msg, env=bcbio_env)
shutil.move(tx_callable_file, callable_file)
final_callable = _subset_to_variant_regions(callable_file, variant_regions, data)
return depth_file, final_callable, _extract_highdepth(final_callable, data), variant_regions_avg_cov
def _create_genome_regions(callable_file, data):
"""Create whole genome contigs we want to process, only non-alts.
Skips problem contigs like HLAs for downstream analysis.
"""
variant_regions = "%s-genome.bed" % utils.splitext_plus(callable_file)[0]
with file_transaction(data, variant_regions) as tx_variant_regions:
with open(tx_variant_regions, "w") as out_handle:
for c in shared.get_noalt_contigs(data):
out_handle.write("%s\t%s\t%s\n" % (c.name, 0, c.size))
return variant_regions
def _subset_to_variant_regions(callable_file, variant_regions, data):
"""Subset output callable file to only variant regions of interest.
"""
out_file = "%s-vrsubset.bed" % utils.splitext_plus(callable_file)[0]
if not utils.file_uptodate(out_file, callable_file):
if not variant_regions:
variant_regions = _create_genome_regions(callable_file, data)
with file_transaction(data, out_file) as tx_out_file:
pybedtools.BedTool(callable_file).intersect(variant_regions).saveas(tx_out_file)
return out_file
def _extract_highdepth(callable_file, data):
out_file = callable_file.replace(".callable.bed", ".highdepth.bed")
if not utils.file_uptodate(out_file, callable_file):
with file_transaction(data, out_file) as tx_out_file:
with open(callable_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
parts = line.strip().split("\t")
if "EXCESSIVE_COVERAGE" in parts:
out_handle.write("\t".join(parts[:3] + ["highdepth"]) + "\n")
return out_file
def _get_max_depth(average_coverage, params, data):
"""Calculate maximum depth based on a rough multiplier of average coverage.
"""
if dd.get_coverage_interval(data) == "genome":
avg_cov = min(30.0, average_coverage)
return avg_cov * params["high_multiplier"]
def _get_cache_file(data, target_name):
prefix = os.path.join(
utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data))),
"%s-coverage" % (dd.get_sample_name(data)))
cache_file = prefix + "-" + target_name + "-stats.yaml"
return cache_file
def _read_cache(cache_file, reuse_cmp_files):
reuse_cmp_file = [fn for fn in reuse_cmp_files if fn]
if all(utils.file_uptodate(cache_file, fn) for fn in reuse_cmp_file):
with open(cache_file) as in_handle:
return yaml.safe_load(in_handle)
return dict()
def _write_cache(cache, cache_file):
with open(cache_file, "w") as out_handle:
yaml.safe_dump(cache, out_handle, default_flow_style=False, allow_unicode=False)
def get_average_coverage(data, bam_file, bed_file=None, target_name="genome"):
cache_file = _get_cache_file(data, target_name)
cache = _read_cache(cache_file, [bam_file, bed_file])
if "avg_coverage" in cache:
return cache["avg_coverage"]
if bed_file:
avg_cov = _average_bed_coverage(data, bed_file, bam_file, target_name=target_name)
else:
avg_cov = _average_genome_coverage(data, bam_file)
cache["avg_coverage"] = avg_cov
_write_cache(cache, cache_file)
return avg_cov
def _average_genome_coverage(data, bam_file):
total = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])])
read_counts = sambamba.number_of_mapped_reads(data, bam_file, keep_dups=False)
with pysam.Samfile(bam_file, "rb") as pysam_bam:
read_size = np.median(list(itertools.islice((a.query_length for a in pysam_bam.fetch()), int(1e5))))
avg_cov = float(read_counts * read_size) / total
return avg_cov
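# Worked example of the estimate above (made-up numbers): 300 million mapped reads
# with a median read length of 100 bp over a 3.1 Gb genome give roughly
# (300e6 * 100) / 3.1e9 ~= 9.7x average coverage.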
def _average_bed_coverage(data, bed_file, bam_file, target_name):
sambamba_depth_file = regions_coverage(data, bed_file, bam_file, target_name)
avg_covs = []
mean_cov_col = None
total_len = 0
with open(sambamba_depth_file) as fh:
for line in fh:
if line.startswith('#'):
mean_cov_col = line.split('\t').index('meanCoverage')
continue
line_tokens = line.replace('\n', '').split()
start, end = map(int, line_tokens[1:3])
size = end - start
avg_covs.append(float(line_tokens[mean_cov_col]) * size)
total_len += size
avg_cov = sum(avg_covs) / total_len if total_len > 0 else 0
return avg_cov
def checkpoint(stem):
def check_file(f):
def wrapper(*args, **kwargs):
out_file = append_stem(args[0], stem)
if file_exists(out_file):
logger.debug("Skipping %s" % out_file)
return out_file
return f(*args, **kwargs)
return wrapper
return check_file
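# A minimal self-contained sketch of the same skip-if-output-exists idea, using only
# the standard library (hypothetical helper with made-up names, independent of
# bcbio's append_stem/file_exists utilities):
def _skip_if_exists_sketch(suffix):
    import os
    def decorator(func):
        def wrapper(in_file, *args, **kwargs):
            base, ext = os.path.splitext(in_file)
            out_file = base + suffix + ext
            if os.path.exists(out_file):
                return out_file  # reuse the previously written result
            return func(in_file, *args, **kwargs)
        return wrapper
    return decorator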
@checkpoint("_summary")
def _calculate_percentiles(in_file, sample, data=None, cutoffs=None):
"""
Summarize, for each completeness cutoff (5, 10, 20, 50, ...), the
per-region percentage of bases covered above that cutoff, reported at
seven percentile points across regions.
"""
has_data = False
with open(in_file) as in_handle:
for i, line in enumerate(in_handle):
if i > 0:
has_data = True
break
if not has_data:
return in_file
out_file = append_stem(in_file, "_summary")
out_total_file = append_stem(in_file, "_total_summary")
dt = pd.read_csv(in_file, sep="\t", index_col=False)
pct = dict()
pct_bases = dict()
size = np.array(dt["chromEnd"]) - np.array(dt["chromStart"])
for cutoff in [h for h in list(dt) if h.startswith("percentage")]:
if cutoffs and int(cutoff.split("percentage")[1]) in cutoffs:
a = np.array(dt[cutoff])
for p_point in [0.01, 10, 25, 50, 75, 90, 99.9]:
q = np.percentile(a, p_point)
pct[(cutoff, p_point)] = q
pct_bases[cutoff] = sum(size * a)/float(sum(size))
with file_transaction(data, out_total_file) as tx_file:
with open(tx_file, 'w') as out_handle:
print >>out_handle, "cutoff_reads\tbases_pct\tsample"
for k in pct_bases:
print >>out_handle, "\t".join(map(str, [k, pct_bases[k], sample]))
with file_transaction(data, out_file) as tx_file:
with open(tx_file, 'w') as out_handle:
print >>out_handle, "cutoff_reads\tregion_pct\tbases_pct\tsample"
for k in pct:
print >>out_handle, "\t".join(map(str, [k[0], k[1], pct[k], sample]))
# These metrics are being moved to MultiQC; the older files will be removed
# once bcbreport accepts the new ones, to avoid errors while porting
# everything to MultiQC
# These files will be copied to final
out_file_fixed = os.path.join(os.path.dirname(out_file), "%s_bcbio_coverage.txt" % sample)
out_total_fixed = os.path.join(os.path.dirname(out_file), "%s_bcbio_coverage_avg.txt" % sample)
copy_plus(out_file, out_file_fixed)
copy_plus(out_total_file, out_total_fixed)
return out_file_fixed
def _read_regions(fn):
"""
Map each region (keyed by chromosome and start position) to its
line of coverage statistics.
"""
regions = {}
with open(fn) as in_handle:
for line in in_handle:
if line.startswith("chrom"):
regions["header"] = line.strip()
continue
idx = "".join(line.split("\t")[:2])
regions[idx] = line.strip()
return regions
@checkpoint("_fixed")
def _add_high_covered_regions(in_file, bed_file, sample, data=None):
"""
Add regions with higher coverage than the limit
as fully covered.
"""
out_file = append_stem(in_file, "_fixed")
regions = _read_regions(in_file)
with file_transaction(data, out_file) as out_tx:
with open(bed_file) as in_handle:
with open(out_tx, 'w') as out_handle:
if "header" in regions:
print >>out_handle, regions["header"]
for line in in_handle:
idx = "".join(line.split("\t")[:2])
if idx not in regions:
print >>out_handle, "%s\t1000\t1000\t100\t100\t100\t100\t100\t100\t100\t100\t100\t100\t%s" % (line.strip(), sample)
else:
print >>out_handle, regions[idx]
return out_file
def _summary_variants(in_file, out_file, data=None):
"""Parse GC and depth variant file
to be ready for multiqc.
"""
dt = pd.read_csv(in_file, sep="\t", index_col=False,
dtype={"CG": np.float64, "depth": np.float64}, na_values=["."]).dropna()
row = list()
with file_transaction(data, out_file) as out_tx:
cg = dt["CG"]
d = dt["depth"]
for p_point in [0.01, 10, 25, 50, 75, 90, 99.9, 100]:
if len(cg) > 0:
q_cg = np.percentile(cg, p_point)
else:
q_cg = 0
if len(d) > 0:
q_d = np.percentile(d, p_point)
else:
q_d = 0
row.append([p_point, q_d, q_cg])
pd.DataFrame(row).to_csv(out_tx, header=["pct_variants", "depth", "cg"], index=False, sep="\t")
def regions_coverage(data, bed_file, bam_file, target_name, depth_thresholds=None):
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "coverage", dd.get_sample_name(data)))
out_file = os.path.join(work_dir, target_name + "_regions_depth.bed")
if utils.file_uptodate(out_file, bam_file) and utils.file_uptodate(out_file, bed_file):
return out_file
with file_transaction(data, out_file) as tx_out_file:
cmdl = sambamba.make_command(data, "depth region", bam_file, bed_file, depth_thresholds=depth_thresholds)
cmdl += " -o " + tx_out_file
message = "Calculating regions coverage of {target_name} in {bam_file}"
do.run(cmdl, message.format(**locals()))
return out_file
def coverage_region_detailed_stats(data, out_dir, extra_cutoffs=None):
"""
Calculate coverage at different completeness cutoffs
for the regions defined in the coverage option.
"""
bed_file = dd.get_coverage(data)
if not bed_file or not utils.file_exists(bed_file):
return None
work_dir = safe_makedir(out_dir)
cleaned_bed = clean_file(bed_file, data, prefix="cov-", simple=True)
cutoffs = {1, 5, 10, 20, 50, 100, 250, 500, 1000, 5000, 10000, 50000}
with chdir(work_dir):
in_bam = dd.get_align_bam(data) or dd.get_work_bam(data)
sample = dd.get_sample_name(data)
logger.debug("doing coverage for %s" % sample)
parse_file = os.path.join(sample + "_coverage.bed")
if utils.file_uptodate(parse_file, cleaned_bed) and utils.file_uptodate(parse_file, in_bam):
pass
else:
with file_transaction(data, parse_file) as out_tx:
depth_thresholds = sorted(list(cutoffs | extra_cutoffs))
cmdl = sambamba.make_command(data, "depth region", in_bam, cleaned_bed, depth_thresholds=depth_thresholds)
cmdl += " | sed 's/# chrom/chrom/' > " + out_tx
do.run(cmdl, "Run coverage regional analysis for {}".format(sample))
parse_file = _calculate_percentiles(os.path.abspath(parse_file), sample, data=data, cutoffs=cutoffs)
return os.path.abspath(parse_file)
| mit |
rs2/pandas | pandas/tests/indexes/datetimes/test_shift.py | 2 | 5290 | from datetime import datetime
import pytest
import pytz
from pandas.errors import NullFrequencyError
import pandas as pd
from pandas import DatetimeIndex, Series, date_range
import pandas._testing as tm
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexShift:
# -------------------------------------------------------------
# DatetimeIndex.shift is used in integer addition
def test_dti_shift_tzaware(self, tz_naive_fixture):
# GH#9903
tz = tz_naive_fixture
idx = pd.DatetimeIndex([], name="xxx", tz=tz)
tm.assert_index_equal(idx.shift(0, freq="H"), idx)
tm.assert_index_equal(idx.shift(3, freq="H"), idx)
idx = pd.DatetimeIndex(
["2011-01-01 10:00", "2011-01-01 11:00", "2011-01-01 12:00"],
name="xxx",
tz=tz,
freq="H",
)
tm.assert_index_equal(idx.shift(0, freq="H"), idx)
exp = pd.DatetimeIndex(
["2011-01-01 13:00", "2011-01-01 14:00", "2011-01-01 15:00"],
name="xxx",
tz=tz,
freq="H",
)
tm.assert_index_equal(idx.shift(3, freq="H"), exp)
exp = pd.DatetimeIndex(
["2011-01-01 07:00", "2011-01-01 08:00", "2011-01-01 09:00"],
name="xxx",
tz=tz,
freq="H",
)
tm.assert_index_equal(idx.shift(-3, freq="H"), exp)
def test_dti_shift_freqs(self):
# test shift for DatetimeIndex and non DatetimeIndex
# GH#8083
drange = pd.date_range("20130101", periods=5)
result = drange.shift(1)
expected = pd.DatetimeIndex(
["2013-01-02", "2013-01-03", "2013-01-04", "2013-01-05", "2013-01-06"],
freq="D",
)
tm.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = pd.DatetimeIndex(
["2012-12-31", "2013-01-01", "2013-01-02", "2013-01-03", "2013-01-04"],
freq="D",
)
tm.assert_index_equal(result, expected)
result = drange.shift(3, freq="2D")
expected = pd.DatetimeIndex(
["2013-01-07", "2013-01-08", "2013-01-09", "2013-01-10", "2013-01-11"],
freq="D",
)
tm.assert_index_equal(result, expected)
def test_dti_shift_int(self):
rng = date_range("1/1/2000", periods=20)
result = rng + 5 * rng.freq
expected = rng.shift(5)
tm.assert_index_equal(result, expected)
result = rng - 5 * rng.freq
expected = rng.shift(-5)
tm.assert_index_equal(result, expected)
def test_dti_shift_no_freq(self):
# GH#19147
dti = pd.DatetimeIndex(["2011-01-01 10:00", "2011-01-01"], freq=None)
with pytest.raises(NullFrequencyError, match="Cannot shift with no freq"):
dti.shift(2)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_shift_localized(self, tzstr):
dr = date_range("2011/1/1", "2012/1/1", freq="W-FRI")
dr_tz = dr.tz_localize(tzstr)
result = dr_tz.shift(1, "10T")
assert result.tz == dr_tz.tz
def test_dti_shift_across_dst(self):
# GH 8616
idx = date_range("2013-11-03", tz="America/Chicago", periods=7, freq="H")
s = Series(index=idx[:-1], dtype=object)
result = s.shift(freq="H")
expected = Series(index=idx[1:], dtype=object)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"shift, result_time",
[
[0, "2014-11-14 00:00:00"],
[-1, "2014-11-13 23:00:00"],
[1, "2014-11-14 01:00:00"],
],
)
def test_dti_shift_near_midnight(self, shift, result_time):
# GH 8616
dt = datetime(2014, 11, 14, 0)
dt_est = pytz.timezone("EST").localize(dt)
s = Series(data=[1], index=[dt_est])
result = s.shift(shift, freq="H")
expected = Series(1, index=DatetimeIndex([result_time], tz="EST"))
tm.assert_series_equal(result, expected)
def test_shift_periods(self):
# GH#22458 : argument 'n' was deprecated in favor of 'periods'
idx = pd.date_range(start=START, end=END, periods=3)
tm.assert_index_equal(idx.shift(periods=0), idx)
tm.assert_index_equal(idx.shift(0), idx)
@pytest.mark.parametrize("freq", ["B", "C"])
def test_shift_bday(self, freq):
rng = date_range(START, END, freq=freq)
shifted = rng.shift(5)
assert shifted[0] == rng[5]
assert shifted.freq == rng.freq
shifted = rng.shift(-5)
assert shifted[5] == rng[0]
assert shifted.freq == rng.freq
shifted = rng.shift(0)
assert shifted[0] == rng[0]
assert shifted.freq == rng.freq
def test_shift_bmonth(self):
rng = date_range(START, END, freq=pd.offsets.BMonthEnd())
shifted = rng.shift(1, freq=pd.offsets.BDay())
assert shifted[0] == rng[0] + pd.offsets.BDay()
rng = date_range(START, END, freq=pd.offsets.BMonthEnd())
with tm.assert_produces_warning(pd.errors.PerformanceWarning):
shifted = rng.shift(1, freq=pd.offsets.CDay())
assert shifted[0] == rng[0] + pd.offsets.CDay()
| bsd-3-clause |
LUTAN/tensorflow | tensorflow/examples/learn/text_classification_character_cnn.py | 30 | 4292 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is an example of using convolutional networks over characters for
DBpedia dataset to predict class from description of an entity.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat of an alternative to the Lua code available here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
def char_cnn_model(features, target):
"""Character level convolutional neural network model to predict classes."""
target = tf.one_hot(target, 15, 1, 0)
byte_list = tf.reshape(
tf.one_hot(features, 256), [-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.contrib.layers.convolution2d(
byte_list, N_FILTERS, FILTER_SHAPE1, padding='VALID')
# Add a ReLU for non linearity.
conv1 = tf.nn.relu(conv1)
# Max pooling across output of Convolution+Relu.
pool1 = tf.nn.max_pool(
conv1,
ksize=[1, POOLING_WINDOW, 1, 1],
strides=[1, POOLING_STRIDE, 1, 1],
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.contrib.layers.convolution2d(
pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.contrib.layers.fully_connected(pool2, 15, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = learn.Estimator(model_fn=char_cnn_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
jmschrei/scikit-learn | sklearn/semi_supervised/label_propagation.py | 35 | 15442 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully connected graph between all given points and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. Running them can, however, be
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted, check_array
from ..utils.multiclass import check_classification_targets
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
check_classification_targets(y)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses an affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| bsd-3-clause |
Nyker510/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 121 | 6117 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
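# Worked single-update sketch of the PA-I ("hinge") rule implemented above, with
# made-up numbers: loss = max(0, 1 - y * w.x) and step = min(C, loss / ||x||^2).
def _pa1_single_update_sketch():
    import numpy as np
    w = np.zeros(2)
    x, y, C = np.array([1.0, 2.0]), 1.0, 1.0
    loss = max(1 - y * np.dot(w, x), 0)   # = 1.0, since w starts at zero
    step = min(C, loss / np.dot(x, x))    # = min(1, 1/5) = 0.2
    w += step * y * x                     # -> array([0.2, 0.4])
    return w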
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
arjunkhode/ASP | lectures/08-Sound-transformations/plots-code/sineModelTimeScale-functions.py | 24 | 2725 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
from scipy.fftpack import fft, ifft, fftshift
import sys, os, functools, time, math
from scipy.interpolate import interp1d
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import sineModel as SM
import stft as STFT
import sineModel as SM
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/mridangam.wav')
x1 = x[:int(1.49*fs)]
w = np.hamming(801)
N = 2048
t = -90
minSineDur = .005
maxnSines = 150
freqDevOffset = 20
freqDevSlope = 0.02
Ns = 512
H = Ns/4
sfreq, smag, sphase = SM.sineModelAnal(x1, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
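# timeScale alternates (input time, output time) anchor pairs in seconds: the even
# entries are read back below as input times and the odd entries as output times.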
timeScale = np.array([.01, .0, .03, .03, .335, .8, .355, .82, .671, 1.0, .691, 1.02, .858, 1.1, .878, 1.12, 1.185, 1.8, 1.205, 1.82, 1.49, 2.0])
L = sfreq[:,0].size # number of input frames
maxInTime = max(timeScale[::2]) # maximum value used as input times
maxOutTime = max(timeScale[1::2]) # maximum value used in output times
outL = int(L*maxOutTime/maxInTime) # number of output frames
inFrames = L*timeScale[::2]/maxInTime # input time values in frames
outFrames = outL*timeScale[1::2]/maxOutTime # output time values in frames
timeScalingEnv = interp1d(outFrames, inFrames, fill_value=0) # interpolation function
indexes = timeScalingEnv(np.arange(outL)) # generate frame indexes for the output
ysfreq = sfreq[round(indexes[0]),:] # first output frame
ysmag = smag[round(indexes[0]),:] # first output frame
for l in indexes[1:]: # generate frames for output sine tracks
ysfreq = np.vstack((ysfreq, sfreq[round(l),:]))
ysmag = np.vstack((ysmag, smag[round(l),:]))
mag1 = np.sum(10**(smag/20), axis=1)
mag2 = np.sum(10**(ysmag/20), axis=1)
mag1 = 20*np.log10(mag1)
mag2 = 20*np.log10(mag2)
plt.figure(1, figsize=(9, 7))
maxplotfreq = 4000.0
plt.subplot(3,1,1)
plt.plot(H*indexes/float(fs), H*np.arange(outL)/float(fs), color='k', lw=1.5)
plt.autoscale(tight=True)
plt.xlabel('input times')
plt.ylabel('output times')
plt.title('output scaling')
plt.subplot(3,1,2)
plt.plot(H*np.arange(mag1.size)/float(fs), mag1, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('input magnitude sines')
plt.subplot(3,1,3)
plt.plot(H*np.arange(mag2.size)/float(fs), mag2, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('output magnitude sines')
plt.tight_layout()
plt.savefig('sineModelTimeScale-functions.png')
plt.show()
| agpl-3.0 |
DGrady/pandas | pandas/tests/sparse/test_groupby.py | 18 | 1736 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pandas.util.testing as tm
class TestSparseGroupBy(object):
def setup_method(self, method):
self.dense = pd.DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8),
'E': [np.nan, np.nan, 1, 2,
np.nan, 1, np.nan, np.nan]})
self.sparse = self.dense.to_sparse()
def test_first_last_nth(self):
# tests for first / last / nth
sparse_grouped = self.sparse.groupby('A')
dense_grouped = self.dense.groupby('A')
tm.assert_frame_equal(sparse_grouped.first(),
dense_grouped.first())
tm.assert_frame_equal(sparse_grouped.last(),
dense_grouped.last())
tm.assert_frame_equal(sparse_grouped.nth(1),
dense_grouped.nth(1))
def test_aggfuncs(self):
sparse_grouped = self.sparse.groupby('A')
dense_grouped = self.dense.groupby('A')
tm.assert_frame_equal(sparse_grouped.mean(),
dense_grouped.mean())
# ToDo: sparse sum includes str column
# tm.assert_frame_equal(sparse_grouped.sum(),
# dense_grouped.sum())
tm.assert_frame_equal(sparse_grouped.count(),
dense_grouped.count())
| bsd-3-clause |
ralbayaty/KaggleRetina | testing/makePatches.py | 1 | 1890 | from skimage.color import rgb2gray
from PIL import Image
import matplotlib.pyplot as plt
import cv2
import sys
import os
import numpy as np
from math import floor
import cPickle as pickle
import simplejson
def pickle_keypoints(keypoints, descriptors):
temp_array = []
for i in range(len(descriptors)):
temp = (keypoints[i], descriptors[i])
temp_array.append(temp)
return temp_array
#########
try:
file_name = sys.argv[1]
except IndexError:
    print("Didn't give me a file.")
    sys.exit(1)
img = cv2.imread(file_name, 1)
m, n, channels = img.shape
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray_ = Image.fromarray(gray)
# Check if dimensions are above desired, if so then resize keeping aspect ratio
w1, w2 = 201, 201
# Need to add 1 to each if w1 or w2 is even
center1 = int(floor(w1/2))
center2 = int(floor(w2/2))
black_thres = 0.50 # maximum fraction of the patch allowed to be pure black
N = 100
patches = []
# if m > w1 or n > w2:
# gray_.thumbnail((m,n), Image.ANTIALIAS)
while len(patches) < N:
# select a random center location for the patch from the image
rand_m = np.random.randint(0+center1, m-center1)
rand_n = np.random.randint(0+center2, n-center2)
# Ensure random selected pixel locations are valid
assert rand_m-center1 >= 0
assert rand_m+center1 <= m
assert rand_n-center2 >= 0
assert rand_n+center2 <= n
patch = np.copy(gray[(rand_m-center1):(rand_m+center1), (rand_n-center2):(rand_n+center2)])
hist_full = cv2.calcHist([patch], [0], None, [256], [0, 256])
if sum(hist_full) > 0:
hist_full = np.divide(hist_full, sum(hist_full))
if hist_full[0] < black_thres:
patches.append(patch)
cv2.imshow('patch', np.asarray(patch))
if 0xFF & cv2.waitKey(50) == 27:
pass
cv2.destroyAllWindows()
print("Finished! " + str(len(patches)) + " patches created.") | gpl-2.0 |
edhuckle/statsmodels | statsmodels/tsa/stattools.py | 26 | 37127 | """
Statistical tools for time series analysis
"""
from statsmodels.compat.python import (iteritems, range, lrange, string_types, lzip,
zip, map)
import numpy as np
from numpy.linalg import LinAlgError
from scipy import stats
from statsmodels.regression.linear_model import OLS, yule_walker
from statsmodels.tools.tools import add_constant, Bunch
from .tsatools import lagmat, lagmat2ds, add_trend
from .adfvalues import mackinnonp, mackinnoncrit
from statsmodels.tsa.arima_model import ARMA
from statsmodels.compat.scipy import _next_regular
__all__ = ['acovf', 'acf', 'pacf', 'pacf_yw', 'pacf_ols', 'ccovf', 'ccf',
'periodogram', 'q_stat', 'coint', 'arma_order_select_ic',
'adfuller']
#NOTE: now in two places to avoid circular import
#TODO: I like the bunch pattern for this too.
class ResultsStore(object):
def __str__(self):
return self._str # pylint: disable=E1101
def _autolag(mod, endog, exog, startlag, maxlag, method, modargs=(),
fitargs=(), regresults=False):
"""
    Returns the results for the lag length that minimizes the information
    criterion.
Parameters
----------
mod : Model class
Model estimator class.
modargs : tuple
args to pass to model. See notes.
fitargs : tuple
args to pass to fit. See notes.
    startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : str {"aic","bic","t-stat"}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
Returns
-------
icbest : float
        Best information criterion value.
    bestlag : int
        The selected lag length: the lag that minimizes the information
        criterion for 'aic'/'bic', or the longest significant lag for 't-stat'.
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
    where i goes from startlag to startlag + maxlag + 1. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
#TODO: can tcol be replaced by maxlag + 2?
#TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit()
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in iteritems(results))
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in iteritems(results))
elif method == "t-stat":
#stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
for lag in range(startlag + maxlag, startlag - 1, -1):
icbest = np.abs(results[lag].tvalues[-1])
if np.abs(icbest) >= stop:
bestlag = lag
icbest = icbest
break
else:
raise ValueError("Information Criterion %s not understood.") % method
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
#this needs to be converted to a class like HetGoldfeldQuandt,
# 3 different returns are a mess
# See:
#Ng and Perron(2001), Lag length selection and the construction of unit root
#tests with good size and power, Econometrica, Vol 69 (6) pp 1519-1554
#TODO: include drift keyword, only valid with regression == "c"
# just changes the distribution of the test statistic to a t distribution
#TODO: autolag is untested
def adfuller(x, maxlag=None, regression="c", autolag='AIC',
store=False, regresults=False):
'''
Augmented Dickey-Fuller unit root test
The Augmented Dickey-Fuller test can be used to test for a unit root in a
univariate process in the presence of serial correlation.
Parameters
----------
x : array_like, 1d
data series
maxlag : int
Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
regression : str {'c','ct','ctt','nc'}
Constant and trend order to include in regression
* 'c' : constant only (default)
* 'ct' : constant and trend
* 'ctt' : constant, and linear and quadratic trend
* 'nc' : no constant, no trend
autolag : {'AIC', 'BIC', 't-stat', None}
* if None, then maxlag lags are used
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
          to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant at
the 95 % level.
store : bool
If True, then a result instance is returned additionally to
the adf statistic (default is False)
regresults : bool
If True, the full regression results are returned (default is False)
Returns
-------
adf : float
Test statistic
pvalue : float
MacKinnon's approximate p-value based on MacKinnon (1994)
usedlag : int
Number of lags used.
nobs : int
Number of observations used for the ADF regression and calculation of
the critical values.
critical values : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels. Based on MacKinnon (2010)
icbest : float
The maximized information criterion if autolag is not None.
regresults : RegressionResults instance
        The full regression results (only returned when regresults is True).
resstore : (optional) instance of ResultStore
an instance of a dummy class with results attached as attributes
Notes
-----
The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
root, with the alternative that there is no unit root. If the pvalue is
above a critical size, then we cannot reject that there is a unit root.
The p-values are obtained through regression surface approximation from
MacKinnon 1994, but using the updated 2010 tables.
If the p-value is close to significant, then the critical values should be
used to judge whether to accept or reject the null.
The autolag option and maxlag for it are described in Greene.
Examples
--------
see example script
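    A minimal sketch (illustrative data; any 1d array-like series works):
    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import adfuller
    >>> np.random.seed(12345)
    >>> x = np.cumsum(np.random.randn(500))  # a random walk, so a unit root is present
    >>> adfstat, pvalue, usedlag, nobs, critvalues, icbest = adfuller(x)
    >>> # a large p-value means the unit-root null cannot be rejected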
References
----------
Greene
Hamilton
P-Values (regression surface approximation)
MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests. `Journal of Business and Economic
Statistics` 12, 167-76.
Critical values
MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen's
University, Dept of Economics, Working Papers. Available at
http://ideas.repec.org/p/qed/wpaper/1227.html
'''
if regresults:
store = True
trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
if regression is None or isinstance(regression, int):
regression = trenddict[regression]
regression = regression.lower()
if regression not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("regression option %s not understood") % regression
x = np.asarray(x)
nobs = x.shape[0]
if maxlag is None:
#from Greene referencing Schwert 1989
maxlag = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
xdiff = np.diff(x)
xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
if store:
resstore = ResultsStore()
if autolag:
if regression != 'nc':
fullRHS = add_trend(xdall, regression, prepend=True)
else:
fullRHS = xdall
startlag = fullRHS.shape[1] - xdall.shape[1] + 1 # 1 for level # pylint: disable=E1103
#search for lag length with smallest information criteria
#Note: use the same number of observations to have comparable IC
#aic and bic: smaller is better
if not regresults:
icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag)
else:
icbest, bestlag, alres = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag,
regresults=regresults)
resstore.autolag_results = alres
bestlag -= startlag # convert to lag not column index
#rerun ols with best autolag
xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
usedlag = bestlag
else:
usedlag = maxlag
icbest = None
if regression != 'nc':
resols = OLS(xdshort, add_trend(xdall[:, :usedlag + 1],
regression)).fit()
else:
resols = OLS(xdshort, xdall[:, :usedlag + 1]).fit()
adfstat = resols.tvalues[0]
# adfstat = (resols.params[0]-1.0)/resols.bse[0]
# the "asymptotically correct" z statistic is obtained as
# nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
# I think this is the statistic that is used for series that are integrated
# for orders higher than I(1), ie., not ADF but cointegration tests.
# Get approx p-value and critical values
pvalue = mackinnonp(adfstat, regression=regression, N=1)
critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
critvalues = {"1%" : critvalues[0], "5%" : critvalues[1],
"10%" : critvalues[2]}
if store:
resstore.resols = resols
resstore.maxlag = maxlag
resstore.usedlag = usedlag
resstore.adfstat = adfstat
resstore.critvalues = critvalues
resstore.nobs = nobs
resstore.H0 = ("The coefficient on the lagged level equals 1 - "
"unit root")
resstore.HA = "The coefficient on the lagged level < 1 - stationary"
resstore.icbest = icbest
return adfstat, pvalue, critvalues, resstore
else:
if not autolag:
return adfstat, pvalue, usedlag, nobs, critvalues
else:
return adfstat, pvalue, usedlag, nobs, critvalues, icbest
def acovf(x, unbiased=False, demean=True, fft=False):
'''
Autocovariance for 1D
Parameters
----------
x : array
Time series data. Must be 1d.
unbiased : bool
        If True, then the denominators are n - k, otherwise n
demean : bool
If True, then subtract the mean x from each element of x
fft : bool
If True, use FFT convolution. This method should be preferred
for long time series.
Returns
-------
acovf : array
autocovariance function
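    Examples
    --------
    A small sketch showing that the direct and FFT paths agree
    (illustrative data):
    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import acovf
    >>> np.random.seed(0)
    >>> x = np.random.randn(200)
    >>> np.allclose(acovf(x, fft=False), acovf(x, fft=True))
    True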
'''
x = np.squeeze(np.asarray(x))
if x.ndim > 1:
raise ValueError("x must be 1d. Got %d dims." % x.ndim)
n = len(x)
if demean:
xo = x - x.mean()
else:
xo = x
if unbiased:
xi = np.arange(1, n + 1)
d = np.hstack((xi, xi[:-1][::-1]))
else:
d = n * np.ones(2 * n - 1)
if fft:
nobs = len(xo)
Frf = np.fft.fft(xo, n=nobs * 2)
acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[n - 1:]
return acov.real
else:
return (np.correlate(xo, xo, 'full') / d)[n - 1:]
def q_stat(x, nobs, type="ljungbox"):
"""
    Returns the Ljung-Box Q statistic.
    Parameters
    ----------
    x : array-like
        Array of autocorrelation coefficients. Can be obtained from acf.
    nobs : int
        Number of observations in the entire sample (i.e., not just the length
        of the autocorrelation function results).
Returns
-------
q-stat : array
Ljung-Box Q-statistic for autocorrelation parameters
p-value : array
P-value of the Q statistic
Notes
------
Written to be used with acf.
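    Examples
    --------
    A minimal sketch combining acf and q_stat (illustrative data):
    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import acf, q_stat
    >>> np.random.seed(0)
    >>> x = np.random.randn(250)
    >>> r = acf(x, nlags=10)
    >>> qstat, pvalues = q_stat(r[1:], nobs=len(x))  # drop lag 0 before passing in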
"""
x = np.asarray(x)
if type == "ljungbox":
ret = (nobs * (nobs + 2) *
np.cumsum((1. / (nobs - np.arange(1, len(x) + 1))) * x**2))
chi2 = stats.chi2.sf(ret, np.arange(1, len(x) + 1))
return ret, chi2
#NOTE: Changed unbiased to False
#see for example
# http://www.itl.nist.gov/div898/handbook/eda/section3/autocopl.htm
def acf(x, unbiased=False, nlags=40, qstat=False, fft=False, alpha=None):
'''
Autocorrelation function for 1d arrays.
Parameters
----------
x : array
Time series data
unbiased : bool
If True, then denominators for autocovariance are n-k, otherwise n
nlags: int, optional
Number of lags to return autocorrelation for.
qstat : bool, optional
If True, returns the Ljung-Box q statistic for each autocorrelation
coefficient. See q_stat for more information.
fft : bool, optional
If True, computes the ACF via FFT.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett\'s formula.
Returns
-------
acf : array
autocorrelation function
    confint : array, optional
        Confidence intervals for the ACF. Returned if alpha is not None.
    qstat : array, optional
        The Ljung-Box Q-Statistic. Returned if qstat is True.
    pvalues : array, optional
        The p-values associated with the Q-statistics. Returned if qstat is
        True.
Notes
-----
    The acf at lag 0 (i.e., 1) is returned.
    This is based on np.correlate, which does full convolution. For very long
    time series it is recommended to use fft convolution instead.
    If unbiased is True, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimator.
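    Examples
    --------
    A minimal sketch (illustrative data):
    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import acf
    >>> np.random.seed(0)
    >>> x = np.random.randn(200)
    >>> r = acf(x, nlags=10)                      # r[0] is 1 by construction
    >>> r, confint = acf(x, nlags=10, alpha=.05)  # with Bartlett confidence bands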
'''
nobs = len(x)
d = nobs # changes if unbiased
if not fft:
avf = acovf(x, unbiased=unbiased, demean=True)
#acf = np.take(avf/avf[0], range(1,nlags+1))
acf = avf[:nlags + 1] / avf[0]
else:
x = np.squeeze(np.asarray(x))
#JP: move to acovf
x0 = x - x.mean()
# ensure that we always use a power of 2 or 3 for zero-padding,
# this way we'll ensure O(n log n) runtime of the fft.
n = _next_regular(2 * nobs + 1)
Frf = np.fft.fft(x0, n=n) # zero-pad for separability
if unbiased:
d = nobs - np.arange(nobs)
acf = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d
acf /= acf[0]
#acf = np.take(np.real(acf), range(1,nlags+1))
acf = np.real(acf[:nlags + 1]) # keep lag 0
if not (qstat or alpha):
return acf
if alpha is not None:
varacf = np.ones(nlags + 1) / nobs
varacf[0] = 0
varacf[1] = 1. / nobs
varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1]**2)
interval = stats.norm.ppf(1 - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(acf - interval, acf + interval))
if not qstat:
return acf, confint
if qstat:
qstat, pvalue = q_stat(acf[1:], nobs=nobs) # drop lag 0
if alpha is not None:
return acf, confint, qstat, pvalue
else:
return acf, qstat, pvalue
def pacf_yw(x, nlags=40, method='unbiased'):
'''Partial autocorrelation estimated with non-recursive yule_walker
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : 'unbiased' (default) or 'mle'
method for the autocovariance calculations in yule walker
Returns
-------
pacf : 1d array
        partial autocorrelations, nlags + 1 elements (including lag 0)
Notes
-----
This solves yule_walker for each desired lag and contains
currently duplicate calculations.
'''
pacf = [1.]
for k in range(1, nlags + 1):
pacf.append(yule_walker(x, k, method=method)[0][-1])
return np.array(pacf)
#NOTE: this is incorrect.
def pacf_ols(x, nlags=40):
'''Calculate partial autocorrelations
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
        Number of lags for which pacf is returned. Lag 0 is included.
Returns
-------
pacf : 1d array
        partial autocorrelations, nlags + 1 elements (including lag 0)
Notes
-----
This solves a separate OLS estimation for each desired lag.
'''
#TODO: add warnings for Yule-Walker
#NOTE: demeaning and not using a constant gave incorrect answers?
#JP: demeaning should have a better estimate of the constant
#maybe we can compare small sample properties with a MonteCarlo
xlags, x0 = lagmat(x, nlags, original='sep')
#xlags = sm.add_constant(lagmat(x, nlags), prepend=True)
xlags = add_constant(xlags)
pacf = [1.]
for k in range(1, nlags+1):
res = OLS(x0[k:], xlags[k:, :k+1]).fit()
#np.take(xlags[k:], range(1,k+1)+[-1],
pacf.append(res.params[-1])
return np.array(pacf)
def pacf(x, nlags=40, method='ywunbiased', alpha=None):
'''Partial autocorrelation estimated
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : 'ywunbiased' (default) or 'ywmle' or 'ols'
specifies which method for the calculations to use:
- yw or ywunbiased : yule walker with bias correction in denominator
for acovf
- ywm or ywmle : yule walker without bias correction
- ols - regression of time series on lags of it and on constant
- ld or ldunbiased : Levinson-Durbin recursion with bias correction
- ldb or ldbiased : Levinson-Durbin recursion without bias correction
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x))
Returns
-------
pacf : 1d array
        partial autocorrelations, nlags + 1 elements, including lag 0
confint : array, optional
        Confidence intervals for the PACF. Returned if alpha is not None.
Notes
-----
This solves yule_walker equations or ols for each desired lag
and contains currently duplicate calculations.
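    Examples
    --------
    A short sketch comparing two of the available methods (illustrative data):
    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import pacf
    >>> np.random.seed(0)
    >>> x = np.random.randn(200)
    >>> p_yw = pacf(x, nlags=10, method='ywunbiased')
    >>> p_ols = pacf(x, nlags=10, method='ols')
    >>> p_yw2, confint = pacf(x, nlags=10, alpha=.05)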
'''
if method == 'ols':
ret = pacf_ols(x, nlags=nlags)
elif method in ['yw', 'ywu', 'ywunbiased', 'yw_unbiased']:
ret = pacf_yw(x, nlags=nlags, method='unbiased')
elif method in ['ywm', 'ywmle', 'yw_mle']:
ret = pacf_yw(x, nlags=nlags, method='mle')
    elif method in ['ld', 'ldu', 'ldunbiased', 'ldunbiase', 'ld_unbiased']:
acv = acovf(x, unbiased=True)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
#print 'ld', ld_
ret = ld_[2]
# inconsistent naming with ywmle
elif method in ['ldb', 'ldbiased', 'ld_biased']:
acv = acovf(x, unbiased=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
else:
raise ValueError('method not available')
if alpha is not None:
varacf = 1. / len(x) # for all lags >=1
interval = stats.norm.ppf(1. - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(ret - interval, ret + interval))
confint[0] = ret[0] # fix confidence interval for lag 0 to varpacf=0
return ret, confint
else:
return ret
def ccovf(x, y, unbiased=True, demean=True):
''' crosscovariance for 1D
Parameters
----------
x, y : arrays
time series data
unbiased : boolean
        If True, then the denominators are n - k, otherwise n
Returns
-------
ccovf : array
autocovariance function
Notes
-----
This uses np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
'''
n = len(x)
if demean:
xo = x - x.mean()
yo = y - y.mean()
else:
xo = x
yo = y
if unbiased:
xi = np.ones(n)
d = np.correlate(xi, xi, 'full')
else:
d = n
return (np.correlate(xo, yo, 'full') / d)[n - 1:]
def ccf(x, y, unbiased=True):
'''cross-correlation function for 1d
Parameters
----------
x, y : arrays
time series data
unbiased : boolean
        If True, then the denominators for the autocovariance are n - k, otherwise n
Returns
-------
ccf : array
cross-correlation function of x and y
Notes
-----
    This is based on np.correlate, which does full convolution. For very long
    time series it is recommended to use fft convolution instead.
    If unbiased is True, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimator.
'''
cvf = ccovf(x, y, unbiased=unbiased, demean=True)
return cvf / (np.std(x) * np.std(y))
def periodogram(X):
"""
Returns the periodogram for the natural frequency of X
Parameters
----------
X : array-like
Array for which the periodogram is desired.
Returns
-------
pgram : array
1./len(X) * np.abs(np.fft.fft(X))**2
References
----------
Brockwell and Davis.
"""
X = np.asarray(X)
#if kernel == "bartlett":
# w = 1 - np.arange(M+1.)/M #JP removed integer division
pergr = 1. / len(X) * np.abs(np.fft.fft(X))**2
pergr[0] = 0. # what are the implications of this?
return pergr
#copied from nitime and statsmodels\sandbox\tsa\examples\try_ld_nitime.py
#TODO: check what to return, for testing and trying out returns everything
def levinson_durbin(s, nlags=10, isacov=False):
'''Levinson-Durbin recursion for autoregressive processes
Parameters
----------
s : array_like
        If isacov is False, then this is the time series. If isacov is True,
        then this is interpreted as the autocovariance function starting with
        lag 0.
nlags : integer
largest lag to include in recursion or order of the autoregressive
process
isacov : boolean
flag to indicate whether the first argument, s, contains the
autocovariances or the data series.
Returns
-------
sigma_v : float
        estimate of the error (innovation) variance of the AR(nlags) fit
arcoefs : ndarray
estimate of the autoregressive coefficients
pacf : ndarray
partial autocorrelation function
sigma : ndarray
entire sigma array from intermediate result, last value is sigma_v
phi : ndarray
entire phi array from intermediate result, last column contains
autoregressive coefficients for AR(nlags) with a leading 1
Notes
-----
This function returns currently all results, but maybe we drop sigma and
phi from the returns.
If this function is called with the time series (isacov=False), then the
sample autocovariance function is calculated with the default options
(biased, no fft).
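    Examples
    --------
    A rough sketch on a simulated AR(2) series (illustrative parameters):
    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import levinson_durbin
    >>> np.random.seed(0)
    >>> e = np.random.randn(600)
    >>> x = np.zeros(600)
    >>> for t in range(2, 600):
    ...     x[t] = 0.6 * x[t-1] - 0.3 * x[t-2] + e[t]
    >>> sigma_v, arcoefs, pacf_, sig, phi = levinson_durbin(x, nlags=2)
    >>> # arcoefs should be close to [0.6, -0.3] for a long enough sample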
'''
s = np.asarray(s)
order = nlags # rename compared to nitime
#from nitime
##if sxx is not None and type(sxx) == np.ndarray:
## sxx_m = sxx[:order+1]
##else:
## sxx_m = ut.autocov(s)[:order+1]
if isacov:
sxx_m = s
else:
sxx_m = acovf(s)[:order + 1] # not tested
phi = np.zeros((order + 1, order + 1), 'd')
sig = np.zeros(order + 1)
# initial points for the recursion
phi[1, 1] = sxx_m[1] / sxx_m[0]
sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
for k in range(2, order + 1):
phi[k, k] = (sxx_m[k] - np.dot(phi[1:k, k-1],
sxx_m[1:k][::-1])) / sig[k-1]
for j in range(1, k):
phi[j, k] = phi[j, k-1] - phi[k, k] * phi[k-j, k-1]
sig[k] = sig[k-1] * (1 - phi[k, k]**2)
sigma_v = sig[-1]
arcoefs = phi[1:, -1]
pacf_ = np.diag(phi).copy()
pacf_[0] = 1.
return sigma_v, arcoefs, pacf_, sig, phi # return everything
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
"""four tests for granger non causality of 2 timeseries
all four tests give similar results
`params_ftest` and `ssr_ftest` are equivalent based on F test which is
identical to lmtest:grangertest in R
Parameters
----------
x : array, 2d, (nobs,2)
data for test whether the time series in the second column Granger
causes the time series in the first column
maxlag : integer
the Granger causality test results are calculated for all lags up to
maxlag
verbose : bool
print results if true
Returns
-------
results : dictionary
all test results, dictionary keys are the number of lags. For each
lag the values are a tuple, with the first element a dictionary with
teststatistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted
model and the restriction (contrast) matrix for the parameter f_test.
Notes
-----
TODO: convert to class and attach results properly
The Null hypothesis for grangercausalitytests is that the time series in
the second column, x2, does NOT Granger cause the time series in the first
    column, x1. Granger causality means that past values of x2 have a
statistically significant effect on the current value of x1, taking past
values of x1 into account as regressors. We reject the null hypothesis
that x2 does not Granger cause x1 if the pvalues are below a desired size
of the test.
The null hypothesis for all four test is that the coefficients
corresponding to past values of the second time series are zero.
'params_ftest', 'ssr_ftest' are based on F distribution
'ssr_chi2test', 'lrtest' are based on chi-square distribution
References
----------
http://en.wikipedia.org/wiki/Granger_causality
Greene: Econometric Analysis
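    Examples
    --------
    A minimal sketch (illustrative data; with pure noise any "causality" found
    is spurious):
    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import grangercausalitytests
    >>> np.random.seed(0)
    >>> data = np.random.randn(200, 2)
    >>> res = grangercausalitytests(data, maxlag=2, verbose=False)
    >>> res[1][0]['ssr_ftest']  # (F statistic, p-value, df_denom, df_num) at lag 1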
"""
from scipy import stats
x = np.asarray(x)
if x.shape[0] <= 3 * maxlag + int(addconst):
raise ValueError("Insufficient observations. Maximum allowable "
"lag is {0}".format(int((x.shape[0] - int(addconst)) /
3) - 1))
resli = {}
for mlg in range(1, maxlag + 1):
result = {}
if verbose:
print('\nGranger Causality')
print('number of lags (no zero)', mlg)
mxlg = mlg
# create lagmat of both time series
dta = lagmat2ds(x, mxlg, trim='both', dropex=1)
#add constant
if addconst:
dtaown = add_constant(dta[:, 1:(mxlg + 1)], prepend=False)
dtajoint = add_constant(dta[:, 1:], prepend=False)
else:
raise NotImplementedError('Not Implemented')
#dtaown = dta[:, 1:mxlg]
#dtajoint = dta[:, 1:]
# Run ols on both models without and with lags of second variable
res2down = OLS(dta[:, 0], dtaown).fit()
res2djoint = OLS(dta[:, 0], dtajoint).fit()
#print results
#for ssr based tests see:
#http://support.sas.com/rnd/app/examples/ets/granger/index.htm
#the other tests are made-up
# Granger Causality test using ssr (F statistic)
fgc1 = ((res2down.ssr - res2djoint.ssr) /
res2djoint.ssr / mxlg * res2djoint.df_resid)
if verbose:
print('ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (fgc1,
stats.f.sf(fgc1, mxlg,
res2djoint.df_resid),
res2djoint.df_resid, mxlg))
result['ssr_ftest'] = (fgc1,
stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
res2djoint.df_resid, mxlg)
        # Granger Causality test using ssr (chi2 statistic)
fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr
if verbose:
print('ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, '
'df=%d' % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg))
result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
#likelihood ratio test pvalue:
lr = -2 * (res2down.llf - res2djoint.llf)
if verbose:
print('likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d' %
(lr, stats.chi2.sf(lr, mxlg), mxlg))
result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)
# F test that all lag coefficients of exog are zero
rconstr = np.column_stack((np.zeros((mxlg, mxlg)),
np.eye(mxlg, mxlg),
np.zeros((mxlg, 1))))
ftres = res2djoint.f_test(rconstr)
if verbose:
print('parameter F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (ftres.fvalue, ftres.pvalue, ftres.df_denom,
ftres.df_num))
result['params_ftest'] = (np.squeeze(ftres.fvalue)[()],
np.squeeze(ftres.pvalue)[()],
ftres.df_denom, ftres.df_num)
resli[mxlg] = (result, [res2down, res2djoint, rconstr])
return resli
def coint(y1, y2, regression="c"):
"""
This is a simple cointegration test. Uses unit-root test on residuals to
test for cointegrated relationship
See Hamilton (1994) 19.2
Parameters
----------
y1 : array_like, 1d
first element in cointegrating vector
y2 : array_like
remaining elements in cointegrating vector
    regression : str {'c'}
Included in regression
* 'c' : Constant
Returns
-------
coint_t : float
t-statistic of unit-root test on residuals
pvalue : float
MacKinnon's approximate p-value based on MacKinnon (1994)
crit_value : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels.
Notes
-----
The Null hypothesis is that there is no cointegration, the alternative
hypothesis is that there is cointegrating relationship. If the pvalue is
small, below a critical size, then we can reject the hypothesis that there
is no cointegrating relationship.
P-values are obtained through regression surface approximation from
MacKinnon 1994.
References
----------
MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests. `Journal of Business and Economic
Statistics` 12, 167-76.
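    Examples
    --------
    A minimal sketch with two cointegrated random walks (illustrative data):
    >>> import numpy as np
    >>> from statsmodels.tsa.stattools import coint
    >>> np.random.seed(0)
    >>> y2 = np.cumsum(np.random.randn(500))
    >>> y1 = 0.5 * y2 + np.random.randn(500)  # stationary residual, so cointegrated
    >>> coint_t, pvalue, crit_value = coint(y1, y2)
    >>> # a small p-value means the no-cointegration null can be rejected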
"""
regression = regression.lower()
if regression not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("regression option %s not understood") % regression
y1 = np.asarray(y1)
y2 = np.asarray(y2)
if regression == 'c':
y2 = add_constant(y2, prepend=False)
st1_resid = OLS(y1, y2).fit().resid # stage one residuals
lgresid_cons = add_constant(st1_resid[0:-1], prepend=False)
uroot_reg = OLS(st1_resid[1:], lgresid_cons).fit()
coint_t = (uroot_reg.params[0] - 1) / uroot_reg.bse[0]
pvalue = mackinnonp(coint_t, regression="c", N=2, lags=None)
crit_value = mackinnoncrit(N=1, regression="c", nobs=len(y1))
return coint_t, pvalue, crit_value
def _safe_arma_fit(y, order, model_kw, trend, fit_kw, start_params=None):
try:
return ARMA(y, order=order, **model_kw).fit(disp=0, trend=trend,
start_params=start_params,
**fit_kw)
except LinAlgError:
# SVD convergence failure on badly misspecified models
return
except ValueError as error:
if start_params is not None: # don't recurse again
# user supplied start_params only get one chance
return
# try a little harder, should be handled in fit really
elif ((hasattr(error, 'message') and 'initial' not in error.message)
or 'initial' in str(error)): # py2 and py3
start_params = [.1] * sum(order)
if trend == 'c':
start_params = [.1] + start_params
return _safe_arma_fit(y, order, model_kw, trend, fit_kw,
start_params)
else:
return
except: # no idea what happened
return
def arma_order_select_ic(y, max_ar=4, max_ma=2, ic='bic', trend='c',
model_kw={}, fit_kw={}):
"""
Returns information criteria for many ARMA models
Parameters
----------
y : array-like
Time-series data
max_ar : int
Maximum number of AR lags to use. Default 4.
max_ma : int
Maximum number of MA lags to use. Default 2.
ic : str, list
Information criteria to report. Either a single string or a list
of different criteria is possible.
trend : str
The trend to use when fitting the ARMA models.
model_kw : dict
Keyword arguments to be passed to the ``ARMA`` model
fit_kw : dict
Keyword arguments to be passed to ``ARMA.fit``.
Returns
-------
obj : Results object
Each ic is an attribute with a DataFrame for the results. The AR order
used is the row index. The ma order used is the column index. The
minimum orders are available as ``ic_min_order``.
Examples
--------
>>> from statsmodels.tsa.arima_process import arma_generate_sample
>>> import statsmodels.api as sm
>>> import numpy as np
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> arparams = np.r_[1, -arparams]
    >>> maparams = np.r_[1, maparams]
>>> nobs = 250
>>> np.random.seed(2014)
>>> y = arma_generate_sample(arparams, maparams, nobs)
>>> res = sm.tsa.arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
>>> res.aic_min_order
>>> res.bic_min_order
Notes
-----
This method can be used to tentatively identify the order of an ARMA
process, provided that the time series is stationary and invertible. This
    function computes the full exact MLE estimate of each model and can
    therefore be a little slow. An implementation using approximate estimates
will be provided in the future. In the meantime, consider passing
{method : 'css'} to fit_kw.
"""
from pandas import DataFrame
ar_range = lrange(0, max_ar + 1)
ma_range = lrange(0, max_ma + 1)
if isinstance(ic, string_types):
ic = [ic]
elif not isinstance(ic, (list, tuple)):
raise ValueError("Need a list or a tuple for ic if not a string.")
results = np.zeros((len(ic), max_ar + 1, max_ma + 1))
for ar in ar_range:
for ma in ma_range:
if ar == 0 and ma == 0 and trend == 'nc':
results[:, ar, ma] = np.nan
continue
mod = _safe_arma_fit(y, (ar, ma), model_kw, trend, fit_kw)
if mod is None:
results[:, ar, ma] = np.nan
continue
for i, criteria in enumerate(ic):
results[i, ar, ma] = getattr(mod, criteria)
dfs = [DataFrame(res, columns=ma_range, index=ar_range) for res in results]
res = dict(zip(ic, dfs))
# add the minimums to the results dict
min_res = {}
for i, result in iteritems(res):
mins = np.where(result.min().min() == result)
min_res.update({i + '_min_order' : (mins[0][0], mins[1][0])})
res.update(min_res)
return Bunch(**res)
if __name__ == "__main__":
import statsmodels.api as sm
data = sm.datasets.macrodata.load().data
x = data['realgdp']
# adf is tested now.
adf = adfuller(x, 4, autolag=None)
adfbic = adfuller(x, autolag="bic")
adfaic = adfuller(x, autolag="aic")
adftstat = adfuller(x, autolag="t-stat")
# acf is tested now
    acf1, ci1, Q, pvalue = acf(x, nlags=40, alpha=.05, qstat=True)
    acf2, ci2, Q2, pvalue2 = acf(x, nlags=40, alpha=.05, fft=True, qstat=True)
    acf3, ci3, Q3, pvalue3 = acf(x, nlags=40, alpha=.05, qstat=True,
                                 unbiased=True)
    acf4, ci4, Q4, pvalue4 = acf(x, nlags=40, alpha=.05, fft=True, qstat=True,
                                 unbiased=True)
# pacf is tested now
# pacf1 = pacorr(x)
# pacfols = pacf_ols(x, nlags=40)
# pacfyw = pacf_yw(x, nlags=40, method="mle")
y = np.random.normal(size=(100, 2))
grangercausalitytests(y, 2)
| bsd-3-clause |